summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-11-16 02:21:19 -0500
committerDeepak Nibade <dnibade@nvidia.com>2017-11-16 02:21:35 -0500
commitba8dc318595f597308902ad16ffed89bdbe7000f (patch)
treee882886e0cbc05ac39473b95ead16ee50bd69c15
parent69e032653df5aae335764f6346703a1e55c96a2d (diff)
parent77a90d0b8d2eb1bbb207ae5f46b357f2d7cd07ab (diff)
Merge remote-tracking branch 'remotes/origin/dev/linux-nvgpu-t19x' into linux-nvgpu
Bug 200363166 Change-Id: Ic662d7b44b673db28dc0aeba338ae67cf2a43d64 Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
-rw-r--r--NVIDIA-REVIEWERS2
-rw-r--r--drivers/gpu/nvgpu/Kconfig7
-rw-r--r--drivers/gpu/nvgpu/Makefile48
-rw-r--r--drivers/gpu/nvgpu/acr_t19x.h29
-rw-r--r--drivers/gpu/nvgpu/channel_t19x.h33
-rw-r--r--drivers/gpu/nvgpu/common/linux/io_t19x.c29
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c33
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h23
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c115
-rw-r--r--drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h21
-rw-r--r--drivers/gpu/nvgpu/common/linux/module_t19x.c62
-rw-r--r--drivers/gpu/nvgpu/common/linux/nvhost_t19x.c35
-rw-r--r--drivers/gpu/nvgpu/common/linux/pci_t19x.c24
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu_t19x.c31
-rw-r--r--drivers/gpu/nvgpu/ecc_t19x.h29
-rw-r--r--drivers/gpu/nvgpu/fifo_t19x.h30
-rw-r--r--drivers/gpu/nvgpu/gr_t19x.h29
-rw-r--r--drivers/gpu/nvgpu/gv100/bios_gv100.c108
-rw-r--r--drivers/gpu/nvgpu/gv100/bios_gv100.h31
-rw-r--r--drivers/gpu/nvgpu/gv100/fb_gv100.c184
-rw-r--r--drivers/gpu/nvgpu/gv100/fb_gv100.h32
-rw-r--r--drivers/gpu/nvgpu/gv100/fifo_gv100.c40
-rw-r--r--drivers/gpu/nvgpu/gv100/fifo_gv100.h33
-rw-r--r--drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c47
-rw-r--r--drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h34
-rw-r--r--drivers/gpu/nvgpu/gv100/gr_gv100.c349
-rw-r--r--drivers/gpu/nvgpu/gv100/gr_gv100.h36
-rw-r--r--drivers/gpu/nvgpu/gv100/gv100.h32
-rw-r--r--drivers/gpu/nvgpu/gv100/hal_gv100.c769
-rw-r--r--drivers/gpu/nvgpu/gv100/hal_gv100.h30
-rw-r--r--drivers/gpu/nvgpu/gv100/mm_gv100.c55
-rw-r--r--drivers/gpu/nvgpu/gv100/mm_gv100.h33
-rw-r--r--drivers/gpu/nvgpu/gv100/regops_gv100.c463
-rw-r--r--drivers/gpu/nvgpu/gv100/regops_gv100.h42
-rw-r--r--drivers/gpu/nvgpu/gv11b/acr_gv11b.c294
-rw-r--r--drivers/gpu/nvgpu/gv11b/acr_gv11b.h30
-rw-r--r--drivers/gpu/nvgpu/gv11b/ce_gv11b.c110
-rw-r--r--drivers/gpu/nvgpu/gv11b/ce_gv11b.h35
-rw-r--r--drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c206
-rw-r--r--drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h34
-rw-r--r--drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c99
-rw-r--r--drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h28
-rw-r--r--drivers/gpu/nvgpu/gv11b/ecc_gv11b.h64
-rw-r--r--drivers/gpu/nvgpu/gv11b/fb_gv11b.c1555
-rw-r--r--drivers/gpu/nvgpu/gv11b/fb_gv11b.h72
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.c1907
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.h117
-rw-r--r--drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c72
-rw-r--r--drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h36
-rw-r--r--drivers/gpu/nvgpu/gv11b/gr_gv11b.c3639
-rw-r--r--drivers/gpu/nvgpu/gv11b/gr_gv11b.h215
-rw-r--r--drivers/gpu/nvgpu/gv11b/gv11b.c38
-rw-r--r--drivers/gpu/nvgpu/gv11b/gv11b.h32
-rw-r--r--drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c748
-rw-r--r--drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h99
-rw-r--r--drivers/gpu/nvgpu/gv11b/hal_gv11b.c778
-rw-r--r--drivers/gpu/nvgpu/gv11b/hal_gv11b.h31
-rw-r--r--drivers/gpu/nvgpu/gv11b/ltc_gv11b.c205
-rw-r--r--drivers/gpu/nvgpu/gv11b/ltc_gv11b.h34
-rw-r--r--drivers/gpu/nvgpu/gv11b/mc_gv11b.c92
-rw-r--r--drivers/gpu/nvgpu/gv11b/mc_gv11b.h30
-rw-r--r--drivers/gpu/nvgpu/gv11b/mm_gv11b.c330
-rw-r--r--drivers/gpu/nvgpu/gv11b/mm_gv11b.h46
-rw-r--r--drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c549
-rw-r--r--drivers/gpu/nvgpu/gv11b/pmu_gv11b.c283
-rw-r--r--drivers/gpu/nvgpu/gv11b/pmu_gv11b.h37
-rw-r--r--drivers/gpu/nvgpu/gv11b/regops_gv11b.c1548
-rw-r--r--drivers/gpu/nvgpu/gv11b/regops_gv11b.h42
-rw-r--r--drivers/gpu/nvgpu/gv11b/subctx_gv11b.c185
-rw-r--r--drivers/gpu/nvgpu/gv11b/subctx_gv11b.h34
-rw-r--r--drivers/gpu/nvgpu/gv11b/therm_gv11b.c75
-rw-r--r--drivers/gpu/nvgpu/gv11b/therm_gv11b.h28
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h29
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h34
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h227
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h187
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h107
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h455
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h599
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h1511
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h551
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h187
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h143
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h1287
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h3935
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h619
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h259
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h659
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h211
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h63
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h167
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h79
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h91
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h195
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h935
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h775
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h299
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h115
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h235
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h95
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h143
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h207
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h223
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h187
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h107
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h455
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h599
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h1827
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h687
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h187
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h151
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h1495
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h4939
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h803
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h251
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h659
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h211
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h63
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h167
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h79
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h91
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h187
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h951
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h775
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h435
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h115
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h235
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h95
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/io_t19x.h29
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h26
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h27
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h26
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h23
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h37
-rw-r--r--drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h47
-rw-r--r--drivers/gpu/nvgpu/tsg_t19x.h36
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c105
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c117
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h31
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c41
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h30
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c37
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h21
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c642
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c79
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h31
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c59
-rw-r--r--drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h29
-rw-r--r--drivers/gpu/nvgpu/vgpu/vgpu_t19x.h36
-rw-r--r--include/linux/tegra_gpu_t19x.h24
-rw-r--r--include/linux/tegra_vgpu_t19x.h55
-rw-r--r--include/uapi/linux/nvgpu-t19x.h59
152 files changed, 48673 insertions, 2 deletions
diff --git a/NVIDIA-REVIEWERS b/NVIDIA-REVIEWERS
index 99b63bdb..55dcaa70 100644
--- a/NVIDIA-REVIEWERS
+++ b/NVIDIA-REVIEWERS
@@ -63,5 +63,3 @@ S: Supported
63F: drivers/gpu/nvgpu/* 63F: drivers/gpu/nvgpu/*
64F: include/* 64F: include/*
65F: ../../gpu-firmware-private/ 65F: ../../gpu-firmware-private/
66
67
diff --git a/drivers/gpu/nvgpu/Kconfig b/drivers/gpu/nvgpu/Kconfig
index 4f90a35c..7a9a99c6 100644
--- a/drivers/gpu/nvgpu/Kconfig
+++ b/drivers/gpu/nvgpu/Kconfig
@@ -136,3 +136,10 @@ config GK20A_VIDMEM
136 Enable support for using and allocating buffers in a distinct video 136 Enable support for using and allocating buffers in a distinct video
137 memory aperture (in contrast to general system memory), available on 137 memory aperture (in contrast to general system memory), available on
138 GPUs that have their own banks. PCIe GPUs have this, for example. 138 GPUs that have their own banks. PCIe GPUs have this, for example.
139
140config TEGRA_19x_GPU
141 bool "Tegra 19x family GPU"
142 depends on GK20A && ARCH_TEGRA_19x_SOC
143 default y
144 help
145 Support for NVIDIA Tegra 19x family of GPU
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index f3338dc2..9a7a3d07 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -258,3 +258,51 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
258 vgpu/gp10b/vgpu_gr_gp10b.o \ 258 vgpu/gp10b/vgpu_gr_gp10b.o \
259 vgpu/gp10b/vgpu_mm_gp10b.o 259 vgpu/gp10b/vgpu_mm_gp10b.o
260endif 260endif
261
262ifeq ($(CONFIG_ARCH_TEGRA_19x_SOC),y)
263nvgpu-y += \
264 common/mm/gmmu_t19x.o \
265 common/linux/ioctl_tsg_t19x.o \
266 common/linux/ioctl_ctrl_t19x.o \
267 common/linux/io_t19x.o \
268 common/linux/module_t19x.o \
269 common/linux/pci_t19x.o \
270 gv11b/gv11b.o \
271 gv11b/css_gr_gv11b.o \
272 gv11b/dbg_gpu_gv11b.o \
273 gv11b/mc_gv11b.o \
274 gv11b/ltc_gv11b.o \
275 gv11b/hal_gv11b.o \
276 gv11b/gv11b_gating_reglist.o \
277 gv11b/gr_gv11b.o \
278 gv11b/fb_gv11b.o \
279 gv11b/fifo_gv11b.o \
280 gv11b/mm_gv11b.o \
281 gv11b/ce_gv11b.o \
282 gv11b/gr_ctx_gv11b.o \
283 gv11b/pmu_gv11b.o \
284 gv11b/acr_gv11b.o \
285 gv11b/subctx_gv11b.o \
286 gv11b/regops_gv11b.o \
287 gv11b/therm_gv11b.o \
288 gv100/mm_gv100.o \
289 gv100/gr_ctx_gv100.o \
290 gv100/fb_gv100.o \
291 gv100/bios_gv100.o \
292 gv100/fifo_gv100.o \
293 gv100/gr_gv100.o \
294 gv100/regops_gv100.o \
295 gv100/hal_gv100.o
296
297nvgpu-$(CONFIG_TEGRA_GK20A) += gv11b/platform_gv11b_tegra.o
298nvgpu-$(CONFIG_TEGRA_GK20A_NVHOST) += common/linux/nvhost_t19x.o
299
300nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \
301 vgpu/gv11b/platform_gv11b_vgpu_tegra.o \
302 vgpu/gv11b/vgpu_gv11b.o \
303 vgpu/gv11b/vgpu_hal_gv11b.o \
304 vgpu/gv11b/vgpu_gr_gv11b.o \
305 vgpu/gv11b/vgpu_fifo_gv11b.o \
306 vgpu/gv11b/vgpu_subctx_gv11b.o \
307 vgpu/gv11b/vgpu_tsg_gv11b.o
308endif
diff --git a/drivers/gpu/nvgpu/acr_t19x.h b/drivers/gpu/nvgpu/acr_t19x.h
new file mode 100644
index 00000000..0693c6a1
--- /dev/null
+++ b/drivers/gpu/nvgpu/acr_t19x.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _NVGPU_ACR_T19X_H_
24#define _NVGPU_ACR_T19X_H_
25
26#define BIGGPU_FECS_UCODE_SIG "gv100/fecs_sig.bin"
27#define BIGGPU_GPCCS_UCODE_SIG "gv100/gpccs_sig.bin"
28
29#endif
diff --git a/drivers/gpu/nvgpu/channel_t19x.h b/drivers/gpu/nvgpu/channel_t19x.h
new file mode 100644
index 00000000..d3cb71a1
--- /dev/null
+++ b/drivers/gpu/nvgpu/channel_t19x.h
@@ -0,0 +1,33 @@
1/*
2 * NVIDIA T19x Channel info
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_CHANNEL_T19X_H_
26#define _NVGPU_CHANNEL_T19X_H_
27
28struct channel_t19x {
29 u32 subctx_id;
30 u32 runqueue_sel;
31};
32
33#endif
diff --git a/drivers/gpu/nvgpu/common/linux/io_t19x.c b/drivers/gpu/nvgpu/common/linux/io_t19x.c
new file mode 100644
index 00000000..5c6b76ba
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/io_t19x.c
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include <nvgpu/io.h>
15#include <nvgpu/types.h>
16
17#include "common/linux/os_linux.h"
18#include "gk20a/gk20a.h"
19
20#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
21
22void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v)
23{
24 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
25 void __iomem *reg = l->t19x.usermode_regs + (r - usermode_cfg0_r());
26
27 writel_relaxed(v, reg);
28 gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v);
29}
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c
new file mode 100644
index 00000000..a04fb5c9
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include <uapi/linux/nvgpu.h>
15
16#include <nvgpu/types.h>
17#include <nvgpu/enabled.h>
18#include <nvgpu/enabled_t19x.h>
19
20#include "ioctl_ctrl_t19x.h"
21#include "common/linux/os_linux.h"
22#include "gk20a/gk20a.h"
23
24u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g)
25{
26 u64 ioctl_flags = 0;
27
28 if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS))
29 ioctl_flags |= NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS;
30
31 return ioctl_flags;
32}
33
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h
new file mode 100644
index 00000000..64141223
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef _NVGPU_IOCTL_CTRL_T19X
15#define _NVGPU_IOCTL_CTRL_T19X
16
17#include <nvgpu/types.h>
18
19struct gk20a;
20
21u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g);
22
23#endif
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c
new file mode 100644
index 00000000..1c96db69
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c
@@ -0,0 +1,115 @@
1/*
2 * GV11B TSG IOCTL Handler
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <uapi/linux/nvgpu.h>
18
19#include "gk20a/gk20a.h"
20
21#include "gv11b/fifo_gv11b.h"
22#include "gv11b/subctx_gv11b.h"
23#include "ioctl_tsg_t19x.h"
24#include "common/linux/os_linux.h"
25
26static int gv11b_tsg_ioctl_bind_channel_ex(struct gk20a *g,
27 struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg)
28{
29 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
30 struct gk20a_sched_ctrl *sched = &l->sched_ctrl;
31 struct channel_gk20a *ch;
32 struct gr_gk20a *gr = &g->gr;
33 int err = 0;
34
35 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid);
36
37 nvgpu_mutex_acquire(&sched->control_lock);
38 if (sched->control_locked) {
39 err = -EPERM;
40 goto mutex_release;
41 }
42 err = gk20a_busy(g);
43 if (err) {
44 nvgpu_err(g, "failed to power on gpu");
45 goto mutex_release;
46 }
47
48 ch = gk20a_get_channel_from_file(arg->channel_fd);
49 if (!ch) {
50 err = -EINVAL;
51 goto idle;
52 }
53
54 if (arg->tpc_pg_enabled && (!tsg->t19x.tpc_num_initialized)) {
55 if ((arg->num_active_tpcs > gr->max_tpc_count) ||
56 !(arg->num_active_tpcs)) {
57 nvgpu_err(g, "Invalid num of active TPCs");
58 err = -EINVAL;
59 goto ch_put;
60 }
61 tsg->t19x.tpc_num_initialized = true;
62 tsg->t19x.num_active_tpcs = arg->num_active_tpcs;
63 tsg->t19x.tpc_pg_enabled = true;
64 } else {
65 tsg->t19x.tpc_pg_enabled = false;
66 nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled");
67 }
68
69 if (arg->subcontext_id < g->fifo.t19x.max_subctx_count) {
70 ch->t19x.subctx_id = arg->subcontext_id;
71 } else {
72 err = -EINVAL;
73 goto ch_put;
74 }
75
76 nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d",
77 ch->chid, ch->t19x.subctx_id);
78
79 /* Use runqueue selector 1 for all ASYNC ids */
80 if (ch->t19x.subctx_id > CHANNEL_INFO_VEID0)
81 ch->t19x.runqueue_sel = 1;
82
83 err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch);
84ch_put:
85 gk20a_channel_put(ch);
86idle:
87 gk20a_idle(g);
88mutex_release:
89 nvgpu_mutex_release(&sched->control_lock);
90 return err;
91}
92
93int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg,
94 unsigned int cmd, u8 *buf)
95{
96 int err = 0;
97
98 nvgpu_log(g, gpu_dbg_fn, "t19x_tsg_ioctl_handler");
99
100 switch (cmd) {
101 case NVGPU_TSG_IOCTL_BIND_CHANNEL_EX:
102 {
103 err = gv11b_tsg_ioctl_bind_channel_ex(g, tsg,
104 (struct nvgpu_tsg_bind_channel_ex_args *)buf);
105 break;
106 }
107
108 default:
109 nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x",
110 cmd);
111 err = -ENOTTY;
112 break;
113 }
114 return err;
115}
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h
new file mode 100644
index 00000000..3376ffce
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h
@@ -0,0 +1,21 @@
1/*
2 * GV11B TSG IOCTL handler
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _NVGPU_IOCTL_TSG_T19X
17#define _NVGPU_IOCTL_TSG_T19X
18
19int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg,
20 unsigned int cmd, u8 *arg);
21#endif
diff --git a/drivers/gpu/nvgpu/common/linux/module_t19x.c b/drivers/gpu/nvgpu/common/linux/module_t19x.c
new file mode 100644
index 00000000..f0e3d438
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/module_t19x.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <nvgpu/types.h>
18
19#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
20
21#include "common/linux/os_linux.h"
22
23/*
24 * Locks out the driver from accessing GPU registers. This prevents access to
25 * these registers after the GPU has been clock or power gated. This should help
26 * find annoying bugs where register reads and writes are silently dropped
27 * after the GPU has been turned off. On older chips these reads and writes can
28 * also lock the entire CPU up.
29 */
30void t19x_lockout_registers(struct gk20a *g)
31{
32 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
33
34 l->t19x.usermode_regs = NULL;
35}
36
37/*
38 * Undoes t19x_lockout_registers().
39 */
40void t19x_restore_registers(struct gk20a *g)
41{
42 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
43
44 l->t19x.usermode_regs = l->t19x.usermode_regs_saved;
45}
46
47void t19x_remove_support(struct gk20a *g)
48{
49 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
50
51 if (l->t19x.usermode_regs) {
52 l->t19x.usermode_regs = NULL;
53 }
54}
55
56void t19x_init_support(struct gk20a *g)
57{
58 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
59
60 l->t19x.usermode_regs = l->regs + usermode_cfg0_r();
61 l->t19x.usermode_regs_saved = l->t19x.usermode_regs;
62}
diff --git a/drivers/gpu/nvgpu/common/linux/nvhost_t19x.c b/drivers/gpu/nvgpu/common/linux/nvhost_t19x.c
new file mode 100644
index 00000000..21cf62ec
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/nvhost_t19x.c
@@ -0,0 +1,35 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/nvhost.h>
18#include <linux/nvhost_t194.h>
19
20#include <nvgpu/nvhost_t19x.h>
21
22#include "common/linux/nvhost_priv.h"
23
24int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
25 struct nvgpu_nvhost_dev *nvhost_dev,
26 u64 *base, size_t *size)
27{
28 return nvhost_syncpt_unit_interface_get_aperture(
29 nvhost_dev->host1x_pdev, (phys_addr_t *)base, size);
30}
31
32u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
33{
34 return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
35}
diff --git a/drivers/gpu/nvgpu/common/linux/pci_t19x.c b/drivers/gpu/nvgpu/common/linux/pci_t19x.c
new file mode 100644
index 00000000..54efd68e
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/pci_t19x.c
@@ -0,0 +1,24 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include <nvgpu/types.h>
15
16#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
17
18#include "common/linux/os_linux.h"
19
20void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l)
21{
22 l->t19x.usermode_regs = l->regs + usermode_cfg0_r();
23 l->t19x.usermode_regs_saved = l->t19x.usermode_regs;
24}
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c b/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c
new file mode 100644
index 00000000..9f9c188d
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <uapi/linux/nvgpu.h>
24
25#include <nvgpu/gmmu.h>
26
27void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags)
28{
29 attrs->t19x_attrs.l3_alloc = (bool)(flags &
30 NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC);
31}
diff --git a/drivers/gpu/nvgpu/ecc_t19x.h b/drivers/gpu/nvgpu/ecc_t19x.h
new file mode 100644
index 00000000..5b571ce1
--- /dev/null
+++ b/drivers/gpu/nvgpu/ecc_t19x.h
@@ -0,0 +1,29 @@
1/*
2 * NVIDIA T19x ECC
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _NVGPU_ECC_T19X_H_
25#define _NVGPU_ECC_T19X_H_
26
27#include "gv11b/ecc_gv11b.h"
28
29#endif
diff --git a/drivers/gpu/nvgpu/fifo_t19x.h b/drivers/gpu/nvgpu/fifo_t19x.h
new file mode 100644
index 00000000..7274d1fe
--- /dev/null
+++ b/drivers/gpu/nvgpu/fifo_t19x.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef _FIFO_T19X_H_
#define _FIFO_T19X_H_

/*
 * T19x-specific FIFO state, embedded into the common fifo structure
 * (referenced elsewhere as f->t19x, e.g. by gr_gv100_get_patch_slots()).
 */
struct fifo_t19x {
	/* Maximum number of subcontexts supported per channel group. */
	u32 max_subctx_count;
};

#endif
diff --git a/drivers/gpu/nvgpu/gr_t19x.h b/drivers/gpu/nvgpu/gr_t19x.h
new file mode 100644
index 00000000..954472fa
--- /dev/null
+++ b/drivers/gpu/nvgpu/gr_t19x.h
@@ -0,0 +1,29 @@
1/*
2 * NVIDIA T19x GR
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _NVGPU_GR_T19X_H_
25#define _NVGPU_GR_T19X_H_
26
27#include "gv11b/gr_gv11b.h"
28
29#endif
diff --git a/drivers/gpu/nvgpu/gv100/bios_gv100.c b/drivers/gpu/nvgpu/gv100/bios_gv100.c
new file mode 100644
index 00000000..9ca05a11
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/bios_gv100.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/bios.h>
24#include <nvgpu/nvgpu_common.h>
25#include <nvgpu/timers.h>
26
27#include "gk20a/gk20a.h"
28#include "gp106/bios_gp106.h"
29#include "bios_gv100.h"
30
31#include <nvgpu/hw/gv100/hw_pwr_gv100.h>
32#include <nvgpu/hw/gv100/hw_bus_gv100.h>
33
34#define PMU_BOOT_TIMEOUT_DEFAULT 100 /* usec */
35#define PMU_BOOT_TIMEOUT_MAX 2000000 /* usec */
36
37#define SCRATCH_PREOS_PROGRESS 6
38#define PREOS_PROGRESS_MASK(r) ((r >> 12) & 0xf)
39#define PREOS_PROGRESS_NOT_STARTED 0
40#define PREOS_PROGRESS_STARTED 1
41#define PREOS_PROGRESS_EXIT 2
42#define PREOS_PROGRESS_EXIT_SECUREMODE 3
43#define PREOS_PROGRESS_ABORTED 6
44
45#define SCRATCH_PMU_EXIT_AND_HALT 1
46#define PMU_EXIT_AND_HALT_SET(r, v) ((r & ~0x200UL) | v)
47#define PMU_EXIT_AND_HALT_YES (0x1UL << 9)
48
49#define SCRATCH_PRE_OS_RELOAD 1
50#define PRE_OS_RELOAD_SET(r, v) ((r & ~0x100UL) | v)
51#define PRE_OS_RELOAD_YES (0x1UL << 8)
52
53
/*
 * Re-arm the preOS reload handshake.
 *
 * If the preOS progress scratch register shows that preOS has started
 * (progress != NOT_STARTED), set the "pre-OS reload" bit in the reload
 * scratch register via a read-modify-write so only that bit changes.
 */
void gv100_bios_preos_reload_check(struct gk20a *g)
{
	u32 progress = gk20a_readl(g,
		bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS));

	if (PREOS_PROGRESS_MASK(progress) != PREOS_PROGRESS_NOT_STARTED) {
		u32 reload = gk20a_readl(g,
			bus_sw_scratch_r(SCRATCH_PRE_OS_RELOAD));

		/* Preserve all other bits in the scratch register. */
		gk20a_writel(g, bus_sw_scratch_r(SCRATCH_PRE_OS_RELOAD),
			PRE_OS_RELOAD_SET(reload, PRE_OS_RELOAD_YES));
	}
}
67
/*
 * Complete the preOS handshake and wait for the PMU falcon to halt.
 *
 * Returns 0 if preOS reported STARTED and the exit handshake was issued,
 * -EINVAL if preOS never reported STARTED.
 *
 * NOTE(review): if the polling loop below times out, err stays 0 — the
 * timeout is not reflected in the return value. Confirm this is intended.
 */
int gv100_bios_preos_wait_for_halt(struct gk20a *g)
{
	int err = -EINVAL;
	u32 progress;
	u32 tmp;
	int preos_completed;
	struct nvgpu_timeout timeout;

	/* Give preOS a moment to update its progress scratch register. */
	nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);

	/* Check the progress */
	progress = gk20a_readl(g, bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS));

	if (PREOS_PROGRESS_MASK(progress) == PREOS_PROGRESS_STARTED) {
		err = 0;

		/* Complete the handshake: request PMU exit-and-halt. */
		tmp = gk20a_readl(g,
			bus_sw_scratch_r(SCRATCH_PMU_EXIT_AND_HALT));

		gk20a_writel(g, bus_sw_scratch_r(SCRATCH_PMU_EXIT_AND_HALT),
			PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES));

		nvgpu_timeout_init(g, &timeout,
			PMU_BOOT_TIMEOUT_MAX /
				PMU_BOOT_TIMEOUT_DEFAULT,
			NVGPU_TIMER_RETRY_TIMER);

		/*
		 * Poll until the falcon signals a halt interrupt AND the
		 * progress register reaches EXIT, or the retry budget is
		 * exhausted.
		 */
		do {
			progress = gk20a_readl(g,
				bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS));
			preos_completed = pwr_falcon_cpuctl_halt_intr_v(
				gk20a_readl(g, pwr_falcon_cpuctl_r())) &&
				(PREOS_PROGRESS_MASK(progress) ==
				PREOS_PROGRESS_EXIT);
			nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT);
		} while (!preos_completed && !nvgpu_timeout_expired(&timeout));
	}

	return err;
}
diff --git a/drivers/gpu/nvgpu/gv100/bios_gv100.h b/drivers/gpu/nvgpu/gv100/bios_gv100.h
new file mode 100644
index 00000000..c6433f57
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/bios_gv100.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef NVGPU_BIOS_GV100_H
24#define NVGPU_BIOS_GV100_H
25
26struct gk20a;
27
28void gv100_bios_preos_reload_check(struct gk20a *g);
29int gv100_bios_preos_wait_for_halt(struct gk20a *g);
30
31#endif
diff --git a/drivers/gpu/nvgpu/gv100/fb_gv100.c b/drivers/gpu/nvgpu/gv100/fb_gv100.c
new file mode 100644
index 00000000..0a2939bf
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/fb_gv100.c
@@ -0,0 +1,184 @@
1/*
2 * GV100 FB
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/types.h>
26
27#include <nvgpu/dma.h>
28#include <nvgpu/log.h>
29#include <nvgpu/enabled.h>
30#include <nvgpu/gmmu.h>
31#include <nvgpu/nvgpu_common.h>
32#include <nvgpu/kmem.h>
33#include <nvgpu/nvgpu_mem.h>
34#include <nvgpu/acr/nvgpu_acr.h>
35#include <nvgpu/firmware.h>
36#include <nvgpu/pmu.h>
37#include <nvgpu/falcon.h>
38
39#include "gk20a/gk20a.h"
40#include "gv100/fb_gv100.h"
41#include "gm20b/acr_gm20b.h"
42
43#include <nvgpu/hw/gv100/hw_fb_gv100.h>
44#include <nvgpu/hw/gv100/hw_falcon_gv100.h>
45#include <nvgpu/hw/gv100/hw_mc_gv100.h>
46
47#define HW_SCRUB_TIMEOUT_DEFAULT 100 /* usec */
48#define HW_SCRUB_TIMEOUT_MAX 2000000 /* usec */
49#define MEM_UNLOCK_TIMEOUT 3500 /* msec */
50
/*
 * FB reset sequence for GV100: wait for the hardware memory scrubber to
 * finish, then clear the write-violation bit in the MMU privilege mask.
 *
 * NOTE(review): if the scrub never completes, the retry loop just falls
 * through without logging an error — confirm this best-effort behavior.
 */
void gv100_fb_reset(struct gk20a *g)
{
	u32 val;
	int retries = HW_SCRUB_TIMEOUT_MAX / HW_SCRUB_TIMEOUT_DEFAULT;

	nvgpu_info(g, "reset gv100 fb");

	/* wait for memory to be accessible */
	do {
		u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
		if (fb_niso_scrub_status_flag_v(w)) {
			nvgpu_info(g, "done");
			break;
		}
		nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
	} while (--retries);

	/* Allow writes that would otherwise trip the priv-level mask. */
	val = gk20a_readl(g, fb_mmu_priv_level_mask_r());
	val &= ~fb_mmu_priv_level_mask_write_violation_m();
	gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
}
72
73int gv100_fb_memory_unlock(struct gk20a *g)
74{
75 struct nvgpu_firmware *mem_unlock_fw = NULL;
76 struct bin_hdr *hsbin_hdr = NULL;
77 struct acr_fw_header *fw_hdr = NULL;
78 u32 *mem_unlock_ucode = NULL;
79 u32 *mem_unlock_ucode_header = NULL;
80 u32 sec_imem_dest = 0;
81 u32 val = 0;
82 int err = 0;
83
84 nvgpu_log_fn(g, " ");
85
86 /* Check vpr enable status */
87 val = gk20a_readl(g, fb_mmu_vpr_info_r());
88 val &= ~fb_mmu_vpr_info_index_m();
89 val |= fb_mmu_vpr_info_index_cya_lo_v();
90 gk20a_writel(g, fb_mmu_vpr_info_r(), val);
91 val = gk20a_readl(g, fb_mmu_vpr_info_r());
92 if (!(val & fb_mmu_vpr_info_cya_lo_in_use_m())) {
93 nvgpu_log_info(g, "mem unlock not required on this SKU, skipping");
94 goto exit;
95 }
96
97 /* get mem unlock ucode binary */
98 mem_unlock_fw = nvgpu_request_firmware(g, "mem_unlock.bin", 0);
99 if (!mem_unlock_fw) {
100 nvgpu_err(g, "mem unlock ucode get fail");
101 err = -ENOENT;
102 goto exit;
103 }
104
105 /* Enable nvdec */
106 g->ops.mc.enable(g, mc_enable_nvdec_enabled_f());
107
108 /* nvdec falcon reset */
109 nvgpu_flcn_reset(&g->nvdec_flcn);
110
111 hsbin_hdr = (struct bin_hdr *)mem_unlock_fw->data;
112 fw_hdr = (struct acr_fw_header *)(mem_unlock_fw->data +
113 hsbin_hdr->header_offset);
114
115 mem_unlock_ucode_header = (u32 *)(mem_unlock_fw->data +
116 fw_hdr->hdr_offset);
117 mem_unlock_ucode = (u32 *)(mem_unlock_fw->data +
118 hsbin_hdr->data_offset);
119
120 /* Patch Ucode singnatures */
121 if (acr_ucode_patch_sig(g, mem_unlock_ucode,
122 (u32 *)(mem_unlock_fw->data + fw_hdr->sig_prod_offset),
123 (u32 *)(mem_unlock_fw->data + fw_hdr->sig_dbg_offset),
124 (u32 *)(mem_unlock_fw->data + fw_hdr->patch_loc),
125 (u32 *)(mem_unlock_fw->data + fw_hdr->patch_sig)) < 0) {
126 nvgpu_err(g, "mem unlock patch signatures fail");
127 err = -EPERM;
128 goto exit;
129 }
130
131 /* Clear interrupts */
132 nvgpu_flcn_set_irq(&g->nvdec_flcn, false, 0x0, 0x0);
133
134 /* Copy Non Secure IMEM code */
135 nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, 0,
136 (u8 *)&mem_unlock_ucode[
137 mem_unlock_ucode_header[OS_CODE_OFFSET] >> 2],
138 mem_unlock_ucode_header[OS_CODE_SIZE], 0, false,
139 GET_IMEM_TAG(mem_unlock_ucode_header[OS_CODE_OFFSET]));
140
141 /* Put secure code after non-secure block */
142 sec_imem_dest = GET_NEXT_BLOCK(mem_unlock_ucode_header[OS_CODE_SIZE]);
143
144 nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, sec_imem_dest,
145 (u8 *)&mem_unlock_ucode[
146 mem_unlock_ucode_header[APP_0_CODE_OFFSET] >> 2],
147 mem_unlock_ucode_header[APP_0_CODE_SIZE], 0, true,
148 GET_IMEM_TAG(mem_unlock_ucode_header[APP_0_CODE_OFFSET]));
149
150 /* load DMEM: ensure that signatures are patched */
151 nvgpu_flcn_copy_to_dmem(&g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[
152 mem_unlock_ucode_header[OS_DATA_OFFSET] >> 2],
153 mem_unlock_ucode_header[OS_DATA_SIZE], 0);
154
155 nvgpu_log_info(g, "nvdec sctl reg %x\n",
156 gk20a_readl(g, g->nvdec_flcn.flcn_base +
157 falcon_falcon_sctl_r()));
158
159 /* set BOOTVEC to start of non-secure code */
160 nvgpu_flcn_bootstrap(&g->nvdec_flcn, 0);
161
162 /* wait for complete & halt */
163 nvgpu_flcn_wait_for_halt(&g->nvdec_flcn, MEM_UNLOCK_TIMEOUT);
164
165 /* check mem unlock status */
166 val = nvgpu_flcn_mailbox_read(&g->nvdec_flcn, 0);
167 if (val) {
168 nvgpu_err(g, "memory unlock failed, err %x", val);
169 err = -1;
170 goto exit;
171 }
172
173 nvgpu_log_info(g, "nvdec sctl reg %x\n",
174 gk20a_readl(g, g->nvdec_flcn.flcn_base +
175 falcon_falcon_sctl_r()));
176
177exit:
178 if (mem_unlock_fw)
179 nvgpu_release_firmware(g, mem_unlock_fw);
180
181 nvgpu_log_fn(g, "done, status - %d", err);
182
183 return err;
184}
diff --git a/drivers/gpu/nvgpu/gv100/fb_gv100.h b/drivers/gpu/nvgpu/gv100/fb_gv100.h
new file mode 100644
index 00000000..b6db262a
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/fb_gv100.h
@@ -0,0 +1,32 @@
1/*
2 * GV100 FB
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GV100_FB
26#define _NVGPU_GV100_FB
27
28struct gk20a;
29
30void gv100_fb_reset(struct gk20a *g);
31int gv100_fb_memory_unlock(struct gk20a *g);
32#endif
diff --git a/drivers/gpu/nvgpu/gv100/fifo_gv100.c b/drivers/gpu/nvgpu/gv100/fifo_gv100.c
new file mode 100644
index 00000000..79862f6b
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/fifo_gv100.c
@@ -0,0 +1,40 @@
1/*
2 * GV100 fifo
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "fifo_gv100.h"
26
27#include <nvgpu/hw/gv100/hw_ccsr_gv100.h>
28
29#define DEFAULT_FIFO_PREEMPT_TIMEOUT 0x3FFFFFUL
30
31u32 gv100_fifo_get_num_fifos(struct gk20a *g)
32{
33 return ccsr_channel__size_1_v();
34}
35
/*
 * Preempt timeout for GV100: always the compile-time default
 * (DEFAULT_FIFO_PREEMPT_TIMEOUT; units not stated here — presumably the
 * common FIFO code's timeout unit, verify against callers).
 */
u32 gv100_fifo_get_preempt_timeout(struct gk20a *g)
{
	return DEFAULT_FIFO_PREEMPT_TIMEOUT;
}
40
diff --git a/drivers/gpu/nvgpu/gv100/fifo_gv100.h b/drivers/gpu/nvgpu/gv100/fifo_gv100.h
new file mode 100644
index 00000000..af6ad030
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/fifo_gv100.h
@@ -0,0 +1,33 @@
1/*
2 * GV100 Fifo
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef FIFO_GV100_H
26#define FIFO_GV100_H
27
28#include <nvgpu/types.h>
29struct gk20a;
30
31u32 gv100_fifo_get_num_fifos(struct gk20a *g);
32u32 gv100_fifo_get_preempt_timeout(struct gk20a *g);
33#endif
diff --git a/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c
new file mode 100644
index 00000000..8b50125e
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c
@@ -0,0 +1,47 @@
1/*
2 * GV100 Graphics Context
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26#include "gr_ctx_gv100.h"
27
28int gr_gv100_get_netlist_name(struct gk20a *g, int index, char *name)
29{
30 u32 ver = g->params.gpu_arch + g->params.gpu_impl;
31
32 switch (ver) {
33 case NVGPU_GPUID_GV100:
34 sprintf(name, "%s/%s", "gv100",
35 GV100_NETLIST_IMAGE_FW_NAME);
36 break;
37 default:
38 nvgpu_err(g, "no support for GPUID %x", ver);
39 }
40
41 return 0;
42}
43
/* GV100 GR netlists are always shipped as firmware images. */
bool gr_gv100_is_firmware_defined(void)
{
	return true;
}
diff --git a/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h
new file mode 100644
index 00000000..2302d988
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#ifndef __GR_CTX_GV100_H__
23#define __GR_CTX_GV100_H__
24
25#include "gk20a/gr_ctx_gk20a.h"
26#include "nvgpu_gpuid_t19x.h"
27
28/* production netlist, one and only one from below */
29#define GV100_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D
30
31int gr_gv100_get_netlist_name(struct gk20a *g, int index, char *name);
32bool gr_gv100_is_firmware_defined(void);
33
34#endif /*__GR_CTX_GV100_H__*/
diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.c b/drivers/gpu/nvgpu/gv100/gr_gv100.c
new file mode 100644
index 00000000..430c7cd0
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/gr_gv100.c
@@ -0,0 +1,349 @@
1/*
2 * GV100 GPU GR
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/log.h>
26#include <nvgpu/debug.h>
27#include <nvgpu/enabled.h>
28
29#include "gk20a/gk20a.h"
30#include "gk20a/gr_gk20a.h"
31
32#include "gv100/gr_gv100.h"
33#include "gv11b/subctx_gv11b.h"
34
35#include <nvgpu/hw/gv100/hw_gr_gv100.h>
36#include <nvgpu/hw/gv100/hw_proj_gv100.h>
37
/*
 * Estimate performance if the given logical TPC in the given logical GPC were
 * removed.
 *
 * The estimate is a fixed-point (fx23.9, scale_factor = 512) combination of:
 *   - minimum per-GPC pixel throughput after removal   (weight pix_scale)
 *   - surviving-PES ratio ("world" performance)        (weight world_scale)
 *   - TPC balance across GPCs (1 - normalized deviation, weight tpc_scale)
 * written to *perf on success.
 *
 * Expects (disable_gpc_id, disable_tpc_id) to name a TPC actually present
 * in gpc_tpc_mask; otherwise -EINVAL is returned (the is_tpc_removed_*
 * checks below). Returns -ENOMEM on allocation failure, -EINVAL on
 * inconsistent input or out-of-range intermediate values.
 */
static int gr_gv100_scg_estimate_perf(struct gk20a *g,
		unsigned long *gpc_tpc_mask,
		u32 disable_gpc_id, u32 disable_tpc_id,
		int *perf)
{
	struct gr_gk20a *gr = &g->gr;
	int err = 0;
	u32 scale_factor = 512UL;	/* Use fx23.9 */
	u32 pix_scale = 1024*1024UL;	/* Pix perf in [29:20] */
	u32 world_scale = 1024UL;	/* World performance in [19:10] */
	u32 tpc_scale = 1;		/* TPC balancing in [9:0] */
	u32 scg_num_pes = 0;
	u32 min_scg_gpc_pix_perf = scale_factor;	/* Init perf as maximum */
	u32 average_tpcs = 0;		/* Average of # of TPCs per GPC */
	u32 deviation;			/* absolute diff between TPC# and
					 * average_tpcs, averaged across GPCs
					 */
	u32 norm_tpc_deviation;		/* deviation/max_tpc_per_gpc */
	u32 tpc_balance;
	u32 scg_gpc_pix_perf;
	u32 scg_world_perf;
	u32 gpc_id;
	u32 pes_id;
	int diff;
	bool is_tpc_removed_gpc = false;
	bool is_tpc_removed_pes = false;
	u32 max_tpc_gpc = 0;
	u32 num_tpc_mask;
	u32 *num_tpc_gpc = nvgpu_kzalloc(g, sizeof(u32) *
			nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS));

	if (!num_tpc_gpc)
		return -ENOMEM;

	/* Calculate pix-perf-reduction-rate per GPC and find bottleneck TPC */
	for (gpc_id = 0; gpc_id < gr->gpc_count; gpc_id++) {
		num_tpc_mask = gpc_tpc_mask[gpc_id];

		if ((gpc_id == disable_gpc_id) && num_tpc_mask &
				(0x1 << disable_tpc_id)) {
			/* Safety check if a TPC is removed twice */
			if (is_tpc_removed_gpc) {
				err = -EINVAL;
				goto free_resources;
			}
			/* Remove logical TPC from set */
			num_tpc_mask &= ~(0x1 << disable_tpc_id);
			is_tpc_removed_gpc = true;
		}

		/* track balancing of tpcs across gpcs */
		num_tpc_gpc[gpc_id] = hweight32(num_tpc_mask);
		average_tpcs += num_tpc_gpc[gpc_id];

		/* save the maximum number of gpcs */
		max_tpc_gpc = num_tpc_gpc[gpc_id] > max_tpc_gpc ?
			num_tpc_gpc[gpc_id] : max_tpc_gpc;

		/*
		 * Calculate ratio between TPC count and post-FS and post-SCG
		 *
		 * ratio represents relative throughput of the GPC
		 */
		scg_gpc_pix_perf = scale_factor * num_tpc_gpc[gpc_id] /
			gr->gpc_tpc_count[gpc_id];

		if (min_scg_gpc_pix_perf > scg_gpc_pix_perf)
			min_scg_gpc_pix_perf = scg_gpc_pix_perf;

		/* Calculate # of surviving PES */
		for (pes_id = 0; pes_id < gr->gpc_ppc_count[gpc_id]; pes_id++) {
			/* Count the number of TPC on the set */
			num_tpc_mask = gr->pes_tpc_mask[pes_id][gpc_id] &
				gpc_tpc_mask[gpc_id];

			if ((gpc_id == disable_gpc_id) && (num_tpc_mask &
				(0x1 << disable_tpc_id))) {

				if (is_tpc_removed_pes) {
					err = -EINVAL;
					goto free_resources;
				}
				num_tpc_mask &= ~(0x1 << disable_tpc_id);
				is_tpc_removed_pes = true;
			}
			if (hweight32(num_tpc_mask))
				scg_num_pes++;
		}
	}

	/* The TPC to remove must have existed in both views of the masks. */
	if (!is_tpc_removed_gpc || !is_tpc_removed_pes) {
		err = -EINVAL;
		goto free_resources;
	}

	if (max_tpc_gpc == 0) {
		*perf = 0;
		goto free_resources;
	}

	/* Now calculate perf */
	scg_world_perf = (scale_factor * scg_num_pes) / gr->ppc_count;
	deviation = 0;
	average_tpcs = scale_factor * average_tpcs / gr->gpc_count;
	for (gpc_id = 0; gpc_id < gr->gpc_count; gpc_id++) {
		diff = average_tpcs - scale_factor * num_tpc_gpc[gpc_id];
		if (diff < 0)
			diff = -diff;
		deviation += diff;
	}

	deviation /= gr->gpc_count;

	norm_tpc_deviation = deviation / max_tpc_gpc;

	tpc_balance = scale_factor - norm_tpc_deviation;

	/* All fixed-point terms must stay within one scale unit. */
	if ((tpc_balance > scale_factor) ||
	    (scg_world_perf > scale_factor) ||
	    (min_scg_gpc_pix_perf > scale_factor) ||
	    (norm_tpc_deviation > scale_factor)) {
		err = -EINVAL;
		goto free_resources;
	}

	*perf = (pix_scale * min_scg_gpc_pix_perf) +
		(world_scale * scg_world_perf) +
		(tpc_scale * tpc_balance);
free_resources:
	nvgpu_kfree(g, num_tpc_gpc);
	return err;
}
174
175void gr_gv100_bundle_cb_defaults(struct gk20a *g)
176{
177 struct gr_gk20a *gr = &g->gr;
178
179 gr->bundle_cb_default_size =
180 gr_scc_bundle_cb_size_div_256b__prod_v();
181 gr->min_gpm_fifo_depth =
182 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
183 gr->bundle_cb_token_limit =
184 gr_pd_ab_dist_cfg2_token_limit_init_v();
185}
186
187void gr_gv100_cb_size_default(struct gk20a *g)
188{
189 struct gr_gk20a *gr = &g->gr;
190
191 if (!gr->attrib_cb_default_size)
192 gr->attrib_cb_default_size =
193 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
194 gr->alpha_cb_default_size =
195 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
196}
197
/*
 * Intentionally empty on GV100: the GPC/TPC mask is not programmed
 * through this hook (see gr_gv100_load_tpc_mask() instead). Presumably
 * kept so common code can invoke the HAL unconditionally — verify
 * against the HAL wiring.
 */
void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{
}
201
/*
 * Build the SM-id to (gpc, tpc) mapping table.
 *
 * Greedy selection: for each global TPC slot, try every still-available
 * TPC, ask gr_gv100_scg_estimate_perf() what the estimated performance
 * would be with that TPC removed, and pick the TPC whose removal leaves
 * the highest estimate. The chosen order then fills
 * g->gr.sm_to_cluster[] with sm_per_tpc consecutive SM entries per TPC.
 * On any allocation or estimation failure the function bails out,
 * leaving the table partially (or not at all) initialized.
 */
void gr_gv100_init_sm_id_table(struct gk20a *g)
{
	u32 gpc, tpc, sm, pes, gtpc;
	u32 sm_id = 0;
	u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	u32 num_sm = sm_per_tpc * g->gr.tpc_count;
	int perf, maxperf;
	int err;
	unsigned long *gpc_tpc_mask;
	u32 *tpc_table, *gpc_table;

	gpc_table = nvgpu_kzalloc(g, g->gr.tpc_count * sizeof(u32));
	tpc_table = nvgpu_kzalloc(g, g->gr.tpc_count * sizeof(u32));
	gpc_tpc_mask = nvgpu_kzalloc(g, sizeof(unsigned long) *
			nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS));

	if (!gpc_table || !tpc_table || !gpc_tpc_mask) {
		nvgpu_err(g, "Error allocating memory for sm tables");
		goto exit_build_table;
	}

	/* Per-GPC mask of available TPCs, union of all PES masks. */
	for (gpc = 0; gpc < g->gr.gpc_count; gpc++)
		for (pes = 0; pes < g->gr.gpc_ppc_count[gpc]; pes++)
			gpc_tpc_mask[gpc] |= g->gr.pes_tpc_mask[pes][gpc];

	/* Greedily order TPCs by best remaining-performance estimate. */
	for (gtpc = 0; gtpc < g->gr.tpc_count; gtpc++) {
		maxperf = -1;
		for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
			for_each_set_bit(tpc, &gpc_tpc_mask[gpc],
					g->gr.gpc_tpc_count[gpc]) {
				perf = -1;
				err = gr_gv100_scg_estimate_perf(g,
						gpc_tpc_mask, gpc, tpc, &perf);

				if (err) {
					nvgpu_err(g,
						"Error while estimating perf");
					goto exit_build_table;
				}

				/* >= keeps the last of equally good picks. */
				if (perf >= maxperf) {
					maxperf = perf;
					gpc_table[gtpc] = gpc;
					tpc_table[gtpc] = tpc;
				}
			}
		}
		/* Remove the chosen TPC from the available set. */
		gpc_tpc_mask[gpc_table[gtpc]] &= ~(0x1 << tpc_table[gtpc]);
	}

	/* Expand the ordered TPC list into per-SM cluster entries. */
	for (tpc = 0, sm_id = 0; sm_id < num_sm; tpc++, sm_id += sm_per_tpc) {
		for (sm = 0; sm < sm_per_tpc; sm++) {
			u32 index = sm_id + sm;

			g->gr.sm_to_cluster[index].gpc_index = gpc_table[tpc];
			g->gr.sm_to_cluster[index].tpc_index = tpc_table[tpc];
			g->gr.sm_to_cluster[index].sm_index = sm;
			g->gr.sm_to_cluster[index].global_tpc_index = tpc;
			nvgpu_log_info(g,
				"gpc : %d tpc %d sm_index %d global_index: %d",
				g->gr.sm_to_cluster[index].gpc_index,
				g->gr.sm_to_cluster[index].tpc_index,
				g->gr.sm_to_cluster[index].sm_index,
				g->gr.sm_to_cluster[index].global_tpc_index);

		}
	}

	g->gr.no_of_sm = num_sm;
	nvgpu_log_info(g, " total number of sm = %d", g->gr.no_of_sm);
exit_build_table:
	nvgpu_kfree(g, gpc_table);
	nvgpu_kfree(g, tpc_table);
	nvgpu_kfree(g, gpc_tpc_mask);
}
277
/*
 * Program the FE TPC floorsweeping mask registers.
 *
 * Builds a 64-bit mask with num_tpc_per_gpc bits per GPC (OR of all PES
 * TPC masks for that GPC) and writes it to the two 32-bit
 * gr_fe_tpc_fs registers (low word to index 0, high word to index 1).
 */
void gr_gv100_load_tpc_mask(struct gk20a *g)
{
	u64 pes_tpc_mask = 0x0ULL;
	u32 gpc, pes;
	u32 num_tpc_per_gpc = nvgpu_get_litter_value(g,
				GPU_LIT_NUM_TPC_PER_GPC);

	/* gv100 has 6 GPC and 7 TPC/GPC */
	for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
		for (pes = 0; pes < g->gr.pe_count_per_gpc; pes++) {
			pes_tpc_mask |= (u64) g->gr.pes_tpc_mask[pes][gpc] <<
				(num_tpc_per_gpc * gpc);
		}
	}

	nvgpu_log_info(g, "pes_tpc_mask: %016llx\n", pes_tpc_mask);
	gk20a_writel(g, gr_fe_tpc_fs_r(0), u64_lo32(pes_tpc_mask));
	gk20a_writel(g, gr_fe_tpc_fs_r(1), u64_hi32(pes_tpc_mask));
}
297
/*
 * Compute the number of patch-buffer slots needed on GV100.
 *
 * Extends the base gk20a slot count with per-subcontext PE/PL table
 * updates, partition-mode-change slots, and page alignment. Returns the
 * total slot count (aligned to PATCH_CTX_SLOTS_PER_PAGE plus headroom).
 */
u32 gr_gv100_get_patch_slots(struct gk20a *g)
{
	struct gr_gk20a *gr = &g->gr;
	struct fifo_gk20a *f = &g->fifo;
	u32 size = 0;

	/*
	 * CMD to update PE table
	 */
	size++;

	/*
	 * Update PE table contents
	 * for PE table, each patch buffer update writes 32 TPCs
	 */
	size += DIV_ROUND_UP(gr->tpc_count, 32);

	/*
	 * Update the PL table contents
	 * For PL table, each patch buffer update configures 4 TPCs
	 */
	size += DIV_ROUND_UP(gr->tpc_count, 4);

	/*
	 * We need this for all subcontexts
	 */
	size *= f->t19x.max_subctx_count;

	/*
	 * Add space for a partition mode change as well
	 * reserve two slots since DYNAMIC -> STATIC requires
	 * DYNAMIC -> NONE -> STATIC
	 */
	size += 2;

	/*
	 * Add current patch buffer size
	 */
	size += gr_gk20a_get_patch_slots(g);

	/*
	 * Align to 4K size
	 */
	size = ALIGN(size, PATCH_CTX_SLOTS_PER_PAGE);

	/*
	 * Increase the size to accommodate for additional TPC partition update
	 */
	size += 2 * PATCH_CTX_SLOTS_PER_PAGE;

	return size;
}
diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.h b/drivers/gpu/nvgpu/gv100/gr_gv100.h
new file mode 100644
index 00000000..612f76f9
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/gr_gv100.h
@@ -0,0 +1,36 @@
1/*
2 * GV100 GPU GR
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GR_GV100_H_
26#define _NVGPU_GR_GV100_H_
27
28void gr_gv100_bundle_cb_defaults(struct gk20a *g);
29void gr_gv100_cb_size_default(struct gk20a *g);
30void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
31void gr_gv100_init_sm_id_table(struct gk20a *g);
32void gr_gv100_program_sm_id_numbering(struct gk20a *g,
33 u32 gpc, u32 tpc, u32 smid);
34int gr_gv100_load_smid_config(struct gk20a *g);
35u32 gr_gv100_get_patch_slots(struct gk20a *g);
36#endif
diff --git a/drivers/gpu/nvgpu/gv100/gv100.h b/drivers/gpu/nvgpu/gv100/gv100.h
new file mode 100644
index 00000000..7cc1f77b
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/gv100.h
@@ -0,0 +1,32 @@
1/*
2 * GV100 Graphics
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef GV100_H
#define GV100_H

#include "gk20a/gk20a.h"

int gv100_init_gpu_characteristics(struct gk20a *g);

#endif /* GV100_H */
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
new file mode 100644
index 00000000..4044c4b5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -0,0 +1,769 @@
1/*
2 * GV100 Tegra HAL interface
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/types.h>
26#include <linux/printk.h>
27
28#include <linux/types.h>
29#include <linux/tegra_gpu_t19x.h>
30
31#include "gk20a/gk20a.h"
32#include "gk20a/fifo_gk20a.h"
33#include "gk20a/fecs_trace_gk20a.h"
34#include "gk20a/css_gr_gk20a.h"
35#include "gk20a/mc_gk20a.h"
36#include "gk20a/dbg_gpu_gk20a.h"
37#include "gk20a/bus_gk20a.h"
38#include "gk20a/pramin_gk20a.h"
39#include "gk20a/flcn_gk20a.h"
40#include "gk20a/regops_gk20a.h"
41#include "gk20a/fb_gk20a.h"
42#include "gk20a/mm_gk20a.h"
43#include "gk20a/pmu_gk20a.h"
44#include "gk20a/gr_gk20a.h"
45
46#include "gm20b/ltc_gm20b.h"
47#include "gm20b/gr_gm20b.h"
48#include "gm20b/fifo_gm20b.h"
49#include "gm20b/fb_gm20b.h"
50#include "gm20b/mm_gm20b.h"
51#include "gm20b/pmu_gm20b.h"
52#include "gm20b/acr_gm20b.h"
53
54#include "gp10b/fb_gp10b.h"
55#include "gp10b/gr_gp10b.h"
56
57#include "gp106/clk_gp106.h"
58#include "gp106/clk_arb_gp106.h"
59#include "gp106/pmu_gp106.h"
60#include "gp106/acr_gp106.h"
61#include "gp106/sec2_gp106.h"
62#include "gp106/bios_gp106.h"
63#include "gv100/bios_gv100.h"
64#include "gp106/therm_gp106.h"
65#include "gp106/xve_gp106.h"
66#include "gp106/clk_gp106.h"
67#include "gp106/flcn_gp106.h"
68#include "gp10b/ltc_gp10b.h"
69#include "gp10b/therm_gp10b.h"
70#include "gp10b/mc_gp10b.h"
71#include "gp10b/ce_gp10b.h"
72#include "gp10b/priv_ring_gp10b.h"
73#include "gp10b/fifo_gp10b.h"
74#include "gp10b/fecs_trace_gp10b.h"
75#include "gp10b/mm_gp10b.h"
76#include "gp10b/pmu_gp10b.h"
77
78#include "gv11b/css_gr_gv11b.h"
79#include "gv11b/dbg_gpu_gv11b.h"
80#include "gv11b/hal_gv11b.h"
81#include "gv100/gr_gv100.h"
82#include "gv11b/mc_gv11b.h"
83#include "gv11b/ltc_gv11b.h"
84#include "gv11b/gv11b.h"
85#include "gv11b/ce_gv11b.h"
86#include "gv100/gr_ctx_gv100.h"
87#include "gv11b/mm_gv11b.h"
88#include "gv11b/pmu_gv11b.h"
89#include "gv11b/fb_gv11b.h"
90#include "gv100/mm_gv100.h"
91#include "gv11b/pmu_gv11b.h"
92#include "gv100/fb_gv100.h"
93#include "gv100/fifo_gv100.h"
94#include "gv11b/fifo_gv11b.h"
95#include "gv11b/regops_gv11b.h"
96
97#include "gv11b/gv11b_gating_reglist.h"
98#include "gv100/regops_gv100.h"
99#include "gv11b/subctx_gv11b.h"
100
101#include "gv100.h"
102#include "hal_gv100.h"
103#include "gv100/fb_gv100.h"
104#include "gv100/mm_gv100.h"
105
106#include <nvgpu/bus.h>
107#include <nvgpu/debug.h>
108#include <nvgpu/enabled.h>
109#include <nvgpu/enabled_t19x.h>
110#include <nvgpu/ctxsw_trace.h>
111
112#include <nvgpu/hw/gv100/hw_proj_gv100.h>
113#include <nvgpu/hw/gv100/hw_fifo_gv100.h>
114#include <nvgpu/hw/gv100/hw_ram_gv100.h>
115#include <nvgpu/hw/gv100/hw_top_gv100.h>
116#include <nvgpu/hw/gv100/hw_pram_gv100.h>
117#include <nvgpu/hw/gv100/hw_pwr_gv100.h>
118
119static int gv100_get_litter_value(struct gk20a *g, int value)
120{
121 int ret = EINVAL;
122 switch (value) {
123 case GPU_LIT_NUM_GPCS:
124 ret = proj_scal_litter_num_gpcs_v();
125 break;
126 case GPU_LIT_NUM_PES_PER_GPC:
127 ret = proj_scal_litter_num_pes_per_gpc_v();
128 break;
129 case GPU_LIT_NUM_ZCULL_BANKS:
130 ret = proj_scal_litter_num_zcull_banks_v();
131 break;
132 case GPU_LIT_NUM_TPC_PER_GPC:
133 ret = proj_scal_litter_num_tpc_per_gpc_v();
134 break;
135 case GPU_LIT_NUM_SM_PER_TPC:
136 ret = proj_scal_litter_num_sm_per_tpc_v();
137 break;
138 case GPU_LIT_NUM_FBPS:
139 ret = proj_scal_litter_num_fbps_v();
140 break;
141 case GPU_LIT_GPC_BASE:
142 ret = proj_gpc_base_v();
143 break;
144 case GPU_LIT_GPC_STRIDE:
145 ret = proj_gpc_stride_v();
146 break;
147 case GPU_LIT_GPC_SHARED_BASE:
148 ret = proj_gpc_shared_base_v();
149 break;
150 case GPU_LIT_TPC_IN_GPC_BASE:
151 ret = proj_tpc_in_gpc_base_v();
152 break;
153 case GPU_LIT_TPC_IN_GPC_STRIDE:
154 ret = proj_tpc_in_gpc_stride_v();
155 break;
156 case GPU_LIT_TPC_IN_GPC_SHARED_BASE:
157 ret = proj_tpc_in_gpc_shared_base_v();
158 break;
159 case GPU_LIT_PPC_IN_GPC_BASE:
160 ret = proj_ppc_in_gpc_base_v();
161 break;
162 case GPU_LIT_PPC_IN_GPC_STRIDE:
163 ret = proj_ppc_in_gpc_stride_v();
164 break;
165 case GPU_LIT_PPC_IN_GPC_SHARED_BASE:
166 ret = proj_ppc_in_gpc_shared_base_v();
167 break;
168 case GPU_LIT_ROP_BASE:
169 ret = proj_rop_base_v();
170 break;
171 case GPU_LIT_ROP_STRIDE:
172 ret = proj_rop_stride_v();
173 break;
174 case GPU_LIT_ROP_SHARED_BASE:
175 ret = proj_rop_shared_base_v();
176 break;
177 case GPU_LIT_HOST_NUM_ENGINES:
178 ret = proj_host_num_engines_v();
179 break;
180 case GPU_LIT_HOST_NUM_PBDMA:
181 ret = proj_host_num_pbdma_v();
182 break;
183 case GPU_LIT_LTC_STRIDE:
184 ret = proj_ltc_stride_v();
185 break;
186 case GPU_LIT_LTS_STRIDE:
187 ret = proj_lts_stride_v();
188 break;
189 case GPU_LIT_NUM_FBPAS:
190 ret = proj_scal_litter_num_fbpas_v();
191 break;
192 case GPU_LIT_FBPA_SHARED_BASE:
193 ret = proj_fbpa_shared_base_v();
194 break;
195 case GPU_LIT_FBPA_BASE:
196 ret = proj_fbpa_base_v();
197 break;
198 case GPU_LIT_FBPA_STRIDE:
199 ret = proj_fbpa_stride_v();
200 break;
201 case GPU_LIT_SM_PRI_STRIDE:
202 ret = proj_sm_stride_v();
203 break;
204 case GPU_LIT_SMPC_PRI_BASE:
205 ret = proj_smpc_base_v();
206 break;
207 case GPU_LIT_SMPC_PRI_SHARED_BASE:
208 ret = proj_smpc_shared_base_v();
209 break;
210 case GPU_LIT_SMPC_PRI_UNIQUE_BASE:
211 ret = proj_smpc_unique_base_v();
212 break;
213 case GPU_LIT_SMPC_PRI_STRIDE:
214 ret = proj_smpc_stride_v();
215 break;
216 case GPU_LIT_TWOD_CLASS:
217 ret = FERMI_TWOD_A;
218 break;
219 case GPU_LIT_THREED_CLASS:
220 ret = VOLTA_A;
221 break;
222 case GPU_LIT_COMPUTE_CLASS:
223 ret = VOLTA_COMPUTE_A;
224 break;
225 case GPU_LIT_GPFIFO_CLASS:
226 ret = VOLTA_CHANNEL_GPFIFO_A;
227 break;
228 case GPU_LIT_I2M_CLASS:
229 ret = KEPLER_INLINE_TO_MEMORY_B;
230 break;
231 case GPU_LIT_DMA_COPY_CLASS:
232 ret = VOLTA_DMA_COPY_A;
233 break;
234 default:
235 break;
236 }
237
238 return ret;
239}
240
241int gv100_init_gpu_characteristics(struct gk20a *g)
242{
243 int err;
244
245 err = gk20a_init_gpu_characteristics(g);
246 if (err)
247 return err;
248
249 __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
250
251 return 0;
252}
253
254
255
/*
 * GV100 HAL dispatch table.
 *
 * GV100 is a Volta dGPU, so most entries reuse gv11b (Volta iGPU)
 * implementations, with dGPU-specific pieces (BIOS, XVE/PCIe, SEC2-based
 * PMU boot, GP106 clock/therm plumbing) taken from the gp106 family and
 * a handful of true gv100 overrides (patch slots, TPC masks, FB unlock,
 * regops whitelists).
 *
 * NOTE(review): no .therm or .clock_gating members are initialized here,
 * yet gv100_init_hal() copies both — those sub-structs are zero, so the
 * corresponding ops end up NULL. Presumably intentional at this stage of
 * bring-up; confirm before relying on therm/clock-gating paths.
 */
static const struct gpu_ops gv100_ops = {
	.bios = {
		.init = gp106_bios_init,
		.preos_wait_for_halt = gv100_bios_preos_wait_for_halt,
		.preos_reload_check = gv100_bios_preos_reload_check,
	},
	.ltc = {
		.determine_L2_size_bytes = gp10b_determine_L2_size_bytes,
		.set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
		.set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
		.set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
		.init_cbc = NULL,
		.init_fs_state = gv11b_ltc_init_fs_state,
		.init_comptags = gp10b_ltc_init_comptags,
		.cbc_ctrl = gm20b_ltc_cbc_ctrl,
		.isr = gv11b_ltc_isr,
		.cbc_fix_config = NULL,
		.flush = gm20b_flush_ltc,
		.set_enabled = gp10b_ltc_set_enabled,
	},
	.ce2 = {
		.isr_stall = gv11b_ce_isr,
		.isr_nonstall = gp10b_ce_nonstall_isr,
		.get_num_pce = gv11b_ce_get_num_pce,
	},
	.gr = {
		/* gv100-specific: larger patch buffer for subcontexts */
		.get_patch_slots = gr_gv100_get_patch_slots,
		.init_gpc_mmu = gr_gv11b_init_gpc_mmu,
		.bundle_cb_defaults = gr_gv100_bundle_cb_defaults,
		.cb_size_default = gr_gv100_cb_size_default,
		.calc_global_ctx_buffer_size =
			gr_gv11b_calc_global_ctx_buffer_size,
		.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
		.commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
		.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
		.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
		.handle_sw_method = gr_gv11b_handle_sw_method,
		.set_alpha_circular_buffer_size =
			gr_gv11b_set_alpha_circular_buffer_size,
		.set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
		.enable_hww_exceptions = gr_gv11b_enable_hww_exceptions,
		.is_valid_class = gr_gv11b_is_valid_class,
		.is_valid_gfx_class = gr_gv11b_is_valid_gfx_class,
		.is_valid_compute_class = gr_gv11b_is_valid_compute_class,
		.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
		.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
		.init_fs_state = gr_gv11b_init_fs_state,
		.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
		.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
		.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode,
		.set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask,
		.get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask,
		.free_channel_ctx = gk20a_free_channel_ctx,
		.alloc_obj_ctx = gk20a_alloc_obj_ctx,
		.bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull,
		.get_zcull_info = gr_gk20a_get_zcull_info,
		.is_tpc_addr = gr_gm20b_is_tpc_addr,
		.get_tpc_num = gr_gm20b_get_tpc_num,
		.detect_sm_arch = gr_gv11b_detect_sm_arch,
		.add_zbc_color = gr_gp10b_add_zbc_color,
		.add_zbc_depth = gr_gp10b_add_zbc_depth,
		.zbc_set_table = gk20a_gr_zbc_set_table,
		.zbc_query_table = gr_gk20a_query_zbc,
		.pmu_save_zbc = gk20a_pmu_save_zbc,
		.add_zbc = gr_gk20a_add_zbc,
		.pagepool_default_size = gr_gv11b_pagepool_default_size,
		.init_ctx_state = gr_gp10b_init_ctx_state,
		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
		.free_gr_ctx = gr_gp10b_free_gr_ctx,
		.update_ctxsw_preemption_mode =
			gr_gp10b_update_ctxsw_preemption_mode,
		.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
		.update_pc_sampling = gr_gm20b_update_pc_sampling,
		.get_fbp_en_mask = gr_gm20b_get_fbp_en_mask,
		.get_max_ltc_per_fbp = gr_gm20b_get_max_ltc_per_fbp,
		.get_max_lts_per_ltc = gr_gm20b_get_max_lts_per_ltc,
		.get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask,
		.get_max_fbps_count = gr_gm20b_get_max_fbps_count,
		.init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info,
		.wait_empty = gr_gv11b_wait_empty,
		.init_cyclestats = gr_gm20b_init_cyclestats,
		.set_sm_debug_mode = gv11b_gr_set_sm_debug_mode,
		.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
		.bpt_reg_info = gv11b_gr_bpt_reg_info,
		.get_access_map = gr_gv11b_get_access_map,
		.handle_fecs_error = gr_gv11b_handle_fecs_error,
		.handle_sm_exception = gr_gk20a_handle_sm_exception,
		.handle_tex_exception = gr_gv11b_handle_tex_exception,
		.enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions,
		.enable_exceptions = gr_gv11b_enable_exceptions,
		.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gv11b_gr_record_sm_error_state,
		.update_sm_error_state = gv11b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,
		.get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
		/* NOTE(review): sm_id table init is gv100 but smid config /
		 * numbering use gv11b variants — confirm this mix is intended
		 * (gr_gv100_load_smid_config exists but is not installed). */
		.init_sm_id_table = gr_gv100_init_sm_id_table,
		.load_smid_config = gr_gv11b_load_smid_config,
		.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
		.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
		.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
		.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
		.commit_inst = gr_gv11b_commit_inst,
		.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
		.write_pm_ptr = gr_gv11b_write_pm_ptr,
		.init_elcg_mode = gr_gv11b_init_elcg_mode,
		/* NOTE(review): gv11b variant used here even though
		 * gr_gv100_load_tpc_mask is defined in this tree — verify. */
		.load_tpc_mask = gr_gv11b_load_tpc_mask,
		.inval_icache = gr_gk20a_inval_icache,
		.trigger_suspend = gv11b_gr_sm_trigger_suspend,
		.wait_for_pause = gr_gk20a_wait_for_pause,
		.resume_from_pause = gv11b_gr_resume_from_pause,
		.clear_sm_errors = gr_gk20a_clear_sm_errors,
		.tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
		.get_esr_sm_sel = gv11b_gr_get_esr_sm_sel,
		.sm_debugger_attached = gv11b_gr_sm_debugger_attached,
		.suspend_single_sm = gv11b_gr_suspend_single_sm,
		.suspend_all_sms = gv11b_gr_suspend_all_sms,
		.resume_single_sm = gv11b_gr_resume_single_sm,
		.resume_all_sms = gv11b_gr_resume_all_sms,
		.get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr,
		.get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr,
		.get_sm_no_lock_down_hww_global_esr_mask =
			gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask,
		.lock_down_sm = gv11b_gr_lock_down_sm,
		.wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down,
		.clear_sm_hww = gv11b_gr_clear_sm_hww,
		.init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf,
		.get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs,
		.disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
		.set_boosted_ctx = gr_gp10b_set_boosted_ctx,
		.set_preemption_mode = gr_gp10b_set_preemption_mode,
		.set_czf_bypass = NULL,
		.pre_process_sm_exception = gr_gv11b_pre_process_sm_exception,
		.set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va,
		.init_preemption_state = NULL,
		.update_boosted_ctx = gr_gp10b_update_boosted_ctx,
		.set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
		.create_gr_sysfs = gr_gv11b_create_sysfs,
		.set_ctxsw_preemption_mode = gr_gp10b_set_ctxsw_preemption_mode,
		.is_etpc_addr = gv11b_gr_pri_is_etpc_addr,
		.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
		.handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception,
		.zbc_s_query_table = gr_gv11b_zbc_s_query_table,
		.load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl,
		.handle_gpc_gpcmmu_exception =
			gr_gv11b_handle_gpc_gpcmmu_exception,
		.add_zbc_type_s = gr_gv11b_add_zbc_type_s,
		.get_egpc_base = gv11b_gr_get_egpc_base,
		.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
		.handle_gpc_gpccs_exception =
			gr_gv11b_handle_gpc_gpccs_exception,
		.load_zbc_s_tbl = gr_gv11b_load_stencil_tbl,
		.access_smpc_reg = gv11b_gr_access_smpc_reg,
		.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
		.add_zbc_s = gr_gv11b_add_zbc_stencil,
		.handle_gcc_exception = gr_gv11b_handle_gcc_exception,
		.init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle,
		.handle_tpc_sm_ecc_exception =
			gr_gv11b_handle_tpc_sm_ecc_exception,
		.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
	},
	.fb = {
		.reset = gv100_fb_reset,
		.init_hw = gk20a_fb_init_hw,
		.init_fs_state = NULL,
		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
		.set_use_full_comp_tag_line =
			gm20b_fb_set_use_full_comp_tag_line,
		.compression_page_size = gp10b_fb_compression_page_size,
		.compressible_page_size = gp10b_fb_compressible_page_size,
		.vpr_info_fetch = gm20b_fb_vpr_info_fetch,
		.dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
		.read_wpr_info = gm20b_fb_read_wpr_info,
		.is_debug_mode_enabled = gm20b_fb_debug_mode_enabled,
		.set_debug_mode = gm20b_fb_set_debug_mode,
		.tlb_invalidate = gk20a_fb_tlb_invalidate,
		.hub_isr = gv11b_fb_hub_isr,
		/* dGPU-only: unlock VPR-protected memory via falcon */
		.mem_unlock = gv100_fb_memory_unlock,
	},
	.fifo = {
		.get_preempt_timeout = gv100_fifo_get_preempt_timeout,
		.init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
		.bind_channel = channel_gm20b_bind,
		.unbind_channel = channel_gv11b_unbind,
		.disable_channel = gk20a_fifo_disable_channel,
		.enable_channel = gk20a_fifo_enable_channel,
		.alloc_inst = gk20a_fifo_alloc_inst,
		.free_inst = gk20a_fifo_free_inst,
		.setup_ramfc = channel_gv11b_setup_ramfc,
		.channel_set_timeslice = gk20a_fifo_set_timeslice,
		.default_timeslice_us = gk20a_fifo_default_timeslice_us,
		.setup_userd = gk20a_fifo_setup_userd,
		.userd_gp_get = gv11b_userd_gp_get,
		.userd_gp_put = gv11b_userd_gp_put,
		.userd_pb_get = gv11b_userd_pb_get,
		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
		.preempt_channel = gv11b_fifo_preempt_channel,
		.preempt_tsg = gv11b_fifo_preempt_tsg,
		.enable_tsg = gv11b_fifo_enable_tsg,
		.disable_tsg = gk20a_disable_tsg,
		.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
		.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
		.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
		.update_runlist = gk20a_fifo_update_runlist,
		/* Volta uses the HUB fault path; legacy MMU-fault hooks off */
		.trigger_mmu_fault = NULL,
		.get_mmu_fault_info = NULL,
		.wait_engine_idle = gk20a_fifo_wait_engine_idle,
		.get_num_fifos = gv100_fifo_get_num_fifos,
		.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
		.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
		.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
		.force_reset_ch = gk20a_fifo_force_reset_ch,
		.engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
		.device_info_data_parse = gp10b_device_info_data_parse,
		.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
		.init_engine_info = gk20a_fifo_init_engine_info,
		.runlist_entry_size = ram_rl_entry_size_v,
		.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
		.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
		.dump_pbdma_status = gk20a_dump_pbdma_status,
		.dump_eng_status = gv11b_dump_eng_status,
		.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
		.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
		.init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs,
		.reset_enable_hw = gk20a_init_fifo_reset_enable_hw,
		.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
		.handle_sched_error = gv11b_fifo_handle_sched_error,
		.handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0,
		.handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1,
		.init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers,
		.deinit_eng_method_buffers =
			gv11b_fifo_deinit_eng_method_buffers,
		.tsg_bind_channel = gk20a_tsg_bind_channel,
		.tsg_unbind_channel = gk20a_tsg_unbind_channel,
#ifdef CONFIG_TEGRA_GK20A_NVHOST
		.alloc_syncpt_buf = gv11b_fifo_alloc_syncpt_buf,
		.free_syncpt_buf = gv11b_fifo_free_syncpt_buf,
		.add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd,
		.get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size,
		.add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd,
		.get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size,
#endif
		.resetup_ramfc = NULL,
		.device_info_fault_id = top_device_info_data_fault_id_enum_v,
		.free_channel_ctx_header = gv11b_free_subctx_header,
		.preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg,
		.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
	},
	.gr_ctx = {
		.get_netlist_name = gr_gv100_get_netlist_name,
		.is_fw_defined = gr_gv100_is_firmware_defined,
	},
#ifdef CONFIG_GK20A_CTXSW_TRACE
	/* FECS trace not yet wired up for gv100: all hooks NULL */
	.fecs_trace = {
		.alloc_user_buffer = NULL,
		.free_user_buffer = NULL,
		.mmap_user_buffer = NULL,
		.init = NULL,
		.deinit = NULL,
		.enable = NULL,
		.disable = NULL,
		.is_enabled = NULL,
		.reset = NULL,
		.flush = NULL,
		.poll = NULL,
		.bind_channel = NULL,
		.unbind_channel = NULL,
		.max_entries = NULL,
	},
#endif /* CONFIG_GK20A_CTXSW_TRACE */
	.mm = {
		.support_sparse = gm20b_mm_support_sparse,
		.gmmu_map = gk20a_locked_gmmu_map,
		.gmmu_unmap = gk20a_locked_gmmu_unmap,
		.vm_bind_channel = gk20a_vm_bind_channel,
		.fb_flush = gk20a_mm_fb_flush,
		.l2_invalidate = gk20a_mm_l2_invalidate,
		.l2_flush = gk20a_mm_l2_flush,
		.cbc_clean = gk20a_mm_cbc_clean,
		.set_big_page_size = gm20b_mm_set_big_page_size,
		.get_big_page_sizes = gm20b_mm_get_big_page_sizes,
		.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
		.gpu_phys_addr = gv11b_gpu_phys_addr,
		.get_mmu_levels = gp10b_mm_get_mmu_levels,
		.get_vidmem_size = gv100_mm_get_vidmem_size,
		.init_pdb = gp10b_mm_init_pdb,
		.init_mm_setup_hw = gv11b_init_mm_setup_hw,
		.is_bar1_supported = gv11b_mm_is_bar1_supported,
		.alloc_inst_block = gk20a_alloc_inst_block,
		.init_inst_block = gv11b_init_inst_block,
		.mmu_fault_pending = gv11b_mm_mmu_fault_pending,
		.get_kind_invalid = gm20b_get_kind_invalid,
		.get_kind_pitch = gm20b_get_kind_pitch,
		.init_bar2_vm = gb10b_init_bar2_vm,
		.init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup,
		.remove_bar2_vm = gv11b_mm_remove_bar2_vm,
		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
		.get_flush_retries = gv100_mm_get_flush_retries,
	},
	.pramin = {
		.enter = gk20a_pramin_enter,
		.exit = gk20a_pramin_exit,
		.data032_r = pram_data032_r,
	},
	.pmu = {
		/* dGPU boot flow: SEC2 bootstraps the LS falcons */
		.init_wpr_region = gm20b_pmu_init_acr,
		.load_lsfalcon_ucode = gp106_load_falcon_ucode,
		.is_lazy_bootstrap = gp106_is_lazy_bootstrap,
		.is_priv_load = gp106_is_priv_load,
		.prepare_ucode = gp106_prepare_ucode_blob,
		.pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn,
		.get_wpr = gp106_wpr_info,
		.alloc_blob_space = gp106_alloc_blob_space,
		.pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg,
		.flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc,
		.falcon_wait_for_halt = sec2_wait_for_halt,
		.falcon_clear_halt_interrupt_status =
			sec2_clear_halt_interrupt_status,
		.init_falcon_setup_hw = init_sec2_setup_hw1,
		.pmu_queue_tail = gk20a_pmu_queue_tail,
		.pmu_get_queue_head = pwr_pmu_queue_head_r,
		.pmu_mutex_release = gk20a_pmu_mutex_release,
		.is_pmu_supported = gp106_is_pmu_supported,
		.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list,
		.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
		.pmu_is_lpwr_feature_supported =
			gp106_pmu_is_lpwr_feature_supported,
		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
		.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list,
		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
		.pmu_queue_head = gk20a_pmu_queue_head,
		.pmu_pg_param_post_init = nvgpu_lpwr_post_init,
		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
		.pmu_pg_init_param = gp106_pg_param_init,
		.reset_engine = gp106_pmu_engine_reset,
		.write_dmatrfbase = gp10b_write_dmatrfbase,
		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
		.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
	},
	.clk = {
		.init_clk_support = gp106_init_clk_support,
		.get_crystal_clk_hz = gp106_crystal_clk_hz,
		.measure_freq = gp106_clk_measure_freq,
		.suspend_clk_support = gp106_suspend_clk_support,
	},
	.clk_arb = {
		.get_arbiter_clk_domains = gp106_get_arbiter_clk_domains,
		.get_arbiter_clk_range = gp106_get_arbiter_clk_range,
		.get_arbiter_clk_default = gp106_get_arbiter_clk_default,
		.get_current_pstate = nvgpu_clk_arb_get_current_pstate,
	},
	.regops = {
		.get_global_whitelist_ranges =
			gv100_get_global_whitelist_ranges,
		.get_global_whitelist_ranges_count =
			gv100_get_global_whitelist_ranges_count,
		.get_context_whitelist_ranges =
			gv100_get_context_whitelist_ranges,
		.get_context_whitelist_ranges_count =
			gv100_get_context_whitelist_ranges_count,
		.get_runcontrol_whitelist = gv100_get_runcontrol_whitelist,
		.get_runcontrol_whitelist_count =
			gv100_get_runcontrol_whitelist_count,
		.get_runcontrol_whitelist_ranges =
			gv100_get_runcontrol_whitelist_ranges,
		.get_runcontrol_whitelist_ranges_count =
			gv100_get_runcontrol_whitelist_ranges_count,
		.get_qctl_whitelist = gv100_get_qctl_whitelist,
		.get_qctl_whitelist_count = gv100_get_qctl_whitelist_count,
		.get_qctl_whitelist_ranges = gv100_get_qctl_whitelist_ranges,
		.get_qctl_whitelist_ranges_count =
			gv100_get_qctl_whitelist_ranges_count,
		.apply_smpc_war = gv100_apply_smpc_war,
	},
	.mc = {
		.intr_enable = mc_gv11b_intr_enable,
		.intr_unit_config = mc_gp10b_intr_unit_config,
		.isr_stall = mc_gp10b_isr_stall,
		.intr_stall = mc_gp10b_intr_stall,
		.intr_stall_pause = mc_gp10b_intr_stall_pause,
		.intr_stall_resume = mc_gp10b_intr_stall_resume,
		.intr_nonstall = mc_gp10b_intr_nonstall,
		.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
		.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
		.enable = gk20a_mc_enable,
		.disable = gk20a_mc_disable,
		.reset = gk20a_mc_reset,
		.boot_0 = gk20a_mc_boot_0,
		.is_intr1_pending = mc_gp10b_is_intr1_pending,
		.is_intr_hub_pending = gv11b_mc_is_intr_hub_pending,
	},
	.debug = {
		.show_dump = gk20a_debug_show_dump,
	},
	.dbg_session_ops = {
		.exec_reg_ops = exec_regops_gk20a,
		.dbg_set_powergate = dbg_set_powergate,
		.check_and_set_global_reservation =
			nvgpu_check_and_set_global_reservation,
		.check_and_set_context_reservation =
			nvgpu_check_and_set_context_reservation,
		.release_profiler_reservation =
			nvgpu_release_profiler_reservation,
		.perfbuffer_enable = gv11b_perfbuf_enable_locked,
		.perfbuffer_disable = gv11b_perfbuf_disable_locked,
	},
	.bus = {
		.init_hw = gk20a_bus_init_hw,
		.isr = gk20a_bus_isr,
		.read_ptimer = gk20a_read_ptimer,
		.get_timestamps_zipper = nvgpu_get_timestamps_zipper,
		.bar1_bind = NULL,
	},
#if defined(CONFIG_GK20A_CYCLE_STATS)
	.css = {
		.enable_snapshot = gv11b_css_hw_enable_snapshot,
		.disable_snapshot = gv11b_css_hw_disable_snapshot,
		.check_data_available = gv11b_css_hw_check_data_available,
		.set_handled_snapshots = css_hw_set_handled_snapshots,
		.allocate_perfmon_ids = css_gr_allocate_perfmon_ids,
		.release_perfmon_ids = css_gr_release_perfmon_ids,
	},
#endif
	.xve = {
		.get_speed = xve_get_speed_gp106,
		.set_speed = xve_set_speed_gp106,
		.available_speeds = xve_available_speeds_gp106,
		.xve_readl = xve_xve_readl_gp106,
		.xve_writel = xve_xve_writel_gp106,
		.disable_aspm = xve_disable_aspm_gp106,
		.reset_gpu = xve_reset_gpu_gp106,
#if defined(CONFIG_PCI_MSI)
		.rearm_msi = xve_rearm_msi_gp106,
#endif
		.enable_shadow_rom = xve_enable_shadow_rom_gp106,
		.disable_shadow_rom = xve_disable_shadow_rom_gp106,
	},
	.falcon = {
		.falcon_hal_sw_init = gp106_falcon_hal_sw_init,
	},
	.priv_ring = {
		.isr = gp10b_priv_ring_isr,
	},
	.chip_init_gpu_characteristics = gv100_init_gpu_characteristics,
	.get_litter_value = gv100_get_litter_value,
};
713
/*
 * Install the GV100 HAL: copy the static gv100_ops dispatch table into
 * the live gk20a ops, then set chip feature flags and boot parameters.
 *
 * Always returns 0.
 */
int gv100_init_hal(struct gk20a *g)
{
	struct gpu_ops *gops = &g->ops;

	gops->bios = gv100_ops.bios;
	gops->ltc = gv100_ops.ltc;
	gops->ce2 = gv100_ops.ce2;
	gops->gr = gv100_ops.gr;
	gops->fb = gv100_ops.fb;
	/* NOTE(review): gv100_ops has no .clock_gating initializer, so this
	 * copies a zeroed sub-struct (all NULL hooks) — confirm intended. */
	gops->clock_gating = gv100_ops.clock_gating;
	gops->fifo = gv100_ops.fifo;
	gops->gr_ctx = gv100_ops.gr_ctx;
	gops->mm = gv100_ops.mm;
#ifdef CONFIG_GK20A_CTXSW_TRACE
	gops->fecs_trace = gv100_ops.fecs_trace;
#endif
	gops->pramin = gv100_ops.pramin;
	/* NOTE(review): .therm is likewise absent from gv100_ops; this
	 * leaves all therm ops NULL — confirm intended at this stage. */
	gops->therm = gv100_ops.therm;
	gops->pmu = gv100_ops.pmu;
	gops->regops = gv100_ops.regops;
	gops->mc = gv100_ops.mc;
	gops->debug = gv100_ops.debug;
	gops->dbg_session_ops = gv100_ops.dbg_session_ops;
	gops->bus = gv100_ops.bus;
#if defined(CONFIG_GK20A_CYCLE_STATS)
	gops->css = gv100_ops.css;
#endif
	gops->xve = gv100_ops.xve;
	gops->falcon = gv100_ops.falcon;
	gops->priv_ring = gv100_ops.priv_ring;

	/* clocks */
	gops->clk.init_clk_support = gv100_ops.clk.init_clk_support;
	gops->clk.get_crystal_clk_hz = gv100_ops.clk.get_crystal_clk_hz;
	gops->clk.measure_freq = gv100_ops.clk.measure_freq;
	gops->clk.suspend_clk_support = gv100_ops.clk.suspend_clk_support;

	/* Lone functions */
	gops->chip_init_gpu_characteristics =
		gv100_ops.chip_init_gpu_characteristics;
	gops->get_litter_value = gv100_ops.get_litter_value;

	/* dGPU: privileged security and secure GPCCS are mandatory */
	__nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true);
	__nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true);
	__nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true);
	__nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
	/* for now */
	__nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false);

	g->pmu_lsf_pmu_wpr_init_done = 0;
	/* SEC2 (not PMU) bootstraps the LS falcons on this chip */
	g->bootstrap_owner = LSF_FALCON_ID_SEC2;

	g->name = "gv10x";

	return 0;
}
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.h b/drivers/gpu/nvgpu/gv100/hal_gv100.h
new file mode 100644
index 00000000..7dcf1d77
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.h
@@ -0,0 +1,30 @@
1/*
2 * GV100 Tegra HAL interface
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef _NVGPU_HAL_GV100_H
#define _NVGPU_HAL_GV100_H

struct gk20a;

/*
 * Install the GV100 HAL entry points into g->ops.
 * Returns 0 on success.
 */
int gv100_init_hal(struct gk20a *g);

#endif /* _NVGPU_HAL_GV100_H */
diff --git a/drivers/gpu/nvgpu/gv100/mm_gv100.c b/drivers/gpu/nvgpu/gv100/mm_gv100.c
new file mode 100644
index 00000000..1b46faae
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/mm_gv100.c
@@ -0,0 +1,55 @@
1/*
2 * GV100 memory management
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26#include "gv100/mm_gv100.h"
27
28#include <nvgpu/hw/gv100/hw_fb_gv100.h>
29
30size_t gv100_mm_get_vidmem_size(struct gk20a *g)
31{
32 u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r());
33 u32 mag = fb_mmu_local_memory_range_lower_mag_v(range);
34 u32 scale = fb_mmu_local_memory_range_lower_scale_v(range);
35 u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range);
36 size_t bytes = ((size_t)mag << scale) * SZ_1M;
37
38 if (ecc)
39 bytes = bytes / 16 * 15;
40
41 return bytes;
42}
43
44u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op)
45{
46 switch (op) {
47 /* GV100 has a large FB so it needs larger timeouts */
48 case NVGPU_FLUSH_FB:
49 return 2000;
50 case NVGPU_FLUSH_L2_FLUSH:
51 return 2000;
52 default:
53 return 200; /* Default retry timer */
54 }
55}
diff --git a/drivers/gpu/nvgpu/gv100/mm_gv100.h b/drivers/gpu/nvgpu/gv100/mm_gv100.h
new file mode 100644
index 00000000..ea896503
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/mm_gv100.h
@@ -0,0 +1,33 @@
1/*
2 * GV100 memory management
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef MM_GV100_H
#define MM_GV100_H

struct gk20a;

/* Usable vidmem size in bytes (ECC reservation already subtracted). */
size_t gv100_mm_get_vidmem_size(struct gk20a *g);
/*
 * Retry count for the given flush op; GV100's large FB needs longer
 * FB/L2 flush timeouts than the default.
 * NOTE(review): enum nvgpu_flush_op is not declared in this header;
 * includers must pull in the defining header first -- confirm intended.
 */
u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op);

#endif
diff --git a/drivers/gpu/nvgpu/gv100/regops_gv100.c b/drivers/gpu/nvgpu/gv100/regops_gv100.c
new file mode 100644
index 00000000..00f05418
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/regops_gv100.c
@@ -0,0 +1,463 @@
1/*
2 * Tegra GV100 GPU Driver Register Ops
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26#include "gk20a/dbg_gpu_gk20a.h"
27#include "gk20a/regops_gk20a.h"
28#include "regops_gv100.h"
29
30static const struct regop_offset_range gv100_global_whitelist_ranges[] = {
31 { 0x000004f0, 1},
32 { 0x00001a00, 3},
33 { 0x00002800, 128},
34 { 0x00009400, 1},
35 { 0x00009410, 1},
36 { 0x00009480, 1},
37 { 0x00020200, 24},
38 { 0x00021c00, 4},
39 { 0x00021c14, 3},
40 { 0x00021c24, 1},
41 { 0x00021c2c, 69},
42 { 0x00021d44, 1},
43 { 0x00021d4c, 1},
44 { 0x00021d54, 1},
45 { 0x00021d5c, 1},
46 { 0x00021d64, 2},
47 { 0x00021d70, 16},
48 { 0x00022430, 7},
49 { 0x00022450, 1},
50 { 0x0002245c, 2},
51 { 0x00070000, 5},
52 { 0x000884e0, 1},
53 { 0x0008e00c, 1},
54 { 0x00100c18, 3},
55 { 0x00100c84, 1},
56 { 0x00104038, 1},
57 { 0x0010a0a8, 1},
58 { 0x0010a4f0, 1},
59 { 0x0010e490, 1},
60 { 0x0013cc14, 1},
61 { 0x00140028, 1},
62 { 0x00140280, 1},
63 { 0x001402a0, 1},
64 { 0x00140350, 1},
65 { 0x00140480, 1},
66 { 0x001404a0, 1},
67 { 0x00140550, 1},
68 { 0x00142028, 1},
69 { 0x00142280, 1},
70 { 0x001422a0, 1},
71 { 0x00142350, 1},
72 { 0x00142480, 1},
73 { 0x001424a0, 1},
74 { 0x00142550, 1},
75 { 0x0017e028, 1},
76 { 0x0017e280, 1},
77 { 0x0017e294, 1},
78 { 0x0017e29c, 2},
79 { 0x0017e2ac, 1},
80 { 0x0017e350, 1},
81 { 0x0017e39c, 1},
82 { 0x0017e480, 1},
83 { 0x0017e4a0, 1},
84 { 0x0017e550, 1},
85 { 0x00180040, 41},
86 { 0x001800ec, 10},
87 { 0x00180240, 41},
88 { 0x001802ec, 10},
89 { 0x00180440, 41},
90 { 0x001804ec, 10},
91 { 0x00180640, 41},
92 { 0x001806ec, 10},
93 { 0x00180840, 41},
94 { 0x001808ec, 10},
95 { 0x00180a40, 41},
96 { 0x00180aec, 10},
97 { 0x00180c40, 41},
98 { 0x00180cec, 10},
99 { 0x00180e40, 41},
100 { 0x00180eec, 10},
101 { 0x001a0040, 41},
102 { 0x001a00ec, 10},
103 { 0x001a0240, 41},
104 { 0x001a02ec, 10},
105 { 0x001a0440, 41},
106 { 0x001a04ec, 10},
107 { 0x001a0640, 41},
108 { 0x001a06ec, 10},
109 { 0x001a0840, 41},
110 { 0x001a08ec, 10},
111 { 0x001a0a40, 41},
112 { 0x001a0aec, 10},
113 { 0x001a0c40, 41},
114 { 0x001a0cec, 10},
115 { 0x001a0e40, 41},
116 { 0x001a0eec, 10},
117 { 0x001b0040, 41},
118 { 0x001b00ec, 10},
119 { 0x001b0240, 41},
120 { 0x001b02ec, 10},
121 { 0x001b0440, 41},
122 { 0x001b04ec, 10},
123 { 0x001b0640, 41},
124 { 0x001b06ec, 10},
125 { 0x001b0840, 41},
126 { 0x001b08ec, 10},
127 { 0x001b0a40, 41},
128 { 0x001b0aec, 10},
129 { 0x001b0c40, 41},
130 { 0x001b0cec, 10},
131 { 0x001b0e40, 41},
132 { 0x001b0eec, 10},
133 { 0x001b4000, 1},
134 { 0x001b4008, 1},
135 { 0x001b4010, 3},
136 { 0x001b4020, 3},
137 { 0x001b4030, 3},
138 { 0x001b4040, 3},
139 { 0x001b4050, 3},
140 { 0x001b4060, 4},
141 { 0x001b4074, 7},
142 { 0x001b4094, 3},
143 { 0x001b40a4, 1},
144 { 0x001b4100, 6},
145 { 0x001b4128, 1},
146 { 0x001b8000, 1},
147 { 0x001b8008, 1},
148 { 0x001b8010, 2},
149 { 0x001bc000, 1},
150 { 0x001bc008, 1},
151 { 0x001bc010, 2},
152 { 0x001be000, 1},
153 { 0x001be008, 1},
154 { 0x001be010, 2},
155 { 0x00400500, 1},
156 { 0x0040415c, 1},
157 { 0x00404468, 1},
158 { 0x00404498, 1},
159 { 0x00405800, 1},
160 { 0x00405840, 2},
161 { 0x00405850, 1},
162 { 0x00405908, 1},
163 { 0x00405b40, 1},
164 { 0x00405b50, 1},
165 { 0x00406024, 5},
166 { 0x00407010, 1},
167 { 0x00407808, 1},
168 { 0x0040803c, 1},
169 { 0x00408804, 1},
170 { 0x0040880c, 1},
171 { 0x00408900, 2},
172 { 0x00408910, 1},
173 { 0x00408944, 1},
174 { 0x00408984, 1},
175 { 0x004090a8, 1},
176 { 0x004098a0, 1},
177 { 0x00409b00, 1},
178 { 0x0041000c, 1},
179 { 0x00410110, 1},
180 { 0x00410184, 1},
181 { 0x0041040c, 1},
182 { 0x00410510, 1},
183 { 0x00410584, 1},
184 { 0x00418000, 1},
185 { 0x00418008, 1},
186 { 0x00418380, 2},
187 { 0x00418400, 2},
188 { 0x004184a0, 1},
189 { 0x00418604, 1},
190 { 0x00418680, 1},
191 { 0x00418704, 1},
192 { 0x00418714, 1},
193 { 0x00418800, 1},
194 { 0x0041881c, 1},
195 { 0x00418830, 1},
196 { 0x00418884, 1},
197 { 0x004188b0, 1},
198 { 0x004188c8, 3},
199 { 0x004188fc, 1},
200 { 0x00418b04, 1},
201 { 0x00418c04, 1},
202 { 0x00418c10, 8},
203 { 0x00418c88, 1},
204 { 0x00418d00, 1},
205 { 0x00418e00, 1},
206 { 0x00418e08, 1},
207 { 0x00418e34, 1},
208 { 0x00418e40, 4},
209 { 0x00418e58, 16},
210 { 0x00418f08, 1},
211 { 0x00419000, 1},
212 { 0x0041900c, 1},
213 { 0x00419018, 1},
214 { 0x00419854, 1},
215 { 0x00419864, 1},
216 { 0x00419a04, 2},
217 { 0x00419a14, 1},
218 { 0x00419ab0, 1},
219 { 0x00419ab8, 3},
220 { 0x00419c0c, 1},
221 { 0x00419c8c, 2},
222 { 0x00419d00, 1},
223 { 0x00419d08, 2},
224 { 0x00419e00, 11},
225 { 0x00419e34, 2},
226 { 0x00419e44, 11},
227 { 0x00419e74, 10},
228 { 0x00419ea4, 1},
229 { 0x00419eac, 2},
230 { 0x00419ee8, 1},
231 { 0x00419ef0, 28},
232 { 0x00419f70, 1},
233 { 0x00419f78, 2},
234 { 0x00419f98, 2},
235 { 0x0041a02c, 2},
236 { 0x0041a0a8, 1},
237 { 0x0041a8a0, 3},
238 { 0x0041b014, 1},
239 { 0x0041b0a0, 1},
240 { 0x0041b0cc, 1},
241 { 0x0041b1dc, 1},
242 { 0x0041be0c, 3},
243 { 0x0041bea0, 1},
244 { 0x0041becc, 1},
245 { 0x0041bfdc, 1},
246 { 0x0041c054, 1},
247 { 0x0041c2b0, 1},
248 { 0x0041c2b8, 3},
249 { 0x0041c40c, 1},
250 { 0x0041c48c, 2},
251 { 0x0041c500, 1},
252 { 0x0041c508, 2},
253 { 0x0041c600, 11},
254 { 0x0041c634, 2},
255 { 0x0041c644, 11},
256 { 0x0041c674, 10},
257 { 0x0041c6a4, 1},
258 { 0x0041c6ac, 2},
259 { 0x0041c6e8, 1},
260 { 0x0041c6f0, 28},
261 { 0x0041c770, 1},
262 { 0x0041c778, 2},
263 { 0x0041c798, 2},
264 { 0x0041c854, 1},
265 { 0x0041cab0, 1},
266 { 0x0041cab8, 3},
267 { 0x0041cc0c, 1},
268 { 0x0041cc8c, 2},
269 { 0x0041cd00, 1},
270 { 0x0041cd08, 2},
271 { 0x0041ce00, 11},
272 { 0x0041ce34, 2},
273 { 0x0041ce44, 11},
274 { 0x0041ce74, 10},
275 { 0x0041cea4, 1},
276 { 0x0041ceac, 2},
277 { 0x0041cee8, 1},
278 { 0x0041cef0, 28},
279 { 0x0041cf70, 1},
280 { 0x0041cf78, 2},
281 { 0x0041cf98, 2},
282 { 0x00500384, 1},
283 { 0x005004a0, 1},
284 { 0x00500604, 1},
285 { 0x00500680, 1},
286 { 0x00500714, 1},
287 { 0x0050081c, 1},
288 { 0x00500884, 1},
289 { 0x005008b0, 1},
290 { 0x005008c8, 3},
291 { 0x005008fc, 1},
292 { 0x00500b04, 1},
293 { 0x00500c04, 1},
294 { 0x00500c10, 8},
295 { 0x00500c88, 1},
296 { 0x00500d00, 1},
297 { 0x00500e08, 1},
298 { 0x00500f08, 1},
299 { 0x00501000, 1},
300 { 0x0050100c, 1},
301 { 0x00501018, 1},
302 { 0x00501854, 1},
303 { 0x00501ab0, 1},
304 { 0x00501ab8, 3},
305 { 0x00501c0c, 1},
306 { 0x00501c8c, 2},
307 { 0x00501d00, 1},
308 { 0x00501d08, 2},
309 { 0x00501e00, 11},
310 { 0x00501e34, 2},
311 { 0x00501e44, 11},
312 { 0x00501e74, 10},
313 { 0x00501ea4, 1},
314 { 0x00501eac, 2},
315 { 0x00501ee8, 1},
316 { 0x00501ef0, 28},
317 { 0x00501f70, 1},
318 { 0x00501f78, 2},
319 { 0x00501f98, 2},
320 { 0x0050202c, 2},
321 { 0x005020a8, 1},
322 { 0x005028a0, 3},
323 { 0x00503014, 1},
324 { 0x005030a0, 1},
325 { 0x005030cc, 1},
326 { 0x005031dc, 1},
327 { 0x00503e14, 1},
328 { 0x00503ea0, 1},
329 { 0x00503ecc, 1},
330 { 0x00503fdc, 1},
331 { 0x00504054, 1},
332 { 0x005042b0, 1},
333 { 0x005042b8, 3},
334 { 0x0050440c, 1},
335 { 0x0050448c, 2},
336 { 0x00504500, 1},
337 { 0x00504508, 2},
338 { 0x00504600, 11},
339 { 0x00504634, 2},
340 { 0x00504644, 11},
341 { 0x00504674, 10},
342 { 0x005046a4, 1},
343 { 0x005046ac, 2},
344 { 0x005046e8, 1},
345 { 0x005046f0, 28},
346 { 0x00504770, 1},
347 { 0x00504778, 2},
348 { 0x00504798, 2},
349 { 0x00504854, 1},
350 { 0x00504ab0, 1},
351 { 0x00504ab8, 3},
352 { 0x00504c0c, 1},
353 { 0x00504c8c, 2},
354 { 0x00504d00, 1},
355 { 0x00504d08, 2},
356 { 0x00504e00, 11},
357 { 0x00504e34, 2},
358 { 0x00504e44, 11},
359 { 0x00504e74, 10},
360 { 0x00504ea4, 1},
361 { 0x00504eac, 2},
362 { 0x00504ee8, 1},
363 { 0x00504ef0, 28},
364 { 0x00504f70, 1},
365 { 0x00504f78, 2},
366 { 0x00504f98, 2},
367 { 0x00900100, 1},
368 { 0x009a0100, 1},};
369
370
371static const u32 gv100_global_whitelist_ranges_count =
372 ARRAY_SIZE(gv100_global_whitelist_ranges);
373
/* context */

/* runcontrol: no registers are whitelisted for run control on gv100 */
static const u32 gv100_runcontrol_whitelist[] = {
};
static const u32 gv100_runcontrol_whitelist_count =
	ARRAY_SIZE(gv100_runcontrol_whitelist);

static const struct regop_offset_range gv100_runcontrol_whitelist_ranges[] = {
};
static const u32 gv100_runcontrol_whitelist_ranges_count =
	ARRAY_SIZE(gv100_runcontrol_whitelist_ranges);


/* quad ctl: likewise empty on gv100 */
static const u32 gv100_qctl_whitelist[] = {
};
static const u32 gv100_qctl_whitelist_count =
	ARRAY_SIZE(gv100_qctl_whitelist);

static const struct regop_offset_range gv100_qctl_whitelist_ranges[] = {
};
static const u32 gv100_qctl_whitelist_ranges_count =
	ARRAY_SIZE(gv100_qctl_whitelist_ranges);
398
/* Table of BAR0 register ranges any regops client may access. */
const struct regop_offset_range *gv100_get_global_whitelist_ranges(void)
{
	return gv100_global_whitelist_ranges;
}
403
/* Entry count for the global whitelist table. */
int gv100_get_global_whitelist_ranges_count(void)
{
	return gv100_global_whitelist_ranges_count;
}
408
/* gv100 reuses the global whitelist table for per-context accesses. */
const struct regop_offset_range *gv100_get_context_whitelist_ranges(void)
{
	return gv100_global_whitelist_ranges;
}
413
/* Context whitelist count: same as the global table on gv100. */
int gv100_get_context_whitelist_ranges_count(void)
{
	return gv100_global_whitelist_ranges_count;
}
418
/* Runcontrol whitelist (empty on gv100). */
const u32 *gv100_get_runcontrol_whitelist(void)
{
	return gv100_runcontrol_whitelist;
}
423
/* Runcontrol whitelist entry count (0 on gv100). */
int gv100_get_runcontrol_whitelist_count(void)
{
	return gv100_runcontrol_whitelist_count;
}
428
/* Runcontrol whitelist ranges (empty on gv100). */
const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void)
{
	return gv100_runcontrol_whitelist_ranges;
}
433
/* Runcontrol whitelist range count (0 on gv100). */
int gv100_get_runcontrol_whitelist_ranges_count(void)
{
	return gv100_runcontrol_whitelist_ranges_count;
}
438
/* Quad-control whitelist (empty on gv100). */
const u32 *gv100_get_qctl_whitelist(void)
{
	return gv100_qctl_whitelist;
}
443
/* Quad-control whitelist entry count (0 on gv100). */
int gv100_get_qctl_whitelist_count(void)
{
	return gv100_qctl_whitelist_count;
}
448
/* Quad-control whitelist ranges (empty on gv100). */
const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void)
{
	return gv100_qctl_whitelist_ranges;
}
453
/* Quad-control whitelist range count (0 on gv100). */
int gv100_get_qctl_whitelist_ranges_count(void)
{
	return gv100_qctl_whitelist_ranges_count;
}
458
/* SMPC workaround hook kept as a no-op; always succeeds on gv100. */
int gv100_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
{
	/* Not needed on gv100 */
	return 0;
}
diff --git a/drivers/gpu/nvgpu/gv100/regops_gv100.h b/drivers/gpu/nvgpu/gv100/regops_gv100.h
new file mode 100644
index 00000000..06e5b8e1
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv100/regops_gv100.h
@@ -0,0 +1,42 @@
1/*
2 *
3 * Tegra GV100 GPU Driver Register Ops
4 *
5 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25#ifndef __REGOPS_GV100_H_
26#define __REGOPS_GV100_H_
27
28const struct regop_offset_range *gv100_get_global_whitelist_ranges(void);
29int gv100_get_global_whitelist_ranges_count(void);
30const struct regop_offset_range *gv100_get_context_whitelist_ranges(void);
31int gv100_get_context_whitelist_ranges_count(void);
32const u32 *gv100_get_runcontrol_whitelist(void);
33int gv100_get_runcontrol_whitelist_count(void);
34const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void);
35int gv100_get_runcontrol_whitelist_ranges_count(void);
36const u32 *gv100_get_qctl_whitelist(void);
37int gv100_get_qctl_whitelist_count(void);
38const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void);
39int gv100_get_qctl_whitelist_ranges_count(void);
40int gv100_apply_smpc_war(struct dbg_session_gk20a *dbg_s);
41
42#endif /* __REGOPS_GV11B_H_ */
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
new file mode 100644
index 00000000..b245dbc6
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -0,0 +1,294 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifdef CONFIG_DEBUG_FS
24#include <linux/debugfs.h>
25#endif
26
27#include <nvgpu/types.h>
28#include <linux/platform/tegra/mc.h>
29
30#include <nvgpu/dma.h>
31#include <nvgpu/gmmu.h>
32#include <nvgpu/timers.h>
33#include <nvgpu/nvgpu_common.h>
34#include <nvgpu/kmem.h>
35#include <nvgpu/nvgpu_mem.h>
36#include <nvgpu/acr/nvgpu_acr.h>
37#include <nvgpu/firmware.h>
38#include <nvgpu/mm.h>
39
40#include "gk20a/gk20a.h"
41#include "acr_gv11b.h"
42#include "pmu_gv11b.h"
43#include "gk20a/pmu_gk20a.h"
44#include "gm20b/mm_gm20b.h"
45#include "gm20b/acr_gm20b.h"
46#include "gp106/acr_gp106.h"
47
48#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
49
/*Defines*/
/* PMU-domain debug print helper. */
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

/*
 * Split a 64-bit DMA address into the lo/hi words of a falc_u64.
 * NOTE(review): this ORs into the destination instead of assigning,
 * so it relies on the descriptor being zero-initialized beforehand --
 * confirm all call sites start from a cleared descriptor.
 */
static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value)
{
	dma_addr->lo |= u64_lo32(value);
	dma_addr->hi |= u64_hi32(value);
}
59/*Externs*/
60
61/*Forwards*/
62
/*
 * Load the ACR binary into FB memory and bootstrap the PMU with the HS
 * bootloader code.  start/size describe the ucode blob in the non-WPR
 * region.  On first boot this fetches the firmware, patches signatures,
 * copies the image into a sysmem buffer and fills the bootloader DMEM
 * descriptor; on later boots the cached descriptor is reused.
 * Returns 0 on success, a negative value on failure.
 */
int gv11b_bootstrap_hs_flcn(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->pmu.vm;
	int err = 0;
	u64 *acr_dmem;
	u32 img_size_in_bytes = 0;
	u32 status, size, index;
	u64 start;
	struct acr_desc *acr = &g->acr;
	struct nvgpu_firmware *acr_fw = acr->acr_fw;
	struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1;
	u32 *acr_ucode_header_t210_load;
	u32 *acr_ucode_data_t210_load;

	/* Location and size of the ucode blob in the non-WPR region. */
	start = nvgpu_mem_get_addr(g, &acr->ucode_blob);
	size = acr->ucode_blob.size;

	gv11b_dbg_pmu("acr ucode blob start %llx\n", start);
	gv11b_dbg_pmu("acr ucode blob size %x\n", size);

	gv11b_dbg_pmu("");

	if (!acr_fw) {
		/*First time init case*/
		acr_fw = nvgpu_request_firmware(g,
				GM20B_HSBIN_PMU_UCODE_IMAGE, 0);
		if (!acr_fw) {
			nvgpu_err(g, "pmu ucode get fail");
			return -ENOENT;
		}
		/* Parse the HS binary: bin header, ACR fw header, payload. */
		acr->acr_fw = acr_fw;
		acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data;
		acr->fw_hdr = (struct acr_fw_header *)(acr_fw->data +
				acr->hsbin_hdr->header_offset);
		acr_ucode_data_t210_load = (u32 *)(acr_fw->data +
				acr->hsbin_hdr->data_offset);
		acr_ucode_header_t210_load = (u32 *)(acr_fw->data +
				acr->fw_hdr->hdr_offset);
		img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256);

		gv11b_dbg_pmu("sig dbg offset %u\n",
				acr->fw_hdr->sig_dbg_offset);
		gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size);
		gv11b_dbg_pmu("sig prod offset %u\n",
				acr->fw_hdr->sig_prod_offset);
		gv11b_dbg_pmu("sig prod size %u\n",
				acr->fw_hdr->sig_prod_size);
		gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc);
		gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig);
		gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset);
		gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size);

		/* Lets patch the signatures first.. */
		if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load,
					(u32 *)(acr_fw->data +
						acr->fw_hdr->sig_prod_offset),
					(u32 *)(acr_fw->data +
						acr->fw_hdr->sig_dbg_offset),
					(u32 *)(acr_fw->data +
						acr->fw_hdr->patch_loc),
					(u32 *)(acr_fw->data +
						acr->fw_hdr->patch_sig)) < 0) {
			nvgpu_err(g, "patch signatures fail");
			err = -1;
			goto err_release_acr_fw;
		}
		/* Sysmem buffer that the falcon will DMA the image from. */
		err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes,
				&acr->acr_ucode);
		if (err) {
			err = -ENOMEM;
			goto err_release_acr_fw;
		}

		for (index = 0; index < 9; index++)
			gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n",
					acr_ucode_header_t210_load[index]);

		/*
		 * The ACR descriptor lives inside the image at the DMEM
		 * offset given by header word [2]; point acr_dmem at it in
		 * the source image and acr_dmem_desc_v1 at the copy in the
		 * mapped buffer, then fill in the non-WPR blob location.
		 */
		acr_dmem = (u64 *)
			&(((u8 *)acr_ucode_data_t210_load)[
					acr_ucode_header_t210_load[2]]);
		acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)(
			acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start =
			(start);
		((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size =
			size;
		((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2;
		((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0;

		nvgpu_mem_wr_n(g, &acr->acr_ucode, 0,
				acr_ucode_data_t210_load, img_size_in_bytes);
		/*
		 * In order to execute this binary, we will be using
		 * a bootloader which will load this image into PMU IMEM/DMEM.
		 * Fill up the bootloader descriptor for PMU HAL to use..
		 * TODO: Use standard descriptor which the generic bootloader is
		 * checked in.
		 */
		bl_dmem_desc->signature[0] = 0;
		bl_dmem_desc->signature[1] = 0;
		bl_dmem_desc->signature[2] = 0;
		bl_dmem_desc->signature[3] = 0;
		bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT;
		flcn64_set_dma(&bl_dmem_desc->code_dma_base,
				acr->acr_ucode.gpu_va);
		bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0];
		bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1];
		bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5];
		bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6];
		bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */
		flcn64_set_dma(&bl_dmem_desc->data_dma_base,
				acr->acr_ucode.gpu_va +
				acr_ucode_header_t210_load[2]);
		bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
	} else
		/* Reboot: blob is already in place; tell ACR not to copy. */
		acr->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0;
	/* NOTE(review): the final argument looks like a wait flag -- confirm. */
	status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
	if (status != 0) {
		err = status;
		goto err_free_ucode_map;
	}

	return 0;
err_free_ucode_map:
	nvgpu_dma_unmap_free(vm, &acr->acr_ucode);
err_release_acr_fw:
	nvgpu_release_firmware(g, acr_fw);
	acr->acr_fw = NULL;

	return err;
}
197
/*
 * Copy the HS bootloader into the top of PMU IMEM, write its DMEM
 * descriptor, and start the falcon at the bootloader entry point.
 * Always returns 0.
 */
static int bl_bootstrap(struct nvgpu_pmu *pmu,
	struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct acr_desc *acr = &g->acr;
	struct mm_gk20a *mm = &g->mm;
	u32 virt_addr = 0;
	struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc;
	u32 dst;

	gk20a_dbg_fn("");

	/* Enable the falcon context interface and bind the PMU inst block. */
	gk20a_writel(g, pwr_falcon_itfen_r(),
			gk20a_readl(g, pwr_falcon_itfen_r()) |
			pwr_falcon_itfen_ctxen_enable_f());
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
			pwr_pmu_new_instblk_ptr_f(
			nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
			pwr_pmu_new_instblk_valid_f(1) |
			pwr_pmu_new_instblk_target_sys_ncoh_f());

	/*copy bootloader interface structure to dmem*/
	nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc,
		sizeof(struct flcn_bl_dmem_desc_v1), 0);

	/* copy bootloader to TOP of IMEM (imem size appears to be in
	 * 256-byte units, hence the << 8 -- confirm against hw manuals) */
	dst = (pwr_falcon_hwcfg_imem_size_v(
			gk20a_readl(g, pwr_falcon_hwcfg_r())) << 8) - bl_sz;

	nvgpu_flcn_copy_to_imem(pmu->flcn, dst,
		(u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0,
		pmu_bl_gm10x_desc->bl_start_tag);

	gv11b_dbg_pmu("Before starting falcon with BL\n");

	/* Start tag is converted to a byte address for the boot vector. */
	virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8;

	nvgpu_flcn_bootstrap(pmu->flcn, virt_addr);

	return 0;
}
239
/*
 * Prepare the PMU falcon for HS bootloader execution: reset the falcon,
 * program the FBIF aperture transcfg registers, copy the PMU command
 * line args to DMEM, then run the bootloader via bl_bootstrap().
 * Returns 0 on success or the bl_bootstrap() error.
 */
int gv11b_init_pmu_setup_hw1(struct gk20a *g,
		void *desc, u32 bl_sz)
{

	struct nvgpu_pmu *pmu = &g->pmu;
	int err;

	gk20a_dbg_fn("");

	/* Reset the falcon; isr_enabled is flipped under the ISR mutex. */
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	nvgpu_flcn_reset(pmu->flcn);
	pmu->isr_enabled = true;
	nvgpu_mutex_release(&pmu->isr_mutex);

	/*
	 * setup apertures - virtual
	 * NOTE(review): the UCODE aperture is programmed as physical
	 * non-coherent sysmem here even though it sits in the "virtual"
	 * group -- confirm this is intended for gv11b.
	 */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
			pwr_fbif_transcfg_mem_type_virtual_f());
	/* setup apertures - physical */
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_coherent_sysmem_f());
	gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
			pwr_fbif_transcfg_mem_type_physical_f() |
			pwr_fbif_transcfg_target_noncoherent_sysmem_f());

	/*Copying pmu cmdline args*/
	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
	g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);
	nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
	/*disable irqs for hs falcon booting as we will poll for halt*/
	nvgpu_mutex_acquire(&pmu->isr_mutex);
	pmu_enable_irq(pmu, false);
	pmu->isr_enabled = false;
	nvgpu_mutex_release(&pmu->isr_mutex);
	/*Clearing mailbox register used to reflect capabilities*/
	gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
	err = bl_bootstrap(pmu, desc, bl_sz);
	if (err)
		return err;
	return 0;
}
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.h b/drivers/gpu/nvgpu/gv11b/acr_gv11b.h
new file mode 100644
index 00000000..72b3ec35
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __ACR_GV11B_H_
24#define __ACR_GV11B_H_
25
26
27int gv11b_bootstrap_hs_flcn(struct gk20a *g);
28int gv11b_init_pmu_setup_hw1(struct gk20a *g,
29 void *desc, u32 bl_sz);
30#endif /*__PMU_GP106_H_*/
diff --git a/drivers/gpu/nvgpu/gv11b/ce_gv11b.c b/drivers/gpu/nvgpu/gv11b/ce_gv11b.c
new file mode 100644
index 00000000..86518ac7
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/ce_gv11b.c
@@ -0,0 +1,110 @@
1/*
2 * Volta GPU series Copy Engine.
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "nvgpu/log.h"
26#include "nvgpu/bitops.h"
27
28#include "gk20a/gk20a.h"
29
30#include "gp10b/ce_gp10b.h"
31
32#include "ce_gv11b.h"
33
34#include <nvgpu/hw/gv11b/hw_ce_gv11b.h>
35#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
36
37u32 gv11b_ce_get_num_pce(struct gk20a *g)
38{
39 /* register contains a bitmask indicating which physical copy
40 * engines are present (and not floorswept).
41 */
42 u32 num_pce;
43 u32 ce_pce_map = gk20a_readl(g, ce_pce_map_r());
44
45 num_pce = hweight32(ce_pce_map);
46 nvgpu_log_info(g, "num PCE: %d", num_pce);
47 return num_pce;
48}
49
50void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
51{
52 u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id));
53 u32 clear_intr = 0;
54
55 nvgpu_log(g, gpu_dbg_intr, "ce isr 0x%08x 0x%08x", ce_intr, inst_id);
56
57 /* An INVALID_CONFIG interrupt will be generated if a floorswept
58 * PCE is assigned to a valid LCE in the NV_CE_PCE2LCE_CONFIG
59 * registers. This is a fatal error and the LCE will have to be
60 * reset to get back to a working state.
61 */
62 if (ce_intr & ce_intr_status_invalid_config_pending_f()) {
63 nvgpu_log(g, gpu_dbg_intr,
64 "ce: inst %d: invalid config", inst_id);
65 clear_intr |= ce_intr_status_invalid_config_reset_f();
66 }
67
68 /* A MTHD_BUFFER_FAULT interrupt will be triggered if any access
69 * to a method buffer during context load or save encounters a fault.
70 * This is a fatal interrupt and will require at least the LCE to be
71 * reset before operations can start again, if not the entire GPU.
72 */
73 if (ce_intr & ce_intr_status_mthd_buffer_fault_pending_f()) {
74 nvgpu_log(g, gpu_dbg_intr,
75 "ce: inst %d: mthd buffer fault", inst_id);
76 clear_intr |= ce_intr_status_mthd_buffer_fault_reset_f();
77 }
78
79 gk20a_writel(g, ce_intr_status_r(inst_id), clear_intr);
80
81 gp10b_ce_isr(g, inst_id, pri_base);
82}
83
84u32 gv11b_ce_get_num_lce(struct gk20a *g)
85{
86 u32 reg_val, num_lce;
87
88 reg_val = gk20a_readl(g, top_num_ces_r());
89 num_lce = top_num_ces_value_v(reg_val);
90 nvgpu_log_info(g, "num LCE: %d", num_lce);
91
92 return num_lce;
93}
94
95void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g)
96{
97 u32 reg_val, num_lce, lce, clear_intr;
98
99 num_lce = gv11b_ce_get_num_lce(g);
100
101 for (lce = 0; lce < num_lce; lce++) {
102 reg_val = gk20a_readl(g, ce_intr_status_r(lce));
103 if (reg_val & ce_intr_status_mthd_buffer_fault_pending_f()) {
104 nvgpu_log(g, gpu_dbg_intr,
105 "ce: lce %d: mthd buffer fault", lce);
106 clear_intr = ce_intr_status_mthd_buffer_fault_reset_f();
107 gk20a_writel(g, ce_intr_status_r(lce), clear_intr);
108 }
109 }
110}
diff --git a/drivers/gpu/nvgpu/gv11b/ce_gv11b.h b/drivers/gpu/nvgpu/gv11b/ce_gv11b.h
new file mode 100644
index 00000000..a0c7e0b1
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/ce_gv11b.h
@@ -0,0 +1,35 @@
1/*
2 *
3 * Volta GPU series copy engine
4 *
5 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25#ifndef __CE_GV11B_H__
26#define __CE_GV11B_H__
27
28struct gk20a;
29
30void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g);
31u32 gv11b_ce_get_num_lce(struct gk20a *g);
32u32 gv11b_ce_get_num_pce(struct gk20a *g);
33void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base);
34
35#endif /*__CE_GV11B_H__*/
diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
new file mode 100644
index 00000000..2eb45a88
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c
@@ -0,0 +1,206 @@
1/*
2 * GV11B Cycle stats snapshots support
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/dma-mapping.h>
26#include <linux/dma-buf.h>
27
28#include <nvgpu/bitops.h>
29#include <nvgpu/kmem.h>
30#include <nvgpu/lock.h>
31#include <nvgpu/dma.h>
32#include <nvgpu/mm.h>
33
34#include "gk20a/gk20a.h"
35#include "gk20a/css_gr_gk20a.h"
36#include "css_gr_gv11b.h"
37
38#include <nvgpu/log.h>
39#include <nvgpu/bug.h>
40
41#include <nvgpu/hw/gv11b/hw_perf_gv11b.h>
42#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
43
44
45/* reports whether the hw queue overflowed */
46static inline bool css_hw_get_overflow_status(struct gk20a *g)
47{
48 const u32 st = perf_pmasys_control_membuf_status_overflowed_f();
49 return st == (gk20a_readl(g, perf_pmasys_control_r()) & st);
50}
51
52/* returns how many pending snapshot entries are pending */
53static inline u32 css_hw_get_pending_snapshots(struct gk20a *g)
54{
55 return gk20a_readl(g, perf_pmasys_mem_bytes_r()) /
56 sizeof(struct gk20a_cs_snapshot_fifo_entry);
57}
58
59/* informs hw how many snapshots have been processed (frees up fifo space) */
60static inline void gv11b_css_hw_set_handled_snapshots(struct gk20a *g, u32 done)
61{
62 if (done > 0) {
63 gk20a_writel(g, perf_pmasys_mem_bump_r(),
64 done * sizeof(struct gk20a_cs_snapshot_fifo_entry));
65 }
66}
67
/* Disable streaming of cycle-stats snapshots to memory and discard any
 * snapshots still queued in the hw fifo. */
static void gv11b_css_hw_reset_streaming(struct gk20a *g)
{
	u32 engine_status;

	/* reset the perfmon unit */
	g->ops.mc.reset(g, mc_enable_perfmon_enabled_f());

	/* RBUFEMPTY must be set -- otherwise we'll pick up snapshots
	 * that have been queued up from earlier.
	 * NOTE(review): engine_status is read but never checked; the
	 * register read itself may be what is relied on here -- confirm
	 * whether a poll for RBUFEMPTY was intended instead. */
	engine_status = gk20a_readl(g, perf_pmasys_enginestatus_r());

	/* turn off writes to the membuf */
	gk20a_writel(g, perf_pmasys_control_r(),
		perf_pmasys_control_membuf_clear_status_doit_f());

	/* mark all pending snapshots as handled (frees fifo space) */
	gv11b_css_hw_set_handled_snapshots(g, css_hw_get_pending_snapshots(g));
}
87
/*
 * Allocate and program the hw cycle-stats snapshot buffer for @ch's GPU.
 *
 * Allocates a sysmem buffer of at least CSS_MIN_HW_SNAPSHOT_SIZE bytes,
 * maps it into the PMU VM, resets PMA streaming state and points the PMA
 * output registers at the new buffer.
 *
 * Returns 0 on success (including when a buffer already exists), or a
 * negative error code; on failure the allocation is released.
 */
int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
		struct gk20a_cs_snapshot_client *cs_client)
{
	struct gk20a *g = ch->g;
	struct gr_gk20a *gr = &g->gr;
	struct gk20a_cs_snapshot *data = gr->cs_data;
	u32 snapshot_size = cs_client->snapshot_size;
	int ret;

	u32 virt_addr_lo;
	u32 virt_addr_hi;
	u32 inst_pa_page;

	/* buffer already set up -- nothing to do */
	if (data->hw_snapshot)
		return 0;

	if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
		snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;

	ret = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, snapshot_size,
			&data->hw_memdesc);
	if (ret)
		return ret;

	/* perf output buffer may not cross a 4GB boundary - with a separate
	 * va smaller than that, it won't but check anyway */
	if (!data->hw_memdesc.cpu_va ||
			data->hw_memdesc.size < snapshot_size ||
			data->hw_memdesc.gpu_va + u64_lo32(snapshot_size) > SZ_4G) {
		ret = -EFAULT;
		goto failed_allocation;
	}

	data->hw_snapshot =
		(struct gk20a_cs_snapshot_fifo_entry *)data->hw_memdesc.cpu_va;
	data->hw_end = data->hw_snapshot +
		snapshot_size / sizeof(struct gk20a_cs_snapshot_fifo_entry);
	data->hw_get = data->hw_snapshot;
	/* 0xff pattern marks entries the hw has not yet written */
	memset(data->hw_snapshot, 0xff, snapshot_size);

	virt_addr_lo = u64_lo32(data->hw_memdesc.gpu_va);
	virt_addr_hi = u64_hi32(data->hw_memdesc.gpu_va);

	/* quiesce streaming before repointing the output registers */
	gv11b_css_hw_reset_streaming(g);

	gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(virt_addr_hi));
	gk20a_writel(g, perf_pmasys_outsize_r(), snapshot_size);

	/* this field is aligned to 4K */
	inst_pa_page = nvgpu_inst_block_addr(g, &g->mm.hwpm.inst_block) >> 12;

	/* bind the hwpm instance block; aperture picked by where the
	 * inst block actually lives (sysmem non-coherent vs vidmem) */
	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(inst_pa_page) |
			perf_pmasys_mem_block_valid_true_f() |
			nvgpu_aperture_mask(g, &g->mm.hwpm.inst_block,
				perf_pmasys_mem_block_target_sys_ncoh_f(),
				perf_pmasys_mem_block_target_lfb_f()));

	gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n");

	return 0;

failed_allocation:
	if (data->hw_memdesc.size) {
		nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
	}
	data->hw_snapshot = NULL;

	return ret;
}
162
163void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr)
164{
165 struct gk20a *g = gr->g;
166 struct gk20a_cs_snapshot *data = gr->cs_data;
167
168 if (!data->hw_snapshot)
169 return;
170
171 gv11b_css_hw_reset_streaming(g);
172
173 gk20a_writel(g, perf_pmasys_outbase_r(), 0);
174 gk20a_writel(g, perf_pmasys_outbaseupper_r(),
175 perf_pmasys_outbaseupper_ptr_f(0));
176 gk20a_writel(g, perf_pmasys_outsize_r(), 0);
177
178 gk20a_writel(g, perf_pmasys_mem_block_r(),
179 perf_pmasys_mem_block_base_f(0) |
180 perf_pmasys_mem_block_valid_false_f() |
181 perf_pmasys_mem_block_target_f(0));
182
183 nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc);
184 memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
185 data->hw_snapshot = NULL;
186
187 gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n");
188}
189
190int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,
191 bool *hw_overflow)
192{
193 struct gk20a *g = ch->g;
194 struct gr_gk20a *gr = &g->gr;
195 struct gk20a_cs_snapshot *css = gr->cs_data;
196
197 if (!css->hw_snapshot)
198 return -EINVAL;
199
200 *pending = css_hw_get_pending_snapshots(g);
201 if (!*pending)
202 return 0;
203
204 *hw_overflow = css_hw_get_overflow_status(g);
205 return 0;
206}
diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h
new file mode 100644
index 00000000..6b11a62e
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h
@@ -0,0 +1,34 @@
1/*
2 * GV11B Cycle stats snapshots support
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef CSS_GR_GV11B_H
26#define CSS_GR_GV11B_H
27
28int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
29 struct gk20a_cs_snapshot_client *cs_client);
30void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr);
31int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending,
32 bool *hw_overflow);
33
34#endif /* CSS_GR_GV11B_H */
diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
new file mode 100644
index 00000000..a02c2ddd
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
@@ -0,0 +1,99 @@
1/*
2 * Tegra GV11B GPU Debugger/Profiler Driver
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <uapi/linux/nvgpu.h>
26
27#include <nvgpu/log.h>
28#include "gk20a/gk20a.h"
29#include <nvgpu/hw/gv11b/hw_perf_gv11b.h>
30
31int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
32{
33 struct mm_gk20a *mm = &g->mm;
34 u32 virt_addr_lo;
35 u32 virt_addr_hi;
36 u32 inst_pa_page;
37 int err;
38
39 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
40 err = gk20a_busy(g);
41 if (err) {
42 nvgpu_err(g, "failed to poweron");
43 return err;
44 }
45
46 err = gk20a_alloc_inst_block(g, &mm->perfbuf.inst_block);
47 if (err)
48 return err;
49
50 g->ops.mm.init_inst_block(&mm->perfbuf.inst_block, mm->perfbuf.vm, 0);
51
52 virt_addr_lo = u64_lo32(offset);
53 virt_addr_hi = u64_hi32(offset);
54
55 gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo);
56 gk20a_writel(g, perf_pmasys_outbaseupper_r(),
57 perf_pmasys_outbaseupper_ptr_f(virt_addr_hi));
58 gk20a_writel(g, perf_pmasys_outsize_r(), size);
59
60 /* this field is aligned to 4K */
61 inst_pa_page = nvgpu_inst_block_addr(g, &mm->perfbuf.inst_block) >> 12;
62
63 gk20a_writel(g, perf_pmasys_mem_block_r(),
64 perf_pmasys_mem_block_base_f(inst_pa_page) |
65 perf_pmasys_mem_block_valid_true_f() |
66 nvgpu_aperture_mask(g, &mm->perfbuf.inst_block,
67+ perf_pmasys_mem_block_target_sys_ncoh_f(),
68+ perf_pmasys_mem_block_target_lfb_f()));
69
70 gk20a_idle(g);
71 return 0;
72}
73
74/* must be called with dbg_sessions_lock held */
75int gv11b_perfbuf_disable_locked(struct gk20a *g)
76{
77 int err;
78
79 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
80 err = gk20a_busy(g);
81 if (err) {
82 nvgpu_err(g, "failed to poweron");
83 return err;
84 }
85
86 gk20a_writel(g, perf_pmasys_outbase_r(), 0);
87 gk20a_writel(g, perf_pmasys_outbaseupper_r(),
88 perf_pmasys_outbaseupper_ptr_f(0));
89 gk20a_writel(g, perf_pmasys_outsize_r(), 0);
90
91 gk20a_writel(g, perf_pmasys_mem_block_r(),
92 perf_pmasys_mem_block_base_f(0) |
93 perf_pmasys_mem_block_valid_false_f() |
94 perf_pmasys_mem_block_target_f(0));
95
96 gk20a_idle(g);
97
98 return 0;
99}
diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h
new file mode 100644
index 00000000..88771a49
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#ifndef DBG_GPU_GV11B_H
23#define DBG_GPU_GV11B_H
24
25int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size);
26int gv11b_perfbuf_disable_locked(struct gk20a *g);
27
28#endif /* DBG_GPU_GV11B_H */
diff --git a/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h b/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h
new file mode 100644
index 00000000..94b25c02
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h
@@ -0,0 +1,64 @@
1/*
2 * GV11B GPU ECC
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_ECC_GV11B_H_
26#define _NVGPU_ECC_GV11B_H_
27
28struct ecc_gr_t19x {
29 struct gk20a_ecc_stat sm_l1_tag_corrected_err_count;
30 struct gk20a_ecc_stat sm_l1_tag_uncorrected_err_count;
31 struct gk20a_ecc_stat sm_cbu_corrected_err_count;
32 struct gk20a_ecc_stat sm_cbu_uncorrected_err_count;
33 struct gk20a_ecc_stat sm_l1_data_corrected_err_count;
34 struct gk20a_ecc_stat sm_l1_data_uncorrected_err_count;
35 struct gk20a_ecc_stat sm_icache_corrected_err_count;
36 struct gk20a_ecc_stat sm_icache_uncorrected_err_count;
37 struct gk20a_ecc_stat gcc_l15_corrected_err_count;
38 struct gk20a_ecc_stat gcc_l15_uncorrected_err_count;
39 struct gk20a_ecc_stat fecs_corrected_err_count;
40 struct gk20a_ecc_stat fecs_uncorrected_err_count;
41 struct gk20a_ecc_stat gpccs_corrected_err_count;
42 struct gk20a_ecc_stat gpccs_uncorrected_err_count;
43 struct gk20a_ecc_stat mmu_l1tlb_corrected_err_count;
44 struct gk20a_ecc_stat mmu_l1tlb_uncorrected_err_count;
45};
46
47struct ecc_ltc_t19x {
48 struct gk20a_ecc_stat l2_cache_corrected_err_count;
49 struct gk20a_ecc_stat l2_cache_uncorrected_err_count;
50};
51
52/* TODO: PMU and FB ECC features are still under embargo */
53struct ecc_eng_t19x {
54 /* FB */
55 struct gk20a_ecc_stat mmu_l2tlb_corrected_err_count;
56 struct gk20a_ecc_stat mmu_l2tlb_uncorrected_err_count;
57 struct gk20a_ecc_stat mmu_hubtlb_corrected_err_count;
58 struct gk20a_ecc_stat mmu_hubtlb_uncorrected_err_count;
59 struct gk20a_ecc_stat mmu_fillunit_corrected_err_count;
60 struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count;
61 /* PMU */
62};
63
64#endif
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
new file mode 100644
index 00000000..ec487bdf
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -0,0 +1,1555 @@
1/*
2 * GV11B FB
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/types.h>
26
27#include <nvgpu/dma.h>
28#include <nvgpu/log.h>
29#include <nvgpu/enabled.h>
30#include <nvgpu/gmmu.h>
31#include <nvgpu/barrier.h>
32#include <nvgpu/soc.h>
33
34#include "gk20a/gk20a.h"
35#include "gk20a/mm_gk20a.h"
36
37#include "gp10b/fb_gp10b.h"
38
39#include "gv11b/fifo_gv11b.h"
40#include "gv11b/fb_gv11b.h"
41#include "gv11b/ce_gv11b.h"
42
43#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
44#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
45#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
46#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
47
48static int gv11b_fb_fix_page_fault(struct gk20a *g,
49 struct mmu_fault_info *mmfault);
50
51static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
52 u32 invalidate_replay_val);
53
54static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
55{
56 if (nvgpu_is_bpmp_running(g) && (!nvgpu_platform_is_simulation(g))) {
57 nvgpu_info(g, "nvlink soc credits init done by bpmp");
58 } else {
59 /* MSS_NVLINK_1_BASE */
60 void __iomem *soc1 = ioremap(0x01f20010, 4096);
61 /* MSS_NVLINK_2_BASE */
62 void __iomem *soc2 = ioremap(0x01f40010, 4096);
63 /* MSS_NVLINK_3_BASE */
64 void __iomem *soc3 = ioremap(0x01f60010, 4096);
65 /* MSS_NVLINK_4_BASE */
66 void __iomem *soc4 = ioremap(0x01f80010, 4096);
67 u32 val;
68
69 nvgpu_info(g, "init nvlink soc credits");
70
71 val = readl_relaxed(soc1);
72 writel_relaxed(val, soc1);
73 val = readl_relaxed(soc1 + 4);
74 writel_relaxed(val, soc1 + 4);
75
76 val = readl_relaxed(soc2);
77 writel_relaxed(val, soc2);
78 val = readl_relaxed(soc2 + 4);
79 writel_relaxed(val, soc2 + 4);
80
81 val = readl_relaxed(soc3);
82 writel_relaxed(val, soc3);
83 val = readl_relaxed(soc3 + 4);
84 writel_relaxed(val, soc3 + 4);
85
86 val = readl_relaxed(soc4);
87 writel_relaxed(val, soc4);
88 val = readl_relaxed(soc4 + 4);
89 writel_relaxed(val, soc4 + 4);
90 }
91}
92
93void gv11b_fb_init_fs_state(struct gk20a *g)
94{
95 nvgpu_log(g, gpu_dbg_fn, "initialize gv11b fb");
96
97 nvgpu_log(g, gpu_dbg_info, "fbhub active ltcs %x",
98 gk20a_readl(g, fb_fbhub_num_active_ltcs_r()));
99
100 nvgpu_log(g, gpu_dbg_info, "mmu active ltcs %u",
101 fb_mmu_num_active_ltcs_count_v(
102 gk20a_readl(g, fb_mmu_num_active_ltcs_r())));
103}
104
/*
 * Program the compression bit cache (CBC) backing store base register.
 *
 * The store address is right-shifted by the HW alignment, divided by the
 * LTC count (the HW multiplies it back internally), rounded up if the
 * division truncated below the real address, then written to
 * FB_MMU_CBC_BASE. Finally all comptag lines are invalidated so stale
 * compression state is never reused.
 */
void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 max_size = gr->max_comptag_mem;
	/* one tag line covers 64KB */
	u32 max_comptag_lines = max_size << 4;
	u32 compbit_base_post_divide;
	u64 compbit_base_post_multiply64;
	u64 compbit_store_iova;
	u64 compbit_base_post_divide64;

	/* fmodel wants the raw physical address; silicon the bus address */
	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
				&gr->compbit_store.mem);
	else
		compbit_store_iova = nvgpu_mem_get_addr(g,
				&gr->compbit_store.mem);

	compbit_base_post_divide64 = compbit_store_iova >>
		fb_mmu_cbc_base_address_alignment_shift_v();

	do_div(compbit_base_post_divide64, g->ltc_count);
	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);

	/* reconstruct the address the HW will compute to detect truncation */
	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
		g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v();

	if (compbit_base_post_multiply64 < compbit_store_iova)
		compbit_base_post_divide++;

	/* give the ltc code a chance to fix up the computed base */
	if (g->ops.ltc.cbc_fix_config)
		compbit_base_post_divide =
			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);

	gk20a_writel(g, fb_mmu_cbc_base_r(),
		fb_mmu_cbc_base_address_f(compbit_base_post_divide));

	nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
		"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
		(u32)(compbit_store_iova >> 32),
		(u32)(compbit_store_iova & 0xffffffff),
		compbit_base_post_divide);
	nvgpu_log(g, gpu_dbg_fn, "cbc base %x",
		gk20a_readl(g, fb_mmu_cbc_base_r()));

	gr->compbit_store.base_hw = compbit_base_post_divide;

	/* invalidate the entire comptag range */
	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
			0, max_comptag_lines - 1);

}
155
156void gv11b_fb_reset(struct gk20a *g)
157{
158 if (nvgpu_is_bpmp_running(g) && (!nvgpu_platform_is_simulation(g))) {
159 nvgpu_log(g, gpu_dbg_info, "mc_elpg_enable set by bpmp");
160 } else {
161 u32 mc_elpg_enable_val;
162
163 nvgpu_log(g, gpu_dbg_info, "enable xbar, pfb and hub");
164 mc_elpg_enable_val = mc_elpg_enable_xbar_enabled_f() |
165 mc_elpg_enable_pfb_enabled_f() |
166 mc_elpg_enable_hub_enabled_f();
167 mc_elpg_enable_val |= gk20a_readl(g, mc_elpg_enable_r());
168 gk20a_writel(g, mc_elpg_enable_r(), mc_elpg_enable_val);
169
170 }
171 /* fs hub should be out of reset by now */
172 gv11b_init_nvlink_soc_credits(g);
173}
174
175static const char * const invalid_str = "invalid";
176
177static const char *const fault_type_descs_gv11b[] = {
178 "invalid pde",
179 "invalid pde size",
180 "invalid pte",
181 "limit violation",
182 "unbound inst block",
183 "priv violation",
184 "write",
185 "read",
186 "pitch mask violation",
187 "work creation",
188 "unsupported aperture",
189 "compression failure",
190 "unsupported kind",
191 "region violation",
192 "poison",
193 "atomic"
194};
195
196static const char *const fault_client_type_descs_gv11b[] = {
197 "gpc",
198 "hub",
199};
200
201static const char *const fault_access_type_descs_gv11b[] = {
202 "virt read",
203 "virt write",
204 "virt atomic strong",
205 "virt prefetch",
206 "virt atomic weak",
207 "xxx",
208 "xxx",
209 "xxx",
210 "phys read",
211 "phys write",
212 "phys atomic",
213 "phys prefetch",
214};
215
216static const char *const hub_client_descs_gv11b[] = {
217 "vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
218 "host cpu nb", "iso", "mmu", "nvdec", "nvenc1", "nvenc2",
219 "niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
220 "scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
221 "nvenc", "d falcon", "sked", "a falcon", "hsce0", "hsce1",
222 "hsce2", "hsce3", "hsce4", "hsce5", "hsce6", "hsce7", "hsce8",
223 "hsce9", "hshub", "ptp x0", "ptp x1", "ptp x2", "ptp x3",
224 "ptp x4", "ptp x5", "ptp x6", "ptp x7", "vpr scrubber0",
225 "vpr scrubber1", "dwbif", "fbfalcon", "ce shim", "gsp",
226 "dont care"
227};
228
229static const char *const gpc_client_descs_gv11b[] = {
230 "t1 0", "t1 1", "t1 2", "t1 3",
231 "t1 4", "t1 5", "t1 6", "t1 7",
232 "pe 0", "pe 1", "pe 2", "pe 3",
233 "pe 4", "pe 5", "pe 6", "pe 7",
234 "rast", "gcc", "gpccs",
235 "prop 0", "prop 1", "prop 2", "prop 3",
236 "gpm",
237 "ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
238 "ltp utlb 4", "ltp utlb 5", "ltp utlb 6", "ltp utlb 7",
239 "utlb",
240 "t1 8", "t1 9", "t1 10", "t1 11",
241 "t1 12", "t1 13", "t1 14", "t1 15",
242 "tpccs 0", "tpccs 1", "tpccs 2", "tpccs 3",
243 "tpccs 4", "tpccs 5", "tpccs 6", "tpccs 7",
244 "pe 8", "pe 9", "tpccs 8", "tpccs 9",
245 "t1 16", "t1 17", "t1 18", "t1 19",
246 "pe 10", "pe 11", "tpccs 10", "tpccs 11",
247 "t1 20", "t1 21", "t1 22", "t1 23",
248 "pe 12", "pe 13", "tpccs 12", "tpccs 13",
249 "t1 24", "t1 25", "t1 26", "t1 27",
250 "pe 14", "pe 15", "tpccs 14", "tpccs 15",
251 "t1 28", "t1 29", "t1 30", "t1 31",
252 "pe 16", "pe 17", "tpccs 16", "tpccs 17",
253 "t1 32", "t1 33", "t1 34", "t1 35",
254 "pe 18", "pe 19", "tpccs 18", "tpccs 19",
255 "t1 36", "t1 37", "t1 38", "t1 39",
256};
257
258u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
259 unsigned int index)
260{
261 u32 reg_val;
262
263 reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
264 return fb_mmu_fault_buffer_size_enable_v(reg_val);
265}
266
/* Advance fault buffer @index's GET pointer to @next, acknowledging the
 * consumed entries; also clears an overflow raised while the fault was
 * being handled, and fences so no stale entry is re-read. */
static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
		unsigned int index, u32 next)
{
	u32 reg_val;

	nvgpu_log(g, gpu_dbg_intr, "updating get index with = %d", next);

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
	reg_val = set_field(reg_val, fb_mmu_fault_buffer_get_ptr_m(),
			fb_mmu_fault_buffer_get_ptr_f(next));

	/* while the fault is being handled it is possible for overflow
	 * to happen,
	 */
	if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
		reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();

	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);

	/* make sure get ptr update is visible to everyone to avoid
	 * reading already read entry
	 */
	nvgpu_mb();
}
291
292static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
293 unsigned int index)
294{
295 u32 reg_val;
296
297 reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
298 return fb_mmu_fault_buffer_get_ptr_v(reg_val);
299}
300
301static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
302 unsigned int index)
303{
304 u32 reg_val;
305
306 reg_val = gk20a_readl(g, fb_mmu_fault_buffer_put_r(index));
307 return fb_mmu_fault_buffer_put_ptr_v(reg_val);
308}
309
310static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
311 unsigned int index)
312{
313 u32 reg_val;
314
315 reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
316 return fb_mmu_fault_buffer_size_val_v(reg_val);
317}
318
319static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
320 unsigned int index, u32 *get_idx)
321{
322 u32 put_idx;
323
324 *get_idx = gv11b_fb_fault_buffer_get_index(g, index);
325 put_idx = gv11b_fb_fault_buffer_put_index(g, index);
326
327 return *get_idx == put_idx;
328}
329
330static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
331 unsigned int index)
332{
333 u32 get_idx, put_idx, entries;
334
335
336 get_idx = gv11b_fb_fault_buffer_get_index(g, index);
337
338 put_idx = gv11b_fb_fault_buffer_put_index(g, index);
339
340 entries = gv11b_fb_fault_buffer_size_val(g, index);
341
342 return get_idx == ((put_idx + 1) % entries);
343}
344
/*
 * Enable or disable hw fault buffer @index.
 *
 * Enabling is a simple set of the enable bit (no-op if already on).
 * Disabling additionally polls the mmu fault status register until the
 * busy bit drops (with exponential backoff and a CPU timeout), so hw is
 * guaranteed to be done with the buffer before the caller tears down or
 * remaps its backing memory.
 */
void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
		 unsigned int index, unsigned int state)
{
	u32 fault_status;
	u32 reg_val;

	nvgpu_log_fn(g, " ");

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
	if (state) {
		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
			nvgpu_log_info(g, "fault buffer is already enabled");
		} else {
			reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
			gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
				reg_val);
		}

	} else {
		struct nvgpu_timeout timeout;
		u32 delay = GR_IDLE_CHECK_DEFAULT;

		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);

		/* clear only the enable field; other size bits retained */
		reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
		gk20a_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);

		fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

		do {
			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
				break;
			/*
			 * Make sure fault buffer is disabled.
			 * This is to avoid accessing fault buffer by hw
			 * during the window BAR2 is being unmapped by s/w
			 */
			nvgpu_log_info(g, "fault status busy set, check again");
			fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

			/* exponential backoff, capped at GR_IDLE_CHECK_MAX */
			nvgpu_usleep_range(delay, delay * 2);
			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
		} while (!nvgpu_timeout_expired_msg(&timeout,
				"fault status busy set"));
	}
}
392
/*
 * (Re)program hw fault buffer @index: disable it first so hw is not
 * scanning it, write the GPU VA and entry count of the backing
 * nvgpu_mem, then re-enable it with the overflow interrupt armed.
 */
void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
{
	u32 addr_lo;
	u32 addr_hi;

	nvgpu_log_fn(g, " ");

	gv11b_fb_fault_buf_set_state_hw(g, index,
			 FAULT_BUF_DISABLED);
	/* lo register takes the VA shifted down by ram_in_base_shift_v() */
	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
			ram_in_base_shift_v());
	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);

	gk20a_writel(g, fb_mmu_fault_buffer_lo_r(index),
			fb_mmu_fault_buffer_lo_addr_f(addr_lo));

	gk20a_writel(g, fb_mmu_fault_buffer_hi_r(index),
			fb_mmu_fault_buffer_hi_addr_f(addr_hi));

	/* size is one entry per fifo/channel */
	gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
		fb_mmu_fault_buffer_size_overflow_intr_enable_f());

	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
}
418
419static void gv11b_fb_intr_en_set(struct gk20a *g,
420 unsigned int index, u32 mask)
421{
422 u32 reg_val;
423
424 reg_val = gk20a_readl(g, fb_niso_intr_en_set_r(index));
425 reg_val |= mask;
426 gk20a_writel(g, fb_niso_intr_en_set_r(index), reg_val);
427}
428
429static void gv11b_fb_intr_en_clr(struct gk20a *g,
430 unsigned int index, u32 mask)
431{
432 u32 reg_val;
433
434 reg_val = gk20a_readl(g, fb_niso_intr_en_clr_r(index));
435 reg_val |= mask;
436 gk20a_writel(g, fb_niso_intr_en_clr_r(index), reg_val);
437}
438
439static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
440 unsigned int intr_type)
441{
442 u32 mask = 0;
443
444 if (intr_type & HUB_INTR_TYPE_OTHER) {
445 mask |=
446 fb_niso_intr_en_clr_mmu_other_fault_notify_m();
447 }
448
449 if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
450 mask |=
451 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
452 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
453 }
454
455 if (intr_type & HUB_INTR_TYPE_REPLAY) {
456 mask |=
457 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
458 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
459 }
460
461 if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
462 mask |=
463 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
464 }
465
466 if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
467 mask |=
468 fb_niso_intr_en_clr_hub_access_counter_notify_m() |
469 fb_niso_intr_en_clr_hub_access_counter_error_m();
470 }
471
472 return mask;
473}
474
475static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
476 unsigned int intr_type)
477{
478 u32 mask = 0;
479
480 if (intr_type & HUB_INTR_TYPE_OTHER) {
481 mask |=
482 fb_niso_intr_en_set_mmu_other_fault_notify_m();
483 }
484
485 if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
486 mask |=
487 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
488 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
489 }
490
491 if (intr_type & HUB_INTR_TYPE_REPLAY) {
492 mask |=
493 fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
494 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
495 }
496
497 if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
498 mask |=
499 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
500 }
501
502 if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
503 mask |=
504 fb_niso_intr_en_set_hub_access_counter_notify_m() |
505 fb_niso_intr_en_set_hub_access_counter_error_m();
506 }
507
508 return mask;
509}
510
511void gv11b_fb_enable_hub_intr(struct gk20a *g,
512 unsigned int index, unsigned int intr_type)
513{
514 u32 mask = 0;
515
516 mask = gv11b_fb_get_hub_intr_en_mask(g, intr_type);
517
518 if (mask)
519 gv11b_fb_intr_en_set(g, index, mask);
520}
521
522void gv11b_fb_disable_hub_intr(struct gk20a *g,
523 unsigned int index, unsigned int intr_type)
524{
525 u32 mask = 0;
526
527 mask = gv11b_fb_get_hub_intr_clr_mask(g, intr_type);
528
529 if (mask)
530 gv11b_fb_intr_en_clr(g, index, mask);
531}
532
/*
 * Service an L2 TLB ECC interrupt: read the error address and the hw
 * corrected/uncorrected error counters, clear the counters and the
 * status, fold counter overflow into the deltas and accumulate them
 * into the driver-side ECC statistics.
 */
static void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_l2tlb_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_l2tlb_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_l2tlb_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_l2tlb_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
		fb_mmu_l2tlb_ecc_status_reset_clear_f());

	/* Handle overflow: credit one full wrap of the hw counter field */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0] +=
		corrected_delta;
	g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0] +=
		uncorrected_delta;

	if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0]);
}
590
/*
 * Service a HUB TLB ECC interrupt. Same flow as the l2tlb handler:
 * snapshot address + counters, clear them, fold overflow into the
 * deltas, accumulate into driver-side statistics.
 */
static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_hubtlb_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_hubtlb_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_hubtlb_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_hubtlb_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
		fb_mmu_hubtlb_ecc_status_reset_clear_f());

	/* Handle overflow: credit one full wrap of the hw counter field */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0] +=
		corrected_delta;
	g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0] +=
		uncorrected_delta;

	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0]);
}
648
/*
 * Service a fill-unit ECC interrupt. Same flow as the l2tlb/hubtlb
 * handlers, with additional status bits distinguishing PTE vs PDE0
 * data errors.
 */
static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_fillunit_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_fillunit_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_fillunit_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_fillunit_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
		fb_mmu_fillunit_ecc_status_reset_clear_f());

	/* Handle overflow: credit one full wrap of the hw counter field */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0] +=
		corrected_delta;
	g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0] +=
		uncorrected_delta;

	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");

	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu fillunit ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0]);
}
711
712static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
713{
714 if (WARN_ON(mmfault->fault_type >=
715 ARRAY_SIZE(fault_type_descs_gv11b)))
716 mmfault->fault_type_desc = invalid_str;
717 else
718 mmfault->fault_type_desc =
719 fault_type_descs_gv11b[mmfault->fault_type];
720
721 if (WARN_ON(mmfault->client_type >=
722 ARRAY_SIZE(fault_client_type_descs_gv11b)))
723 mmfault->client_type_desc = invalid_str;
724 else
725 mmfault->client_type_desc =
726 fault_client_type_descs_gv11b[mmfault->client_type];
727
728 mmfault->client_id_desc = invalid_str;
729 if (mmfault->client_type ==
730 gmmu_fault_client_type_hub_v()) {
731
732 if (!(WARN_ON(mmfault->client_id >=
733 ARRAY_SIZE(hub_client_descs_gv11b))))
734 mmfault->client_id_desc =
735 hub_client_descs_gv11b[mmfault->client_id];
736 } else if (mmfault->client_type ==
737 gmmu_fault_client_type_gpc_v()) {
738 if (!(WARN_ON(mmfault->client_id >=
739 ARRAY_SIZE(gpc_client_descs_gv11b))))
740 mmfault->client_id_desc =
741 gpc_client_descs_gv11b[mmfault->client_id];
742 }
743
744}
745
/*
 * Print a decoded mmu fault. The summary goes to the error log
 * unconditionally; the remaining detail lines are emitted only when
 * gpu_dbg_intr logging is enabled. No-op for NULL or invalid faults.
 */
static void gv11b_fb_print_fault_info(struct gk20a *g,
		 struct mmu_fault_info *mmfault)
{
	if (mmfault && mmfault->valid) {
		nvgpu_err(g, "[MMU FAULT] "
			"mmu engine id:  %d, "
			"ch id:  %d, "
			"fault addr: 0x%llx, "
			"fault addr aperture: %d, "
			"fault type: %s, "
			"access type: %s, ",
			mmfault->mmu_engine_id,
			mmfault->chid,
			mmfault->fault_addr,
			mmfault->fault_addr_aperture,
			mmfault->fault_type_desc,
			fault_access_type_descs_gv11b[mmfault->access_type]);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"mmu engine id:  %d, "
			"faulted act eng id if any: 0x%x, "
			"faulted veid if any: 0x%x, "
			"faulted pbdma id if any: 0x%x, "
			"fault addr: 0x%llx, ",
			mmfault->mmu_engine_id,
			mmfault->faulted_engine,
			mmfault->faulted_subid,
			mmfault->faulted_pbdma,
			mmfault->fault_addr);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"fault addr aperture: %d, "
			"fault type: %s, "
			"access type: %s, "
			"inst ptr: 0x%llx, "
			"inst ptr aperture: %d, ",
			mmfault->fault_addr_aperture,
			mmfault->fault_type_desc,
			fault_access_type_descs_gv11b[mmfault->access_type],
			mmfault->inst_ptr,
			mmfault->inst_aperture);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"ch id:  %d, "
			"timestamp hi:lo 0x%08x:0x%08x, "
			"client type: %s, "
			"client id:  %s, "
			"gpc id if client type is gpc: %d, ",
			mmfault->chid,
			mmfault->timestamp_hi, mmfault->timestamp_lo,
			mmfault->client_type_desc,
			mmfault->client_id_desc,
			mmfault->gpc_id);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"protected mode: %d, "
			"replayable fault: %d, "
			"replayable fault en:  %d ",
			mmfault->protected_mode,
			mmfault->replayable_fault,
			mmfault->replay_fault_en);
	}
}
805
806/*
807 *Fault buffer format
808 *
809 * 31 28 24 23 16 15 8 7 4 0
810 *.-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-.
811 *| inst_lo |0 0|apr|0 0 0 0 0 0 0 0|
812 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
813 *| inst_hi |
814 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
815 *| addr_31_12 | |AP |
816 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
817 *| addr_63_32 |
818 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
819 *| timestamp_lo |
820 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
821 *| timestamp_hi |
822 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
823 *| (reserved) | engine_id |
824 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
825 *|V|R|P| gpc_id |0 0 0|t|0|acctp|0| client |RF0 0|faulttype|
826 */
827
/*
 * Decode one hw fault buffer entry (format in the comment above) at
 * word @offset of @mem into @mmfault. Looks up the faulting channel by
 * instance pointer and takes a reference on it (stored in
 * mmfault->refch; the handler that consumes the fault puts it back),
 * clears the entry's valid bit in the buffer, and fills in the
 * description strings.
 */
static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
	 struct nvgpu_mem *mem, u32 offset, struct mmu_fault_info *mmfault)
{
	u32 rd32_val;
	u32 addr_lo, addr_hi;
	u64 inst_ptr;
	u32 chid = FIFO_INVAL_CHANNEL_ID;
	struct channel_gk20a *refch;

	memset(mmfault, 0, sizeof(*mmfault));

	/* instance block pointer: lo word is stored shifted down */
	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_inst_lo_w());
	addr_lo = gmmu_fault_buf_entry_inst_lo_v(rd32_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	addr_hi = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_inst_hi_w());
	addr_hi = gmmu_fault_buf_entry_inst_hi_v(addr_hi);

	inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);

	/* refch will be put back after fault is handled */
	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
	if (refch)
		chid = refch->chid;

	/* it is ok to continue even if refch is NULL */
	mmfault->refch = refch;
	mmfault->chid = chid;
	mmfault->inst_ptr = inst_ptr;
	/* rd32_val still holds the inst_lo word here */
	mmfault->inst_aperture = gmmu_fault_buf_entry_inst_aperture_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_addr_lo_w());

	mmfault->fault_addr_aperture =
		gmmu_fault_buf_entry_addr_phys_aperture_v(rd32_val);
	addr_lo = gmmu_fault_buf_entry_addr_lo_v(rd32_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_addr_hi_w());
	addr_hi = gmmu_fault_buf_entry_addr_hi_v(rd32_val);
	mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_timestamp_lo_w());
	mmfault->timestamp_lo =
		gmmu_fault_buf_entry_timestamp_lo_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_timestamp_hi_w());
	mmfault->timestamp_hi =
		gmmu_fault_buf_entry_timestamp_hi_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_engine_id_w());

	mmfault->mmu_engine_id =
		gmmu_fault_buf_entry_engine_id_v(rd32_val);
	gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
		&mmfault->faulted_engine, &mmfault->faulted_subid,
		&mmfault->faulted_pbdma);

	/* the fault_type word also carries client/access/valid fields */
	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_fault_type_w());
	mmfault->client_id =
		gmmu_fault_buf_entry_client_v(rd32_val);
	mmfault->replayable_fault =
		gmmu_fault_buf_entry_replayable_fault_v(rd32_val);

	mmfault->fault_type =
		gmmu_fault_buf_entry_fault_type_v(rd32_val);
	mmfault->access_type =
		gmmu_fault_buf_entry_access_type_v(rd32_val);

	mmfault->client_type =
		gmmu_fault_buf_entry_mmu_client_type_v(rd32_val);

	mmfault->gpc_id =
		gmmu_fault_buf_entry_gpc_id_v(rd32_val);
	mmfault->protected_mode =
		gmmu_fault_buf_entry_protected_mode_v(rd32_val);

	mmfault->replay_fault_en =
		gmmu_fault_buf_entry_replayable_fault_en_v(rd32_val);

	mmfault->valid = gmmu_fault_buf_entry_valid_v(rd32_val);

	/* clear the valid bit so this entry is not decoded twice */
	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_fault_type_w());
	rd32_val &= ~(gmmu_fault_buf_entry_valid_m());
	nvgpu_mem_wr32(g, mem, offset + gmmu_fault_buf_entry_valid_w(),
			rd32_val);

	gv11b_fb_parse_mmfault(mmfault);
}
926
/*
 * Common handling for one decoded mmu fault:
 *  - CE engine faults: try to fix the page fault in place; on success
 *    drop the channel ref and return, otherwise fall through to
 *    recovery.
 *  - Non-replayable faults: tear down the channel/TSG, or the whole
 *    runlist (ID_TYPE_UNKNOWN) for unbound-instance-block faults.
 *  - Replayable faults: try to fix the PTE and OR the resulting
 *    replay/cancel action into *invalidate_replay_val for the caller
 *    to issue once, then drop the channel ref.
 *
 * mmfault->refch was taken when the fault info was copied from the
 * snap registers or fault buffer; it is released on the paths above.
 * NOTE(review): on the teardown path refch is handed to
 * g->ops.fifo.teardown_ch_tsg via mmfault — confirm teardown releases
 * it.
 */
static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
		 struct mmu_fault_info *mmfault, u32 *invalidate_replay_val)
{
	unsigned int id_type;
	u32 num_lce, act_eng_bitmask = 0;
	int err = 0;
	u32 id = ((u32)~0);

	if (!mmfault->valid)
		return;

	gv11b_fb_print_fault_info(g, mmfault);

	num_lce = gv11b_ce_get_num_lce(g);
	if ((mmfault->mmu_engine_id >=
			gmmu_fault_mmu_eng_id_ce0_v()) &&
			(mmfault->mmu_engine_id <
			gmmu_fault_mmu_eng_id_ce0_v() + num_lce)) {
		/* CE page faults are not reported as replayable */
		nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
		err = gv11b_fb_fix_page_fault(g, mmfault);
		gv11b_fifo_reset_pbdma_and_eng_faulted(g, mmfault->refch,
			mmfault->faulted_pbdma, mmfault->faulted_engine);
		if (!err) {
			nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
			*invalidate_replay_val = 0;
			/* refch in mmfault is assigned at the time of copying
			 * fault info from snap reg or bar2 fault buf
			 */
			gk20a_channel_put(mmfault->refch);
			return;
		}
		/* Do recovery. Channel recovery needs refch */
		nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Not Fixed");
	}

	if (!mmfault->replayable_fault) {
		if (mmfault->fault_type ==
				gmmu_fault_type_unbound_inst_block_v()) {
			/*
			 * Bug 1847172: When an engine faults due to an unbound
			 * instance block, the fault cannot be isolated to a
			 * single context so we need to reset the entire runlist
			 */
			id_type = ID_TYPE_UNKNOWN;
			nvgpu_log(g, gpu_dbg_intr, "UNBOUND INST BLOCK MMU FAULT");

		} else if (mmfault->refch) {
			if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
				id = mmfault->refch->tsgid;
				id_type = ID_TYPE_TSG;
			} else {
				id = mmfault->chid;
				id_type = ID_TYPE_CHANNEL;
			}
		} else {
			id_type = ID_TYPE_UNKNOWN;
		}
		if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
			act_eng_bitmask = BIT(mmfault->faulted_engine);

		g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
			id, id_type, RC_TYPE_MMU_FAULT, mmfault);
	} else {
		if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
			nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
			err = gv11b_fb_fix_page_fault(g, mmfault);
			if (err)
				*invalidate_replay_val |=
					fb_mmu_invalidate_replay_cancel_global_f();
			else
				*invalidate_replay_val |=
					fb_mmu_invalidate_replay_start_ack_all_f();
		} else {
			/* cancel faults other than invalid pte */
			*invalidate_replay_val |=
				fb_mmu_invalidate_replay_cancel_global_f();
		}
		/* refch in mmfault is assigned at the time of copying
		 * fault info from snap reg or bar2 fault buf
		 */
		gk20a_channel_put(mmfault->refch);
	}
}
1011
1012static void gv11b_fb_replay_or_cancel_faults(struct gk20a *g,
1013 u32 invalidate_replay_val)
1014{
1015 int err = 0;
1016
1017 nvgpu_log_fn(g, " ");
1018
1019 if (invalidate_replay_val &
1020 fb_mmu_invalidate_replay_cancel_global_f()) {
1021 /*
1022 * cancel faults so that next time it faults as
1023 * replayable faults and channel recovery can be done
1024 */
1025 err = gv11b_fb_mmu_invalidate_replay(g,
1026 fb_mmu_invalidate_replay_cancel_global_f());
1027 } else if (invalidate_replay_val &
1028 fb_mmu_invalidate_replay_start_ack_all_f()) {
1029 /* pte valid is fixed. replay faulting request */
1030 err = gv11b_fb_mmu_invalidate_replay(g,
1031 fb_mmu_invalidate_replay_start_ack_all_f());
1032 }
1033}
1034
/*
 * Drain all valid entries from hw fault buffer @index (replay or
 * non-replay). Each entry is decoded, the GET pointer is advanced
 * before handling so hw can reuse the slot, and the fault is passed to
 * the common handler. For the replay buffer, consecutive faults on the
 * same address are skipped (the PTE was already fixed) and the
 * accumulated invalidate/replay action is issued once at the end.
 */
static void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
		 u32 fault_status, unsigned int index)
{
	u32 get_indx, offset, rd32_val, entries;
	struct nvgpu_mem *mem;
	struct mmu_fault_info *mmfault;
	u32 invalidate_replay_val = 0;
	u64 prev_fault_addr = 0ULL;
	u64 next_fault_addr = 0ULL;

	if (gv11b_fb_is_fault_buffer_empty(g, index, &get_indx)) {
		nvgpu_log(g, gpu_dbg_intr,
			"SPURIOUS mmu fault: reg index:%d", index);
		return;
	}
	nvgpu_info(g, "%s MMU FAULT" ,
			index == REPLAY_REG_INDEX ? "REPLAY" : "NON-REPLAY");

	nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);

	mem = &g->mm.hw_fault_buf[index];
	mmfault = g->mm.fault_info[index];

	entries = gv11b_fb_fault_buffer_size_val(g, index);
	nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries);

	/* word offset of the entry at GET */
	offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
	nvgpu_log(g, gpu_dbg_intr, "starting word offset = 0x%x", offset);

	rd32_val = nvgpu_mem_rd32(g, mem,
		 offset + gmmu_fault_buf_entry_valid_w());
	nvgpu_log(g, gpu_dbg_intr, "entry valid offset val = 0x%x", rd32_val);

	while ((rd32_val & gmmu_fault_buf_entry_valid_m())) {

		nvgpu_log(g, gpu_dbg_intr, "entry valid = 0x%x", rd32_val);

		gv11b_fb_copy_from_hw_fault_buf(g, mem, offset, mmfault);

		/* advance GET before handling so hw can reuse the slot */
		get_indx = (get_indx + 1) % entries;
		nvgpu_log(g, gpu_dbg_intr, "new get index = %d", get_indx);

		gv11b_fb_fault_buffer_get_ptr_update(g, index, get_indx);

		offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
		nvgpu_log(g, gpu_dbg_intr, "next word offset = 0x%x", offset);

		rd32_val = nvgpu_mem_rd32(g, mem,
			 offset + gmmu_fault_buf_entry_valid_w());

		if (index == REPLAY_REG_INDEX && mmfault->fault_addr != 0ULL) {
			/* fault_addr "0" is not supposed to be fixed ever.
			 * For the first time when prev = 0, next = 0 and
			 * fault addr is also 0 then handle_mmu_fault_common will
			 * not be called. Fix by checking fault_addr not equal to 0
			 */
			prev_fault_addr = next_fault_addr;
			next_fault_addr = mmfault->fault_addr;
			if (prev_fault_addr == next_fault_addr) {
				nvgpu_log(g, gpu_dbg_intr, "pte is fixed");
				if (mmfault->refch)
					gk20a_channel_put(mmfault->refch);
				/* pte already fixed for this addr */
				continue;
			}
		}

		gv11b_fb_handle_mmu_fault_common(g, mmfault,
				 &invalidate_replay_val);

	}
	if (index == REPLAY_REG_INDEX && invalidate_replay_val)
		gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
}
1109
1110static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
1111 u32 fault_status, struct mmu_fault_info *mmfault)
1112{
1113 u32 reg_val;
1114 u32 addr_lo, addr_hi;
1115 u64 inst_ptr;
1116 int chid = FIFO_INVAL_CHANNEL_ID;
1117 struct channel_gk20a *refch;
1118
1119 memset(mmfault, 0, sizeof(*mmfault));
1120
1121 if (!(fault_status & fb_mmu_fault_status_valid_set_f())) {
1122
1123 nvgpu_log(g, gpu_dbg_intr, "mmu fault status valid not set");
1124 return;
1125 }
1126
1127 reg_val = gk20a_readl(g, fb_mmu_fault_inst_lo_r());
1128 addr_lo = fb_mmu_fault_inst_lo_addr_v(reg_val);
1129 addr_lo = addr_lo << ram_in_base_shift_v();
1130
1131 addr_hi = gk20a_readl(g, fb_mmu_fault_inst_hi_r());
1132 addr_hi = fb_mmu_fault_inst_hi_addr_v(addr_hi);
1133 inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);
1134
1135 /* refch will be put back after fault is handled */
1136 refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
1137 if (refch)
1138 chid = refch->chid;
1139
1140 /* It is still ok to continue if refch is NULL */
1141 mmfault->refch = refch;
1142 mmfault->chid = chid;
1143 mmfault->inst_ptr = inst_ptr;
1144 mmfault->inst_aperture = fb_mmu_fault_inst_lo_aperture_v(reg_val);
1145 mmfault->mmu_engine_id = fb_mmu_fault_inst_lo_engine_id_v(reg_val);
1146
1147 gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
1148 &mmfault->faulted_engine, &mmfault->faulted_subid,
1149 &mmfault->faulted_pbdma);
1150
1151 reg_val = gk20a_readl(g, fb_mmu_fault_addr_lo_r());
1152 addr_lo = fb_mmu_fault_addr_lo_addr_v(reg_val);
1153 addr_lo = addr_lo << ram_in_base_shift_v();
1154
1155 mmfault->fault_addr_aperture =
1156 fb_mmu_fault_addr_lo_phys_aperture_v(reg_val);
1157
1158 addr_hi = gk20a_readl(g, fb_mmu_fault_addr_hi_r());
1159 addr_hi = fb_mmu_fault_addr_hi_addr_v(addr_hi);
1160 mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);
1161
1162 reg_val = gk20a_readl(g, fb_mmu_fault_info_r());
1163 mmfault->fault_type = fb_mmu_fault_info_fault_type_v(reg_val);
1164 mmfault->replayable_fault =
1165 fb_mmu_fault_info_replayable_fault_v(reg_val);
1166 mmfault->client_id = fb_mmu_fault_info_client_v(reg_val);
1167 mmfault->access_type = fb_mmu_fault_info_access_type_v(reg_val);
1168 mmfault->client_type = fb_mmu_fault_info_client_type_v(reg_val);
1169 mmfault->gpc_id = fb_mmu_fault_info_gpc_id_v(reg_val);
1170 mmfault->protected_mode =
1171 fb_mmu_fault_info_protected_mode_v(reg_val);
1172 mmfault->replay_fault_en =
1173 fb_mmu_fault_info_replayable_fault_en_v(reg_val);
1174
1175 mmfault->valid = fb_mmu_fault_info_valid_v(reg_val);
1176
1177 fault_status &= ~(fb_mmu_fault_status_valid_m());
1178 gk20a_writel(g, fb_mmu_fault_status_r(), fault_status);
1179
1180 gv11b_fb_parse_mmfault(mmfault);
1181
1182}
1183
/*
 * Handle error conditions on the replayable fault buffer: a corrupted
 * GET pointer forces a full reconfigure of the buffer, and an overflow
 * is logged and its flag cleared. Both clears are written back through
 * the GET register in a single write at the end.
 */
static void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
		 u32 fault_status)
{
	u32 reg_val;
	unsigned int index = REPLAY_REG_INDEX;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));

	if (fault_status &
		 fb_mmu_fault_status_replayable_getptr_corrupted_m()) {

		nvgpu_err(g, "replayable getptr corrupted set");

		gv11b_fb_fault_buf_configure_hw(g, index);

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_getptr_corrupted_m(),
			fb_mmu_fault_buffer_get_getptr_corrupted_clear_f());
	}

	if (fault_status &
		 fb_mmu_fault_status_replayable_overflow_m()) {
		bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index);

		nvgpu_err(g, "replayable overflow: buffer full:%s",
				buffer_full?"true":"false");

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_overflow_m(),
			fb_mmu_fault_buffer_get_overflow_clear_f());
	}

	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
}
1218
/*
 * Handle error conditions on the non-replayable fault buffer. Mirrors
 * gv11b_fb_handle_replay_fault_overflow but keys off the
 * non_replayable status bits and NONREPLAY_REG_INDEX.
 */
static void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
		 u32 fault_status)
{
	u32 reg_val;
	unsigned int index = NONREPLAY_REG_INDEX;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));

	if (fault_status &
		 fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) {

		nvgpu_err(g, "non replayable getptr corrupted set");

		gv11b_fb_fault_buf_configure_hw(g, index);

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_getptr_corrupted_m(),
			fb_mmu_fault_buffer_get_getptr_corrupted_clear_f());
	}

	if (fault_status &
		 fb_mmu_fault_status_non_replayable_overflow_m()) {

		bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index);

		nvgpu_err(g, "non replayable overflow: buffer full:%s",
				buffer_full?"true":"false");

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_overflow_m(),
			fb_mmu_fault_buffer_get_overflow_clear_f());
	}

	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
}
1254
/*
 * Recover from an MMU fault on the BAR2 engine. Hub interrupts for the
 * fault buffers are masked for the duration of the recovery, the fault
 * buffers are reprogrammed if hw flagged them in error, the CE method
 * buffer fault is handled, and BAR2 mm hw state is re-initialized.
 * Drops the channel reference taken when the fault info was collected.
 */
static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
			struct mmu_fault_info *mmfault, u32 fault_status)
{
	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX,
			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);


	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
			gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
	}

	if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
		if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))
			gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
	}
	gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);

	/* re-initialize BAR2 address space hw state after the fault */
	g->ops.mm.init_bar2_mm_hw_setup(g);

	if (mmfault->refch) {
		gk20a_channel_put(mmfault->refch);
		mmfault->refch = NULL;
	}
	gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
}
1282
/*
 * Handle the "other" fault notify interrupt: the fault information is
 * read from the MMU fault snap registers (these faults are not written
 * to a fault buffer). BAR2 and physical faults get dedicated handling;
 * everything else goes through the common mmu fault path, optionally
 * followed by a replay/cancel invalidate.
 */
static void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
			 u32 fault_status)
{
	struct mmu_fault_info *mmfault;
	u32 invalidate_replay_val = 0;

	mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];

	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);

	/* BAR2/Physical faults will not be snapped in hw fault buf */
	if (mmfault->mmu_engine_id == gmmu_fault_mmu_eng_id_bar2_v()) {
		nvgpu_err(g, "BAR2 MMU FAULT");
		gv11b_fb_handle_bar2_fault(g, mmfault, fault_status);

	} else if (mmfault->mmu_engine_id ==
			gmmu_fault_mmu_eng_id_physical_v()) {
		/* usually means VPR or out of bounds physical accesses */
		nvgpu_err(g, "PHYSICAL MMU FAULT");

	} else {
		gv11b_fb_handle_mmu_fault_common(g, mmfault,
				 &invalidate_replay_val);

		/* common handler may request a replay start or cancel */
		if (invalidate_replay_val)
			gv11b_fb_replay_or_cancel_faults(g,
					invalidate_replay_val);
	}
}
1312
1313static void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status)
1314{
1315 u32 dropped_faults = 0;
1316
1317 dropped_faults = fb_mmu_fault_status_dropped_bar1_phys_set_f() |
1318 fb_mmu_fault_status_dropped_bar1_virt_set_f() |
1319 fb_mmu_fault_status_dropped_bar2_phys_set_f() |
1320 fb_mmu_fault_status_dropped_bar2_virt_set_f() |
1321 fb_mmu_fault_status_dropped_ifb_phys_set_f() |
1322 fb_mmu_fault_status_dropped_ifb_virt_set_f() |
1323 fb_mmu_fault_status_dropped_other_phys_set_f()|
1324 fb_mmu_fault_status_dropped_other_virt_set_f();
1325
1326 if (fault_status & dropped_faults) {
1327 nvgpu_err(g, "dropped mmu fault (0x%08x)",
1328 fault_status & dropped_faults);
1329 gk20a_writel(g, fb_mmu_fault_status_r(), dropped_faults);
1330 }
1331}
1332
1333
/*
 * Top-level MMU fault dispatcher, called from the hub ISR with the
 * pending niso interrupt bits. Routes "other" faults to the snap
 * register path and buffered (replayable / non-replayable) faults to
 * the fault-buffer handlers, then clears the fault status valid bit.
 */
static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
{
	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

	nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);

	if (niso_intr &
		 fb_niso_intr_mmu_other_fault_notify_m()) {

		/* dropped faults are reported/cleared before handling */
		gv11b_fb_handle_dropped_mmu_fault(g, fault_status);

		gv11b_fb_handle_other_fault_notify(g, fault_status);
	}

	/* buffered faults are only serviced when the buffer is enabled */
	if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) {

		if (niso_intr &
		 fb_niso_intr_mmu_nonreplayable_fault_notify_m()) {

			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
					fault_status, NONREPLAY_REG_INDEX);

			/*
			 * When all the faults are processed,
			 * GET and PUT will have same value and mmu fault status
			 * bit will be reset by HW
			 */
		}
		if (niso_intr &
		 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()) {

			gv11b_fb_handle_nonreplay_fault_overflow(g,
				 fault_status);
		}

	}

	if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX)) {

		if (niso_intr &
		 fb_niso_intr_mmu_replayable_fault_notify_m()) {

			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
					fault_status, REPLAY_REG_INDEX);
		}
		if (niso_intr &
		 fb_niso_intr_mmu_replayable_fault_overflow_m()) {

			gv11b_fb_handle_replay_fault_overflow(g,
				 fault_status);
		}

	}

	nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
	gk20a_writel(g, fb_mmu_fault_status_r(),
				fb_mmu_fault_status_valid_clear_f());
}
1392
/*
 * Hub interrupt service routine. Serialized by mm.hub_isr_mutex.
 * Demultiplexes the niso interrupt register into access counter
 * notifications, ECC uncorrected errors (l2tlb/hubtlb/fillunit) and
 * MMU faults. ECC handling runs with the ECC hub interrupt masked.
 */
void gv11b_fb_hub_isr(struct gk20a *g)
{
	u32 status, niso_intr;

	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

	niso_intr = gk20a_readl(g, fb_niso_intr_r());

	nvgpu_info(g, "enter hub isr, niso_intr = 0x%08x", niso_intr);

	if (niso_intr &
		 (fb_niso_intr_hub_access_counter_notify_m() |
		  fb_niso_intr_hub_access_counter_error_m())) {

		/* NOTE(review): only logged; no handling implemented here */
		nvgpu_info(g, "hub access counter notify/error");
	}
	if (niso_intr &
		 fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) {

		nvgpu_info(g, "ecc uncorrected error notify");

		/* disable interrupts during handling */
		gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX,
				HUB_INTR_TYPE_ECC_UNCORRECTED);

		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
		if (status)
			gv11b_handle_l2tlb_ecc_isr(g, status);

		status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r());
		if (status)
			gv11b_handle_hubtlb_ecc_isr(g, status);

		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
		if (status)
			gv11b_handle_fillunit_ecc_isr(g, status);

		/* re-enable interrupts after handling */
		gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
				HUB_INTR_TYPE_ECC_UNCORRECTED);

	}
	if (niso_intr &
		 (fb_niso_intr_mmu_other_fault_notify_m() |
		  fb_niso_intr_mmu_replayable_fault_notify_m() |
		  fb_niso_intr_mmu_replayable_fault_overflow_m() |
		  fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
		  fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {

		nvgpu_info(g, "MMU Fault");
		gv11b_fb_handle_mmu_fault(g, niso_intr);
	}

	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
}
1448
1449bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
1450{
1451 if (gk20a_readl(g, fb_niso_intr_r()) &
1452 (fb_niso_intr_mmu_other_fault_notify_m() |
1453 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
1454 fb_niso_intr_mmu_replayable_fault_notify_m() |
1455 fb_niso_intr_mmu_replayable_fault_overflow_m() |
1456 fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
1457 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
1458 return true;
1459
1460 return false;
1461}
1462
1463static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
1464 u32 invalidate_replay_val)
1465{
1466 int err = -ETIMEDOUT;
1467 u32 reg_val;
1468 struct nvgpu_timeout timeout;
1469
1470 gk20a_dbg_fn("");
1471
1472 nvgpu_mutex_acquire(&g->mm.tlb_lock);
1473
1474 reg_val = gk20a_readl(g, fb_mmu_invalidate_r());
1475
1476 reg_val |= fb_mmu_invalidate_all_va_true_f() |
1477 fb_mmu_invalidate_all_pdb_true_f() |
1478 invalidate_replay_val |
1479 fb_mmu_invalidate_trigger_true_f();
1480
1481 gk20a_writel(g, fb_mmu_invalidate_r(), reg_val);
1482
1483 /* retry 200 times */
1484 nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
1485 do {
1486 reg_val = gk20a_readl(g, fb_mmu_ctrl_r());
1487 if (fb_mmu_ctrl_pri_fifo_empty_v(reg_val) !=
1488 fb_mmu_ctrl_pri_fifo_empty_false_f()) {
1489 err = 0;
1490 break;
1491 }
1492 nvgpu_udelay(5);
1493 } while (!nvgpu_timeout_expired_msg(&timeout,
1494 "invalidate replay failed on 0x%llx"));
1495 if (err)
1496 nvgpu_err(g, "invalidate replay timedout");
1497
1498 nvgpu_mutex_release(&g->mm.tlb_lock);
1499
1500 return err;
1501}
1502
/*
 * Attempt to "fix" a page fault by setting the valid bit on the PTE
 * backing the faulting address (and dropping read-only if set), then
 * invalidating the TLB so the GMMU re-fetches the updated PTE.
 *
 * Returns 0 on success, -EINVAL if no channel is associated with the
 * fault, -1 if the PTE is all zeros or already valid, or the error
 * from the PTE lookup/update helpers.
 */
static int gv11b_fb_fix_page_fault(struct gk20a *g,
			 struct mmu_fault_info *mmfault)
{
	int err = 0;
	/* pte[0]=low word (flags live here), pte[1]=high word */
	u32 pte[2];

	if (mmfault->refch == NULL) {
		nvgpu_log(g, gpu_dbg_intr, "refch from mmu_fault_info is NULL");
		return -EINVAL;
	}

	err = __nvgpu_get_pte(g,
			mmfault->refch->vm, mmfault->fault_addr, &pte[0]);
	if (err) {
		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not found");
		return err;
	}
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
			"pte: %#08x %#08x", pte[1], pte[0]);

	/* a fully zero PTE means the page was never mapped: don't touch */
	if (pte[0] == 0x0 && pte[1] == 0x0) {
		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
				"pte all zeros, do not set valid");
		return -1;
	}
	/* already valid: this fault is not fixable by flipping the bit */
	if (pte[0] & gmmu_new_pte_valid_true_f()) {
		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
				"pte valid already set");
		return -1;
	}

	pte[0] |= gmmu_new_pte_valid_true_f();
	if (pte[0] & gmmu_new_pte_read_only_true_f())
		pte[0] &= ~(gmmu_new_pte_read_only_true_f());
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
			"new pte: %#08x %#08x", pte[1], pte[0]);

	err = __nvgpu_set_pte(g,
			mmfault->refch->vm, mmfault->fault_addr, &pte[0]);
	if (err) {
		nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not fixed");
		return err;
	}
	/* invalidate tlb so that GMMU does not use old cached translation */
	g->ops.fb.tlb_invalidate(g, mmfault->refch->vm->pdb.mem);

	/* read back purely for debug visibility of the final PTE state */
	err = __nvgpu_get_pte(g,
			mmfault->refch->vm, mmfault->fault_addr, &pte[0]);
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
			"pte after tlb invalidate: %#08x %#08x",
			pte[1], pte[0]);
	return err;
}
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
new file mode 100644
index 00000000..d9a6fa77
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
@@ -0,0 +1,72 @@
1/*
2 * GV11B FB
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GV11B_FB
26#define _NVGPU_GV11B_FB
27
28#define STALL_REG_INDEX 0
29#define NONSTALL_REG_INDEX 1
30
31#define NONREPLAY_REG_INDEX 0
32#define REPLAY_REG_INDEX 1
33
34#define FAULT_BUF_DISABLED 0
35#define FAULT_BUF_ENABLED 1
36
37#define FAULT_BUF_INVALID 0
38#define FAULT_BUF_VALID 1
39
40#define HUB_INTR_TYPE_OTHER 1 /* bit 0 */
41#define HUB_INTR_TYPE_NONREPLAY 2 /* bit 1 */
42#define HUB_INTR_TYPE_REPLAY 4 /* bit 2 */
43#define HUB_INTR_TYPE_ECC_UNCORRECTED 8 /* bit 3 */
44#define HUB_INTR_TYPE_ACCESS_COUNTER 16 /* bit 4 */
45#define HUB_INTR_TYPE_ALL (HUB_INTR_TYPE_OTHER | \
46 HUB_INTR_TYPE_NONREPLAY | \
47 HUB_INTR_TYPE_REPLAY | \
48 HUB_INTR_TYPE_ECC_UNCORRECTED | \
49 HUB_INTR_TYPE_ACCESS_COUNTER)
50
51#define FAULT_TYPE_OTHER_AND_NONREPLAY 0
52#define FAULT_TYPE_REPLAY 1
53
54struct gk20a;
55
56void gv11b_fb_init_fs_state(struct gk20a *g);
57void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
58void gv11b_fb_reset(struct gk20a *g);
59void gv11b_fb_hub_isr(struct gk20a *g);
60
61u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
62 unsigned int index);
63void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
64 unsigned int index, unsigned int state);
65void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
66void gv11b_fb_enable_hub_intr(struct gk20a *g,
67 unsigned int index, unsigned int intr_type);
68void gv11b_fb_disable_hub_intr(struct gk20a *g,
69 unsigned int index, unsigned int intr_type);
70bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
71
72#endif
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
new file mode 100644
index 00000000..f87c6dea
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -0,0 +1,1907 @@
1/*
2 * GV11B fifo
3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#include <linux/delay.h>
25#include <linux/types.h>
26
27#include <nvgpu/semaphore.h>
28#include <nvgpu/timers.h>
29#include <nvgpu/log.h>
30#include <nvgpu/dma.h>
31#include <nvgpu/nvgpu_mem.h>
32#include <nvgpu/gmmu.h>
33#include <nvgpu/soc.h>
34#include <nvgpu/debug.h>
35#include <nvgpu/nvhost_t19x.h>
36#include <nvgpu/barrier.h>
37#include <nvgpu/mm.h>
38#include <nvgpu/ctxsw_trace.h>
39
40#include "gk20a/gk20a.h"
41#include "gk20a/fifo_gk20a.h"
42#include "gk20a/channel_gk20a.h"
43
44#include "gp10b/fifo_gp10b.h"
45
46#include <nvgpu/hw/gv11b/hw_pbdma_gv11b.h>
47#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
48#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
49#include <nvgpu/hw/gv11b/hw_ccsr_gv11b.h>
50#include <nvgpu/hw/gv11b/hw_usermode_gv11b.h>
51#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
52#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
53#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
54#include <nvgpu/hw/gv11b/hw_gr_gv11b.h>
55
56#include "fifo_gv11b.h"
57#include "subctx_gv11b.h"
58#include "gr_gv11b.h"
59#include "mc_gv11b.h"
60
61#define PBDMA_SUBDEVICE_ID 1
62
63static void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g,
64 struct channel_gk20a *ch, struct nvgpu_mem *mem);
65
66void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
67{
68
69 u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
70
71 if (tsg->timeslice_timeout)
72 runlist_entry_0 |=
73 ram_rl_entry_tsg_timeslice_scale_f(tsg->timeslice_scale) |
74 ram_rl_entry_tsg_timeslice_timeout_f(tsg->timeslice_timeout);
75 else
76 runlist_entry_0 |=
77 ram_rl_entry_tsg_timeslice_scale_f(
78 ram_rl_entry_tsg_timeslice_scale_3_v()) |
79 ram_rl_entry_tsg_timeslice_timeout_f(
80 ram_rl_entry_tsg_timeslice_timeout_128_v());
81
82 runlist[0] = runlist_entry_0;
83 runlist[1] = ram_rl_entry_tsg_length_f(tsg->num_active_channels);
84 runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid);
85 runlist[3] = 0;
86
87 gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n",
88 runlist[0], runlist[1], runlist[2], runlist[3]);
89
90}
91
92void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
93{
94 struct gk20a *g = c->g;
95 u32 addr_lo, addr_hi;
96 u32 runlist_entry;
97
98 /* Time being use 0 pbdma sequencer */
99 runlist_entry = ram_rl_entry_type_channel_v() |
100 ram_rl_entry_chan_runqueue_selector_f(
101 c->t19x.runqueue_sel) |
102 ram_rl_entry_chan_userd_target_f(
103 nvgpu_aperture_mask(g, &g->fifo.userd,
104 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(),
105 ram_rl_entry_chan_userd_target_vid_mem_v())) |
106 ram_rl_entry_chan_inst_target_f(
107 nvgpu_aperture_mask(g, &c->inst_block,
108 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(),
109 ram_rl_entry_chan_inst_target_vid_mem_v()));
110
111 addr_lo = u64_lo32(c->userd_iova) >>
112 ram_rl_entry_chan_userd_ptr_align_shift_v();
113 addr_hi = u64_hi32(c->userd_iova);
114 runlist[0] = runlist_entry | ram_rl_entry_chan_userd_ptr_lo_f(addr_lo);
115 runlist[1] = ram_rl_entry_chan_userd_ptr_hi_f(addr_hi);
116
117 addr_lo = u64_lo32(nvgpu_inst_block_addr(g, &c->inst_block)) >>
118 ram_rl_entry_chan_inst_ptr_align_shift_v();
119 addr_hi = u64_hi32(nvgpu_inst_block_addr(g, &c->inst_block));
120
121 runlist[2] = ram_rl_entry_chan_inst_ptr_lo_f(addr_lo) |
122 ram_rl_entry_chid_f(c->chid);
123 runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi);
124
125 gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n",
126 runlist[0], runlist[1], runlist[2], runlist[3]);
127}
128
129static void gv11b_userd_writeback_config(struct gk20a *g)
130{
131 gk20a_writel(g, fifo_userd_writeback_r(), fifo_userd_writeback_timer_f(
132 fifo_userd_writeback_timer_100us_v()));
133
134
135}
136
/*
 * Program the RAMFC portion of a channel's instance block: GPFIFO
 * base/size, PBDMA signature and header defaults, subdevice/target,
 * acquire timeout, runlist timeslice, channel id, subcontext (VEID)
 * info, engine method buffer, privilege level and USERD writeback.
 * Finishes by committing the USERD pointers via the gp10b helper.
 *
 * Returns the result of channel_gp10b_commit_userd().
 */
int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long acquire_timeout, u32 flags)
{
	struct gk20a *g = c->g;
	struct nvgpu_mem *mem = &c->inst_block;
	u32 data;

	gk20a_dbg_fn("");

	/* start from a clean RAMFC image */
	nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v());

	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(),
		pbdma_gp_base_offset_f(
		u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));

	/* limit2 encodes log2 of the GPFIFO entry count */
	nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(),
		pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
		pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));

	nvgpu_mem_wr32(g, mem, ram_fc_signature_w(),
		c->g->ops.fifo.get_pbdma_signature(c->g));

	nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(),
		pbdma_pb_header_priv_user_f() |
		pbdma_pb_header_method_zero_f() |
		pbdma_pb_header_subchannel_zero_f() |
		pbdma_pb_header_level_main_f() |
		pbdma_pb_header_first_true_f() |
		pbdma_pb_header_type_inc_f());

	nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(),
		pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) |
		pbdma_subdevice_status_active_f() |
		pbdma_subdevice_channel_dma_enable_f());

	nvgpu_mem_wr32(g, mem, ram_fc_target_w(),
		pbdma_target_eng_ctx_valid_true_f() |
		pbdma_target_ce_ctx_valid_true_f() |
		pbdma_target_engine_sw_f());

	nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(),
		g->ops.fifo.pbdma_acquire_val(acquire_timeout));

	nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(),
		pbdma_runlist_timeslice_timeout_128_f() |
		pbdma_runlist_timeslice_timescale_3_f() |
		pbdma_runlist_timeslice_enable_true_f());


	nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid));

	/* VEID0 is the graphics+compute subcontext; others are compute */
	if (c->t19x.subctx_id == CHANNEL_INFO_VEID0)
		nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
			pbdma_set_channel_info_scg_type_graphics_compute0_f() |
			pbdma_set_channel_info_veid_f(c->t19x.subctx_id));
	else
		nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(),
			pbdma_set_channel_info_scg_type_compute1_f() |
			pbdma_set_channel_info_veid_f(c->t19x.subctx_id));

	gv11b_fifo_init_ramfc_eng_method_buffer(g, c, mem);

	if (c->is_privileged_channel) {
		/* Set privilege level for channel */
		nvgpu_mem_wr32(g, mem, ram_fc_config_w(),
			pbdma_config_auth_level_privileged_f());

		gk20a_fifo_setup_ramfc_for_privileged_channel(c);
	}

	/* Enable userd writeback */
	data = nvgpu_mem_rd32(g, mem, ram_fc_config_w());
	data = data | pbdma_config_userd_writeback_enable_f();
	nvgpu_mem_wr32(g, mem, ram_fc_config_w(),data);

	gv11b_userd_writeback_config(g);

	return channel_gp10b_commit_userd(c);
}
217
218
219static void gv11b_ring_channel_doorbell(struct channel_gk20a *c)
220{
221 struct fifo_gk20a *f = &c->g->fifo;
222 u32 hw_chid = f->channel_base + c->chid;
223
224 gk20a_dbg_info("channel ring door bell %d\n", c->chid);
225
226 gv11b_usermode_writel(c->g, usermode_notify_channel_pending_r(),
227 usermode_notify_channel_pending_id_f(hw_chid));
228}
229
230u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c)
231{
232 struct nvgpu_mem *userd_mem = &g->fifo.userd;
233 u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
234
235 return nvgpu_mem_rd32(g, userd_mem,
236 offset + ram_userd_gp_get_w());
237}
238
239u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c)
240{
241 struct nvgpu_mem *userd_mem = &g->fifo.userd;
242 u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
243 u32 lo = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_w());
244 u32 hi = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_hi_w());
245
246 return ((u64)hi << 32) | lo;
247}
248
249void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c)
250{
251 struct nvgpu_mem *userd_mem = &g->fifo.userd;
252 u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32));
253
254 nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(),
255 c->gpfifo.put);
256 /* commit everything to cpu */
257 nvgpu_smp_mb();
258
259 gv11b_ring_channel_doorbell(c);
260}
261
262void channel_gv11b_unbind(struct channel_gk20a *ch)
263{
264 struct gk20a *g = ch->g;
265
266 gk20a_dbg_fn("");
267
268 if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) {
269 gk20a_writel(g, ccsr_channel_inst_r(ch->chid),
270 ccsr_channel_inst_ptr_f(0) |
271 ccsr_channel_inst_bind_false_f());
272
273 gk20a_writel(g, ccsr_channel_r(ch->chid),
274 ccsr_channel_enable_clr_true_f() |
275 ccsr_channel_pbdma_faulted_reset_f() |
276 ccsr_channel_eng_faulted_reset_f());
277 }
278}
279
280u32 gv11b_fifo_get_num_fifos(struct gk20a *g)
281{
282 return ccsr_channel__size_1_v();
283}
284
285bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid)
286{
287 return (engine_subid == gmmu_fault_client_type_gpc_v());
288}
289
/*
 * Dump a channel's CCSR status and a decoded view of its RAMFC image
 * (pushbuffer pointers, header, semaphore state) to the debug output.
 * Also dumps hw semaphore state when the channel has one. No-op when
 * ch_state is NULL.
 */
void gv11b_dump_channel_status_ramfc(struct gk20a *g,
				     struct gk20a_debug_output *o,
				     u32 chid,
				     struct ch_state *ch_state)
{
	u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
	u32 status = ccsr_channel_status_v(channel);
	u32 *inst_mem;
	struct channel_gk20a *c = g->fifo.channel + chid;
	struct nvgpu_semaphore_int *hw_sema = NULL;

	if (c->hw_sema)
		hw_sema = c->hw_sema;

	if (!ch_state)
		return;

	/* caller snapshotted the instance block into ch_state */
	inst_mem = &ch_state->inst_block[0];

	gk20a_debug_output(o, "%d-%s, pid %d, refs: %d: ", chid,
			g->name,
			ch_state->pid,
			ch_state->refs);
	gk20a_debug_output(o, "channel status: %s in use %s %s\n",
			ccsr_channel_enable_v(channel) ? "" : "not",
			gk20a_decode_ccsr_chan_status(status),
			ccsr_channel_busy_v(channel) ? "busy" : "not busy");
	gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx "
			"FETCH: %016llx\nHEADER: %08x COUNT: %08x\n"
			"SEMAPHORE: addr hi: %08x addr lo: %08x\n"
			"payload %08x execute %08x\n",
			(u64)inst_mem[ram_fc_pb_top_level_get_w()] +
			((u64)inst_mem[ram_fc_pb_top_level_get_hi_w()] << 32ULL),
			(u64)inst_mem[ram_fc_pb_put_w()] +
			((u64)inst_mem[ram_fc_pb_put_hi_w()] << 32ULL),
			(u64)inst_mem[ram_fc_pb_get_w()] +
			((u64)inst_mem[ram_fc_pb_get_hi_w()] << 32ULL),
			(u64)inst_mem[ram_fc_pb_fetch_w()] +
			((u64)inst_mem[ram_fc_pb_fetch_hi_w()] << 32ULL),
			inst_mem[ram_fc_pb_header_w()],
			inst_mem[ram_fc_pb_count_w()],
			inst_mem[ram_fc_sem_addr_hi_w()],
			inst_mem[ram_fc_sem_addr_lo_w()],
			inst_mem[ram_fc_sem_payload_lo_w()],
			inst_mem[ram_fc_sem_execute_w()]);
	if (hw_sema)
		gk20a_debug_output(o, "SEMA STATE: value: 0x%08x next_val: 0x%08x addr: 0x%010llx\n",
				  __nvgpu_semaphore_read(hw_sema),
				  nvgpu_atomic_read(&hw_sema->next_value),
				  nvgpu_hw_sema_addr(hw_sema));
	gk20a_debug_output(o, "\n");
}
342
/*
 * Dump the fifo engine status registers for every host engine:
 * current/next context id and type, ctxsw state, and the reload /
 * faulted / busy flags.
 */
void gv11b_dump_eng_status(struct gk20a *g,
				struct gk20a_debug_output *o)
{
	u32 i, host_num_engines;

	host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);

	for (i = 0; i < host_num_engines; i++) {
		u32 status = gk20a_readl(g, fifo_engine_status_r(i));
		u32 ctx_status = fifo_engine_status_ctx_status_v(status);

		gk20a_debug_output(o, "%s eng %d: ", g->name, i);
		gk20a_debug_output(o,
			"id: %d (%s), next_id: %d (%s), ctx status: %s ",
			fifo_engine_status_id_v(status),
			fifo_engine_status_id_type_v(status) ?
				"tsg" : "channel",
			fifo_engine_status_next_id_v(status),
			fifo_engine_status_next_id_type_v(status) ?
				"tsg" : "channel",
			gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));

		if (fifo_engine_status_eng_reload_v(status))
			gk20a_debug_output(o, "ctx_reload ");
		if (fifo_engine_status_faulted_v(status))
			gk20a_debug_output(o, "faulted ");
		if (fifo_engine_status_engine_v(status))
			gk20a_debug_output(o, "busy ");
		gk20a_debug_output(o, "\n");
	}
	gk20a_debug_output(o, "\n");
}
375
376u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g)
377{
378 u32 intr_0_error_mask =
379 fifo_intr_0_bind_error_pending_f() |
380 fifo_intr_0_sched_error_pending_f() |
381 fifo_intr_0_chsw_error_pending_f() |
382 fifo_intr_0_fb_flush_timeout_pending_f() |
383 fifo_intr_0_lb_error_pending_f();
384
385 return intr_0_error_mask;
386}
387
388u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g)
389{
390 return gk20a_get_gr_idle_timeout(g);
391}
392
393static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
394 u32 pbdma_id, unsigned int timeout_rc_type)
395{
396 struct nvgpu_timeout timeout;
397 unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
398 u32 pbdma_stat;
399 u32 chan_stat;
400 int ret = -EBUSY;
401
402 /* timeout in milli seconds */
403 nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
404 NVGPU_TIMER_CPU_TIMER);
405
406 nvgpu_log(g, gpu_dbg_info, "wait preempt pbdma %d", pbdma_id);
407 /* Verify that ch/tsg is no longer on the pbdma */
408 do {
409 /*
410 * If the PBDMA has a stalling interrupt and receives a NACK,
411 * the PBDMA won't save out until the STALLING interrupt is
412 * cleared. Stalling interrupt need not be directly addressed,
413 * as simply clearing of the interrupt bit will be sufficient
414 * to allow the PBDMA to save out. If the stalling interrupt
415 * was due to a SW method or another deterministic failure,
416 * the PBDMA will assert it when the channel is reloaded
417 * or resumed. Note that the fault will still be
418 * reported to SW.
419 */
420
421 gk20a_fifo_handle_pbdma_intr(g, &g->fifo, pbdma_id, RC_NO);
422
423 pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
424 chan_stat = fifo_pbdma_status_chan_status_v(pbdma_stat);
425
426 if (chan_stat ==
427 fifo_pbdma_status_chan_status_valid_v() ||
428 chan_stat ==
429 fifo_pbdma_status_chan_status_chsw_save_v()) {
430
431 if (id != fifo_pbdma_status_id_v(pbdma_stat)) {
432 ret = 0;
433 break;
434 }
435
436 } else if (chan_stat ==
437 fifo_pbdma_status_chan_status_chsw_load_v()) {
438
439 if (id != fifo_pbdma_status_next_id_v(pbdma_stat)) {
440 ret = 0;
441 break;
442 }
443
444 } else if (chan_stat ==
445 fifo_pbdma_status_chan_status_chsw_switch_v()) {
446
447 if ((id != fifo_pbdma_status_next_id_v(pbdma_stat)) &&
448 (id != fifo_pbdma_status_id_v(pbdma_stat))) {
449 ret = 0;
450 break;
451 }
452 } else {
453 /* pbdma status is invalid i.e. it is not loaded */
454 ret = 0;
455 break;
456 }
457
458 usleep_range(delay, delay * 2);
459 delay = min_t(unsigned long,
460 delay << 1, GR_IDLE_CHECK_MAX);
461 } while (!nvgpu_timeout_expired_msg(&timeout,
462 "preempt timeout pbdma"));
463 return ret;
464}
465
/*
 * Poll an engine's ctxsw status until the given channel/TSG id has
 * saved off the engine (preempt finished) or the preempt timeout
 * expires. If a stalling interrupt is pending for the engine, or the
 * caller asked for no recovery (PREEMPT_TIMEOUT_NORC), the engine is
 * marked for reset via *reset_eng_bitmask instead of waiting further.
 *
 * Returns 0 when preemption is resolved (saved, not resident, or
 * engine marked for reset), -EBUSY on timeout.
 */
static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
		 u32 act_eng_id, u32 *reset_eng_bitmask,
		 unsigned int timeout_rc_type)
{
	struct nvgpu_timeout timeout;
	unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
	u32 eng_stat;
	u32 ctx_stat;
	int ret = -EBUSY;
	bool stall_intr = false;

	/* timeout in milli seconds */
	nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
			 NVGPU_TIMER_CPU_TIMER);

	nvgpu_log(g, gpu_dbg_info, "wait preempt act engine id: %u",
			act_eng_id);
	/* Check if ch/tsg has saved off the engine or if ctxsw is hung */
	do {
		eng_stat = gk20a_readl(g, fifo_engine_status_r(act_eng_id));
		ctx_stat = fifo_engine_status_ctx_status_v(eng_stat);

		/* a pending stall interrupt means preempt cannot complete */
		if (gv11b_mc_is_stall_and_eng_intr_pending(g, act_eng_id)) {
			stall_intr = true;
			nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr,
					"stall intr set, "
					"preemption will not finish");
		}
		if (ctx_stat ==
			 fifo_engine_status_ctx_status_ctxsw_switch_v()) {
			/* Eng save hasn't started yet. Continue polling */

		} else if (ctx_stat ==
			 fifo_engine_status_ctx_status_valid_v() ||
				ctx_stat ==
			 fifo_engine_status_ctx_status_ctxsw_save_v()) {

			if (id == fifo_engine_status_id_v(eng_stat)) {
				if (stall_intr ||
					timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
					/* preemption will not finish */
					*reset_eng_bitmask |= BIT(act_eng_id);
					ret = 0;
					break;
				}
			} else {
				/* context is not running on the engine */
				ret = 0;
				break;
			}

		} else if (ctx_stat ==
			 fifo_engine_status_ctx_status_ctxsw_load_v()) {

			if (id == fifo_engine_status_next_id_v(eng_stat)) {

				if (stall_intr ||
					timeout_rc_type == PREEMPT_TIMEOUT_NORC) {
					/* preemption will not finish */
					*reset_eng_bitmask |= BIT(act_eng_id);
					ret = 0;
					break;
				}
			} else {
				/* context is not running on the engine */
				ret = 0;
				break;
			}

		} else {
			/* Preempt should be finished */
			ret = 0;
			break;
		}
		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(unsigned long,
				delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"preempt timeout eng"));
	return ret;
}
547
548static void gv11b_reset_eng_faulted_ch(struct gk20a *g, u32 chid)
549{
550 u32 reg_val;
551
552 reg_val = gk20a_readl(g, ccsr_channel_r(chid));
553 reg_val |= ccsr_channel_eng_faulted_reset_f();
554 gk20a_writel(g, ccsr_channel_r(chid), reg_val);
555}
556
/*
 * Clear ENG_FAULTED for every channel in a TSG. Holds the TSG channel
 * list read lock while iterating.
 */
static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		gv11b_reset_eng_faulted_ch(g, ch->chid);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}
568
569static void gv11b_reset_pbdma_faulted_ch(struct gk20a *g, u32 chid)
570{
571 u32 reg_val;
572
573 reg_val = gk20a_readl(g, ccsr_channel_r(chid));
574 reg_val |= ccsr_channel_pbdma_faulted_reset_f();
575 gk20a_writel(g, ccsr_channel_r(chid), reg_val);
576}
577
/*
 * Clear PBDMA_FAULTED for every channel in a TSG. Holds the TSG
 * channel list read lock while iterating.
 */
static void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
		gv11b_reset_pbdma_faulted_ch(g, ch->chid);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
}
589
590void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
591 struct channel_gk20a *refch,
592 u32 faulted_pbdma, u32 faulted_engine)
593{
594 struct tsg_gk20a *tsg;
595
596 nvgpu_log(g, gpu_dbg_intr, "reset faulted pbdma:0x%x eng:0x%x",
597 faulted_pbdma, faulted_engine);
598
599 if (gk20a_is_channel_marked_as_tsg(refch)) {
600 tsg = &g->fifo.tsg[refch->tsgid];
601 if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
602 gv11b_reset_pbdma_faulted_tsg(tsg);
603 if (faulted_engine != FIFO_INVAL_ENGINE_ID)
604 gv11b_reset_eng_faulted_tsg(tsg);
605 } else {
606 if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
607 gv11b_reset_pbdma_faulted_ch(g, refch->chid);
608 if (faulted_engine != FIFO_INVAL_ENGINE_ID)
609 gv11b_reset_eng_faulted_ch(g, refch->chid);
610 }
611}
612
/*
 * Build the mask of runlists that must be disabled/preempted for recovery.
 *
 * - If the faulting id is known (TSG or channel), its own runlist is added.
 * - For an MMU fault, every runlist serving a faulted engine or the
 *   faulted PBDMA is added as well.
 * - If the id is unknown, either all runlists serving the active engines
 *   (when known) or simply all runlists are added.
 *
 * Note: the 'id' parameter is deliberately reused as the loop counter once
 * its original value has been consumed in the first branch.
 */
static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
			u32 id, unsigned int id_type, unsigned int rc_type,
			struct mmu_fault_info *mmfault)
{
	u32 runlists_mask = 0;
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	u32 pbdma_bitmask = 0;

	/* known TSG/channel: start with its own runlist */
	if (id_type != ID_TYPE_UNKNOWN) {
		if (id_type == ID_TYPE_TSG)
			runlists_mask |= fifo_sched_disable_runlist_m(
						f->tsg[id].runlist_id);
		else
			runlists_mask |= fifo_sched_disable_runlist_m(
						f->channel[id].runlist_id);
	}

	/* MMU fault: also cover runlists serving faulted engine/pbdma */
	if (rc_type == RC_TYPE_MMU_FAULT && mmfault) {
		if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
			pbdma_bitmask = BIT(mmfault->faulted_pbdma);

		for (id = 0; id < f->max_runlists; id++) {

			runlist = &f->runlist_info[id];

			if (runlist->eng_bitmask & act_eng_bitmask)
				runlists_mask |=
				 fifo_sched_disable_runlist_m(id);

			if (runlist->pbdma_bitmask & pbdma_bitmask)
				runlists_mask |=
				 fifo_sched_disable_runlist_m(id);
		}
	}

	/* unknown id: fall back to engine bitmask, or all runlists */
	if (id_type == ID_TYPE_UNKNOWN) {
		for (id = 0; id < f->max_runlists; id++) {
			if (act_eng_bitmask) {
				/* eng ids are known */
				runlist = &f->runlist_info[id];
				if (runlist->eng_bitmask & act_eng_bitmask)
					runlists_mask |=
					fifo_sched_disable_runlist_m(id);
			} else {
				runlists_mask |=
					fifo_sched_disable_runlist_m(id);
			}
		}
	}
	gk20a_dbg_info("runlists_mask =  %08x", runlists_mask);
	return runlists_mask;
}
666
667static void gv11b_fifo_runlist_event_intr_disable(struct gk20a *g)
668{
669 u32 reg_val;
670
671 reg_val = gk20a_readl(g, fifo_intr_en_0_r());
672 reg_val &= fifo_intr_0_runlist_event_pending_f();
673 gk20a_writel(g, fifo_intr_en_0_r(), reg_val);
674}
675
676static void gv11b_fifo_runlist_event_intr_enable(struct gk20a *g)
677{
678 u32 reg_val;
679
680 reg_val = gk20a_readl(g, fifo_intr_en_0_r());
681 reg_val |= fifo_intr_0_runlist_event_pending_f();
682 gk20a_writel(g, fifo_intr_en_0_r(), reg_val);
683}
684
685static void gv11b_fifo_issue_runlist_preempt(struct gk20a *g,
686 u32 runlists_mask)
687{
688 u32 reg_val;
689
690 /* issue runlist preempt */
691 reg_val = gk20a_readl(g, fifo_runlist_preempt_r());
692 reg_val |= runlists_mask;
693 gk20a_writel(g, fifo_runlist_preempt_r(), reg_val);
694}
695
/*
 * Poll FIFO_RUNLIST_PREEMPT until the hardware clears every bit in
 * runlists_mask (preempt complete) or the preempt timeout expires.
 *
 * Returns 0 when the preempt finished, -EBUSY on timeout.  The poll
 * interval backs off exponentially from GR_IDLE_CHECK_DEFAULT up to
 * GR_IDLE_CHECK_MAX.
 */
static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g,
			u32 runlists_mask)
{
	struct nvgpu_timeout timeout;
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	int ret = -EBUSY;

	nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g),
			NVGPU_TIMER_CPU_TIMER);
	do {
		/* all masked bits cleared => every runlist preempt done */
		if (!((gk20a_readl(g, fifo_runlist_preempt_r())) &
				runlists_mask)) {
			ret = 0;
			break;
		}

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(unsigned long,
				delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired_msg(&timeout,
			"runlist preempt timeout"));
	return ret;
}
719
/*
 * Check whether a previously issued preempt for a channel/TSG has
 * completed, by polling every PBDMA and every engine served by the
 * id's runlist.
 *
 * Side effect: rebuilds runlist_info[runlist_id].reset_eng_bitmask —
 * gv11b_fifo_poll_eng_ctx_status() accumulates engines that need a reset
 * into it.
 *
 * Returns 0 when the preempt completed everywhere; otherwise the OR of
 * the non-zero poll results (timeout).
 */
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
		 unsigned int id_type, unsigned int timeout_rc_type)
{
	struct fifo_gk20a *f = &g->fifo;
	unsigned long runlist_served_pbdmas;
	unsigned long runlist_served_engines;
	u32 pbdma_id;
	u32 act_eng_id;
	u32 runlist_id;
	int func_ret;
	int ret = 0;
	u32 tsgid;

	/* preemption is tracked per TSG; map a channel id to its TSG */
	if (id_type == ID_TYPE_TSG) {
		runlist_id = f->tsg[id].runlist_id;
		tsgid = id;
	} else {
		runlist_id = f->channel[id].runlist_id;
		tsgid = f->channel[id].tsgid;
	}

	nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);

	runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;

	/* wait for the TSG to drain from every serving PBDMA */
	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {

		func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id,
				timeout_rc_type);
		if (func_ret != 0) {
			gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id);
			ret |= func_ret;
		}
	}

	f->runlist_info[runlist_id].reset_eng_bitmask = 0;

	/* wait for the context to leave every serving engine */
	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {

		func_ret = gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
				&f->runlist_info[runlist_id].reset_eng_bitmask,
				timeout_rc_type);

		if (func_ret != 0) {
			gk20a_dbg_info("preempt timeout engine %d", act_eng_id);
			ret |= func_ret;
		}
	}

	return ret;
}
772
773int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
774{
775 struct fifo_gk20a *f = &g->fifo;
776 u32 tsgid;
777
778 tsgid = f->channel[chid].tsgid;
779 nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
780
781 /* Preempt tsg. Channel preempt is NOOP */
782 return g->ops.fifo.preempt_tsg(g, tsgid);
783}
784
/*
 * Issue a preempt on the given runlists and wait for it to finish.
 * Caller must already hold the relevant runlist mutexes.
 *
 * The runlist-event interrupt is masked around the preempt because the
 * preempt itself raises the event; the stale event is cleared before
 * the interrupt is re-enabled.
 *
 * Returns 0 on success, -EBUSY if the preempt poll timed out.
 */
static int __locked_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
{
	int ret;

	/*
	 * Disable runlist event interrupt as it will get
	 * triggered after runlist preempt finishes
	 */
	gv11b_fifo_runlist_event_intr_disable(g);

	/* issue runlist preempt */
	gv11b_fifo_issue_runlist_preempt(g, runlists_mask);

	/* poll for runlist preempt done */
	ret = gv11b_fifo_poll_runlist_preempt_pending(g, runlists_mask);

	/* Clear outstanding runlist event */
	gk20a_fifo_handle_runlist_event(g);

	/* Enable runlist event interrupt*/
	gv11b_fifo_runlist_event_intr_enable(g);

	return ret;
}
809
810/* TSG enable sequence applicable for Volta and onwards */
811int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
812{
813 struct gk20a *g = tsg->g;
814 struct channel_gk20a *ch;
815
816 nvgpu_rwsem_down_read(&tsg->ch_list_lock);
817 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
818 g->ops.fifo.enable_channel(ch);
819 }
820 nvgpu_rwsem_up_read(&tsg->ch_list_lock);
821
822 return 0;
823}
824
825int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
826{
827 struct fifo_gk20a *f = &g->fifo;
828 u32 ret = 0;
829 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
830 u32 mutex_ret = 0;
831 u32 runlist_id;
832
833 gk20a_dbg_fn("%d", tsgid);
834
835 runlist_id = f->tsg[tsgid].runlist_id;
836 gk20a_dbg_fn("runlist_id %d", runlist_id);
837
838 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
839
840 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
841
842 ret = __locked_fifo_preempt(g, tsgid, true);
843
844 if (!mutex_ret)
845 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
846
847 nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
848
849 return ret;
850}
851
852
/*
 * Preempt a set of runlists at once.  Acquires the mutex of every
 * runlist in the mask (ascending id order, so lock ordering is
 * consistent), plus the PMU FIFO mutex when available, then performs
 * the locked preempt and releases everything in the same id order.
 *
 * Returns 0 on success, -EBUSY if the preempt poll timed out.
 */
static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
{
	int ret = 0;
	u32 token = PMU_INVALID_MUTEX_OWNER_ID;
	u32 mutex_ret = 0;
	u32 runlist_id;

	gk20a_dbg_fn("");

	/* lock every runlist named in the mask */
	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
			nvgpu_mutex_acquire(&g->fifo.
				runlist_info[runlist_id].mutex);
	}

	/* mutex_ret != 0 means the PMU mutex was not taken (e.g. no PMU) */
	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	ret = __locked_fifo_preempt_runlists(g, runlists_mask);

	if (!mutex_ret)
		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);

	/* unlock in the same id order */
	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {
		if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id))
			nvgpu_mutex_release(&g->fifo.
				runlist_info[runlist_id].mutex);
	}

	return ret;
}
883
/*
 * Issue a preempt for a channel or TSG and wait for it to complete.
 * Caller must already hold the runlist mutex (and PMU FIFO mutex).
 *
 * Channel ids are mapped to their TSG before issuing, since channel
 * preempt is a no-op on this chip.  On timeout, recovery is triggered
 * only when the caller asked for it via PREEMPT_TIMEOUT_RC.
 *
 * Returns 0 on success or the non-zero is_preempt_pending() result.
 */
static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
			 unsigned int id_type, unsigned int timeout_rc_type)
{
	int ret;
	struct fifo_gk20a *f = &g->fifo;

	nvgpu_log_fn(g, "id:%d id_type:%d", id, id_type);

	/* Issue tsg preempt. Channel preempt is noop */
	if (id_type == ID_TYPE_CHANNEL)
		gk20a_fifo_issue_preempt(g, f->channel[id].tsgid, true);
	else
		gk20a_fifo_issue_preempt(g, id, true);

	/* wait for preempt */
	ret = g->ops.fifo.is_preempt_pending(g, id, id_type,
					 timeout_rc_type);

	/* recovery on timeout only when the caller requested it */
	if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC))
		__locked_fifo_preempt_timeout_rc(g, id, id_type);

	return ret;
}
907
908
909int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
910 unsigned int id_type, unsigned int timeout_rc_type)
911{
912 struct fifo_gk20a *f = &g->fifo;
913 u32 ret = 0;
914 u32 token = PMU_INVALID_MUTEX_OWNER_ID;
915 u32 mutex_ret = 0;
916 u32 runlist_id;
917
918 if (id_type == ID_TYPE_TSG)
919 runlist_id = f->tsg[id].runlist_id;
920 else if (id_type == ID_TYPE_CHANNEL)
921 runlist_id = f->channel[id].runlist_id;
922 else
923 return -EINVAL;
924
925 if (runlist_id >= g->fifo.max_runlists) {
926 gk20a_dbg_info("runlist_id = %d", runlist_id);
927 return -EINVAL;
928 }
929
930 gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id);
931
932 nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex);
933
934 mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
935
936 ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type);
937
938 if (!mutex_ret)
939 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
940
941 nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex);
942
943 return ret;
944
945}
946
/*
 * Full recovery/teardown path for a faulted channel or TSG (or, for an
 * unknown id, for whole runlists).  Sequence:
 *   1. compute the affected runlists and disable them,
 *   2. quiesce power management (ELPG/SLCG/ELCG) so engines can be reset,
 *   3. identify the channel/TSG (from the MMU fault info when present,
 *      clearing the FAULTED bits on the way),
 *   4. preempt the id (or the whole runlists when the id is unknown),
 *   5. abort the channel/TSG or flush the runlists,
 *   6. reset faulted engines, unless the SM debugger is attached, in
 *      which case the reset is deferred to channel free,
 *   7. re-enable the runlists and ELPG.
 */
void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
			u32 id, unsigned int id_type, unsigned int rc_type,
			struct mmu_fault_info *mmfault)
{
	bool verbose = false;
	struct tsg_gk20a *tsg = NULL;
	struct channel_gk20a *refch = NULL;
	u32 runlists_mask, runlist_id;
	struct fifo_runlist_info_gk20a *runlist = NULL;
	u32 engine_id, client_type = ~0;

	gk20a_dbg_info("active engine ids bitmask =0x%x", act_eng_bitmask);
	gk20a_dbg_info("hw id     =%d", id);
	gk20a_dbg_info("id_type   =%d", id_type);
	gk20a_dbg_info("rc_type   =%d", rc_type);
	gk20a_dbg_info("mmu_fault =0x%p", mmfault);

	runlists_mask =  gv11b_fifo_get_runlists_mask(g, act_eng_bitmask, id,
					 id_type, rc_type, mmfault);

	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED,
					 !RUNLIST_INFO_MUTEX_LOCKED);

	g->fifo.deferred_reset_pending = false;

	/* Disable power management */
	if (g->support_pmu && g->elpg_enabled) {
		if (nvgpu_pmu_disable_elpg(g))
			nvgpu_err(g, "failed to set disable elpg");
	}
	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
				false);
	if (g->ops.clock_gating.slcg_perf_load_gating_prod)
		g->ops.clock_gating.slcg_perf_load_gating_prod(g,
				false);
	if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
		g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
				false);

	gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);

	if (rc_type == RC_TYPE_MMU_FAULT)
		gk20a_debug_dump(g);

	/* get the channel/TSG */
	if (rc_type == RC_TYPE_MMU_FAULT && mmfault && mmfault->refch) {
		refch = mmfault->refch;
		client_type = mmfault->client_type;
		if (gk20a_is_channel_marked_as_tsg(refch))
			tsg = &g->fifo.tsg[refch->tsgid];
		/* clear FAULTED bits so the channel can be rescheduled */
		gv11b_fifo_reset_pbdma_and_eng_faulted(g, refch,
				mmfault->faulted_pbdma,
				mmfault->faulted_engine);
	} else {
		if (id_type == ID_TYPE_TSG)
			tsg = &g->fifo.tsg[id];
		else if (id_type == ID_TYPE_CHANNEL)
			refch = gk20a_channel_get(&g->fifo.channel[id]);
	}

	if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) {
		g->ops.fifo.preempt_ch_tsg(g, id, id_type,
					 PREEMPT_TIMEOUT_NORC);
	} else {
		gv11b_fifo_preempt_runlists(g, runlists_mask);
	}

	if (tsg) {
		if (!g->fifo.deferred_reset_pending) {
			if (rc_type == RC_TYPE_MMU_FAULT) {
				gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg);
				verbose = gk20a_fifo_error_tsg(g, tsg);
			}
		}
		gk20a_fifo_abort_tsg(g, tsg->tsgid, false);
		if (refch)
			gk20a_channel_put(refch);
	} else if (refch) {
		if (!g->fifo.deferred_reset_pending) {
			if (rc_type == RC_TYPE_MMU_FAULT) {
				gk20a_fifo_set_ctx_mmu_error_ch(g, refch);
				verbose = gk20a_fifo_error_ch(g, refch);
			}
		}
		gk20a_channel_abort(refch, false);
		gk20a_channel_put(refch);
	} else {
		/* neither channel nor TSG known: flush affected runlists */
		nvgpu_err(g, "id unknown, abort runlist");
		for (runlist_id = 0; runlist_id < g->fifo.max_runlists;
						 runlist_id++) {
			if (runlists_mask & BIT(runlist_id))
				g->ops.fifo.update_runlist(g, runlist_id,
					 FIFO_INVAL_CHANNEL_ID, false, true);
		}
	}

	/* check if engine reset should be deferred */
	for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) {

		runlist = &g->fifo.runlist_info[runlist_id];
		if ((runlists_mask & BIT(runlist_id)) &&
					runlist->reset_eng_bitmask) {

			unsigned long __reset_eng_bitmask =
				 runlist->reset_eng_bitmask;

			for_each_set_bit(engine_id, &__reset_eng_bitmask, 32) {
				if ((refch || tsg) &&
					 gk20a_fifo_should_defer_engine_reset(g,
					engine_id, client_type, false)) {

					g->fifo.deferred_fault_engines |=
							 BIT(engine_id);

					/* handled during channel free */
					g->fifo.deferred_reset_pending = true;
					gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
						"sm debugger attached,"
						" deferring channel recovery to channel free");
				} else {
					/*
					 * if lock is already taken, a reset is
					 * taking place so no need to repeat
					 */
					if (nvgpu_mutex_tryacquire(
						&g->fifo.gr_reset_mutex)) {

						gk20a_fifo_reset_engine(g,
								 engine_id);

						nvgpu_mutex_release(
						 &g->fifo.gr_reset_mutex);
					}
				}
			}
		}
	}

#ifdef CONFIG_GK20A_CTXSW_TRACE
	if (refch)
		gk20a_ctxsw_trace_channel_reset(g, refch);
	else if (tsg)
		gk20a_ctxsw_trace_tsg_reset(g, tsg);
#endif

	gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED,
					 !RUNLIST_INFO_MUTEX_LOCKED);

	/* It is safe to enable ELPG again. */
	if (g->support_pmu && g->elpg_enabled)
		nvgpu_pmu_enable_elpg(g);
}
1100
/*
 * Populate the PBDMA interrupt classification masks used by the common
 * interrupt handler: device-fatal (whole-GPU problem), channel-fatal
 * (recoverable by killing the channel) and restartable.
 */
void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f)
{
	/*
	 * These are all errors which indicate something really wrong
	 * going on in the device
	 */
	f->intr.pbdma.device_fatal_0 =
		pbdma_intr_0_memreq_pending_f() |
		pbdma_intr_0_memack_timeout_pending_f() |
		pbdma_intr_0_memack_extra_pending_f() |
		pbdma_intr_0_memdat_timeout_pending_f() |
		pbdma_intr_0_memdat_extra_pending_f() |
		pbdma_intr_0_memflush_pending_f() |
		pbdma_intr_0_memop_pending_f() |
		pbdma_intr_0_lbconnect_pending_f() |
		pbdma_intr_0_lback_timeout_pending_f() |
		pbdma_intr_0_lback_extra_pending_f() |
		pbdma_intr_0_lbdat_timeout_pending_f() |
		pbdma_intr_0_lbdat_extra_pending_f() |
		pbdma_intr_0_pri_pending_f();

	/*
	 * These are data parsing, framing errors or others which can be
	 * recovered from with intervention... or just resetting the
	 * channel
	 */
	f->intr.pbdma.channel_fatal_0 =
		pbdma_intr_0_gpfifo_pending_f() |
		pbdma_intr_0_gpptr_pending_f() |
		pbdma_intr_0_gpentry_pending_f() |
		pbdma_intr_0_gpcrc_pending_f() |
		pbdma_intr_0_pbptr_pending_f() |
		pbdma_intr_0_pbentry_pending_f() |
		pbdma_intr_0_pbcrc_pending_f() |
		pbdma_intr_0_method_pending_f() |
		pbdma_intr_0_methodcrc_pending_f() |
		pbdma_intr_0_pbseg_pending_f() |
		pbdma_intr_0_clear_faulted_error_pending_f() |
		pbdma_intr_0_eng_reset_pending_f() |
		pbdma_intr_0_semaphore_pending_f() |
		pbdma_intr_0_signature_pending_f();

	/* Can be used for sw-methods, or represents a recoverable timeout. */
	f->intr.pbdma.restartable_0 =
		pbdma_intr_0_device_pending_f();
}
1147
1148static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g)
1149{
1150 u32 intr_0_en_mask;
1151
1152 intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g);
1153
1154 intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() |
1155 fifo_intr_0_pbdma_intr_pending_f() |
1156 fifo_intr_0_ctxsw_timeout_pending_f();
1157
1158 return intr_0_en_mask;
1159}
1160
/*
 * Reset and enable the FIFO hardware:
 *  - pull PFIFO out of reset, apply clock-gating prods,
 *  - enable all PBDMAs,
 *  - configure FB/PBDMA/ctxsw timeouts (detection disabled or period
 *    maxed out on pre-silicon, since timing there is unrealistic),
 *  - clear and enable PBDMA and top-level FIFO interrupts.
 *
 * Always returns 0.
 */
int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
{
	u32 intr_stall;
	u32 mask;
	u32 timeout;
	unsigned int i;
	u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA);

	gk20a_dbg_fn("");

	/* enable pmc pfifo */
	g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());

	if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
		g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
		g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
		g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
				g->blcg_enabled);

	/* enable pbdma */
	mask = 0;
	for (i = 0; i < host_num_pbdma; ++i)
		mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
	gk20a_writel(g, mc_enable_pb_r(), mask);


	timeout = gk20a_readl(g, fifo_fb_timeout_r());
	nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
	/* pre-silicon runs far slower: max out / disable FB timeout */
	if (!nvgpu_platform_is_silicon(g)) {
		timeout = set_field(timeout, fifo_fb_timeout_period_m(),
					fifo_fb_timeout_period_max_f());
		timeout = set_field(timeout, fifo_fb_timeout_detection_m(),
					fifo_fb_timeout_detection_disabled_f());
		nvgpu_log_info(g, "new fifo_fb_timeout reg val = 0x%08x",
					timeout);
		gk20a_writel(g, fifo_fb_timeout_r(), timeout);
	}

	for (i = 0; i < host_num_pbdma; i++) {
		timeout = gk20a_readl(g, pbdma_timeout_r(i));
		nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x",
						timeout);
		if (!nvgpu_platform_is_silicon(g)) {
			timeout = set_field(timeout, pbdma_timeout_period_m(),
					pbdma_timeout_period_max_f());
			nvgpu_log_info(g, "new pbdma_timeout reg val = 0x%08x",
						 timeout);
			gk20a_writel(g, pbdma_timeout_r(i), timeout);
		}
	}

	/* clear and enable pbdma interrupt */
	for (i = 0; i < host_num_pbdma; i++) {
		gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
		gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);

		/* mirror the HW's stall-intr routing into the enables */
		intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
		gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall);

		intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i));
		gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall);
		gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall);
	}

	/* clear ctxsw timeout interrupts */
	gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ~0);

	if (nvgpu_platform_is_silicon(g)) {
		/* enable ctxsw timeout */
		timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US;
		timeout = scale_ptimer(timeout,
			ptimer_scalingfactor10x(g->ptimer_src_freq));
		timeout |= fifo_eng_ctxsw_timeout_detection_enabled_f();
		gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
	} else {
		timeout = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
		nvgpu_log_info(g, "fifo_eng_ctxsw_timeout reg val = 0x%08x",
						 timeout);
		timeout = set_field(timeout, fifo_eng_ctxsw_timeout_period_m(),
				fifo_eng_ctxsw_timeout_period_max_f());
		timeout = set_field(timeout,
				fifo_eng_ctxsw_timeout_detection_m(),
				fifo_eng_ctxsw_timeout_detection_disabled_f());
		nvgpu_log_info(g, "new fifo_eng_ctxsw_timeout reg val = 0x%08x",
					 timeout);
		gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout);
	}

	/* clear runlist interrupts */
	gk20a_writel(g, fifo_intr_runlist_r(), ~0);

	/* clear and enable pfifo interrupt */
	gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
	mask = gv11b_fifo_intr_0_en_mask(g);
	gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask);
	gk20a_writel(g, fifo_intr_en_0_r(), mask);
	gk20a_dbg_info("fifo_intr_en_1 = 0x80000000");
	gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);

	gk20a_dbg_fn("done");

	return 0;
}
1269
/*
 * Human-readable names for FIFO_INTR_SCHED_ERROR codes, indexed by the
 * raw error code.  "xxx-*" entries are codes with no documented meaning.
 */
static const char *const gv11b_sched_error_str[] = {
	"xxx-0",
	"xxx-1",
	"xxx-2",
	"xxx-3",
	"xxx-4",
	"engine_reset",
	"rl_ack_timeout",
	"rl_ack_extra",
	"rl_rdat_timeout",
	"rl_rdat_extra",
	"xxx-a",
	"xxx-b",
	"rl_req_timeout",
	"new_runlist",
	"code_config_while_busy",
	"xxx-f",
	"xxx-0x10",
	"xxx-0x11",
	"xxx-0x12",
	"xxx-0x13",
	"xxx-0x14",
	"xxx-0x15",
	"xxx-0x16",
	"xxx-0x17",
	"xxx-0x18",
	"xxx-0x19",
	"xxx-0x1a",
	"xxx-0x1b",
	"xxx-0x1c",
	"xxx-0x1d",
	"xxx-0x1e",
	"xxx-0x1f",
	"bad_tsg",
};
1305
1306bool gv11b_fifo_handle_sched_error(struct gk20a *g)
1307{
1308 u32 sched_error;
1309
1310 sched_error = gk20a_readl(g, fifo_intr_sched_error_r());
1311
1312 if (sched_error < ARRAY_SIZE(gv11b_sched_error_str))
1313 nvgpu_err(g, "fifo sched error :%s",
1314 gv11b_sched_error_str[sched_error]);
1315 else
1316 nvgpu_err(g, "fifo sched error code not supported");
1317
1318 if (sched_error == SCHED_ERROR_CODE_BAD_TSG ) {
1319 /* id is unknown, preempt all runlists and do recovery */
1320 gk20a_fifo_recover(g, 0, 0, false, false, false);
1321 }
1322
1323 return false;
1324}
1325
/*
 * Read and decode FIFO_INTR_CTXSW_TIMEOUT_INFO for one engine.
 *
 * Returns the TSG id that should be recovered, or FIFO_INVAL_TSG_ID when
 * no recovery is needed (ack arrived, timeout was dropped, or the
 * snapped state does not identify a TSG).
 */
static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id)
{
	u32 tsgid = FIFO_INVAL_TSG_ID;
	u32 timeout_info;
	u32 ctx_status, info_status;

	timeout_info = gk20a_readl(g,
			 fifo_intr_ctxsw_timeout_info_r(active_eng_id));

	/*
	 * ctxsw_state and tsgid are snapped at the point of the timeout and
	 * will not change while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
	 * is PENDING.
	 */
	ctx_status = fifo_intr_ctxsw_timeout_info_ctxsw_state_v(timeout_info);
	if (ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v()) {

		/* timed out while loading: blame the incoming TSG */
		tsgid = fifo_intr_ctxsw_timeout_info_next_tsgid_v(timeout_info);

	} else if (ctx_status ==
		       fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v() ||
			ctx_status ==
			fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v()) {

		/* timed out while switching/saving: blame the outgoing TSG */
		tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
	}
	gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid);

	/*
	 * STATUS indicates whether the context request ack was eventually
	 * received and whether a subsequent request timed out. This field is
	 * updated live while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
	 * is PENDING. STATUS starts in AWAITING_ACK, and progresses to
	 * ACK_RECEIVED and finally ends with DROPPED_TIMEOUT.
	 *
	 * AWAITING_ACK - context request ack still not returned from engine.
	 * ENG_WAS_RESET - The engine was reset via a PRI write to NV_PMC_ENABLE
	 * or NV_PMC_ELPG_ENABLE prior to receiving the ack.  Host will not
	 * expect ctx ack to return, but if it is already in flight, STATUS will
	 * transition shortly to ACK_RECEIVED unless the interrupt is cleared
	 * first.  Once the engine is reset, additional context switches can
	 * occur; if one times out, STATUS will transition to DROPPED_TIMEOUT
	 * if the interrupt isn't cleared first.
	 * ACK_RECEIVED - The ack for the timed-out context request was
	 * received between the point of the timeout and this register being
	 * read. Note this STATUS can be reported during the load stage of the
	 * same context switch that timed out if the timeout occurred during the
	 * save half of a context switch. Additional context requests may have
	 * completed or may be outstanding, but no further context timeout has
	 * occurred. This simplifies checking for spurious context switch
	 * timeouts.
	 * DROPPED_TIMEOUT - The originally timed-out context request acked,
	 * but a subsequent context request then timed out.
	 * Information about the subsequent timeout is not stored; in fact, that
	 * context request may also have already been acked by the time SW
	 * reads this register. If not, there is a chance SW can get the
	 * dropped information by clearing the corresponding
	 * INTR_CTXSW_TIMEOUT_ENGINE bit and waiting for the timeout to occur
	 * again. Note, however, that if the engine does time out again,
	 * it may not be from the original request that caused the
	 * DROPPED_TIMEOUT state, as that request may
	 * be acked in the interim.
	 */
	info_status = fifo_intr_ctxsw_timeout_info_status_v(timeout_info);
	if (info_status ==
		 fifo_intr_ctxsw_timeout_info_status_awaiting_ack_v()) {

		gk20a_dbg_info("ctxsw timeout info : awaiting ack");

	} else if (info_status ==
		 fifo_intr_ctxsw_timeout_info_status_eng_was_reset_v()) {

		gk20a_dbg_info("ctxsw timeout info : eng was reset");

	} else if (info_status ==
		 fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {

		gk20a_dbg_info("ctxsw timeout info : ack received");
		/* no need to recover */
		tsgid = FIFO_INVAL_TSG_ID;

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {

		gk20a_dbg_info("ctxsw timeout info : dropped timeout");
		/* no need to recover */
		tsgid = FIFO_INVAL_TSG_ID;

	} else {
		gk20a_dbg_info("ctxsw timeout info status = %u", info_status);
	}

	return tsgid;
}
1421
/*
 * Handle a ctxsw-timeout interrupt: for every engine with a pending
 * timeout, decode which TSG is responsible, and if that TSG has really
 * exceeded its timeout budget, trigger recovery on the engine.
 *
 * Returns true if any recovery was triggered, false otherwise.
 */
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
{
	bool ret = false;
	u32 tsgid = FIFO_INVAL_TSG_ID;
	u32 engine_id, active_eng_id;
	u32 timeout_val, ctxsw_timeout_engines;


	if (!(fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()))
		return ret;

	/* get ctxsw timedout engines */
	ctxsw_timeout_engines = gk20a_readl(g, fifo_intr_ctxsw_timeout_r());
	if (ctxsw_timeout_engines == 0) {
		nvgpu_err(g, "no eng ctxsw timeout pending");
		return ret;
	}

	timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r());
	timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val);

	gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val);

	for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) {
		active_eng_id = g->fifo.active_engines_list[engine_id];

		if (ctxsw_timeout_engines &
			fifo_intr_ctxsw_timeout_engine_pending_f(
				active_eng_id)) {

			struct fifo_gk20a *f = &g->fifo;
			u32 ms = 0;
			bool verbose = false;

			tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id);

			/* INVAL means the timeout resolved itself (ack
			 * received / dropped) — nothing to recover */
			if (tsgid == FIFO_INVAL_TSG_ID)
				continue;

			if (gk20a_fifo_check_tsg_ctxsw_timeout(
				&f->tsg[tsgid], &verbose, &ms)) {
				ret = true;
				nvgpu_err(g,
				 "ctxsw timeout error:"
				"active engine id =%u, %s=%d, ms=%u",
				active_eng_id, "tsg", tsgid, ms);

				/* Cancel all channels' timeout */
				gk20a_channel_timeout_restart_all_channels(g);
				gk20a_fifo_recover(g, BIT(active_eng_id), tsgid,
						true, true, verbose);
			} else {
				gk20a_dbg_info(
					"fifo is waiting for ctx switch: "
					"for %d ms, %s=%d", ms, "tsg", tsgid);
			}
		}
	}
	/* clear interrupt */
	gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ctxsw_timeout_engines);
	return ret;
}
1484
1485unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
1486 u32 pbdma_id, u32 pbdma_intr_0,
1487 u32 *handled, u32 *error_notifier)
1488{
1489 unsigned int rc_type = RC_TYPE_NO_RC;
1490
1491 rc_type = gk20a_fifo_handle_pbdma_intr_0(g, pbdma_id,
1492 pbdma_intr_0, handled, error_notifier);
1493
1494 if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) {
1495 gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d",
1496 pbdma_id);
1497 gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
1498 *handled |= pbdma_intr_0_clear_faulted_error_pending_f();
1499 rc_type = RC_TYPE_PBDMA_FAULT;
1500 }
1501
1502 if (pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) {
1503 gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d",
1504 pbdma_id);
1505 *handled |= pbdma_intr_0_eng_reset_pending_f();
1506 rc_type = RC_TYPE_PBDMA_FAULT;
1507 }
1508
1509 return rc_type;
1510}
1511
1512/*
1513 * Pbdma which encountered the ctxnotvalid interrupt will stall and
1514 * prevent the channel which was loaded at the time the interrupt fired
1515 * from being swapped out until the interrupt is cleared.
1516 * CTXNOTVALID pbdma interrupt indicates error conditions related
1517 * to the *_CTX_VALID fields for a channel. The following
1518 * conditions trigger the interrupt:
1519 * * CTX_VALID bit for the targeted engine is FALSE
1520 * * At channel start/resume, all preemptible eng have CTX_VALID FALSE but:
1521 * - CTX_RELOAD is set in CCSR_CHANNEL_STATUS,
1522 * - PBDMA_TARGET_SHOULD_SEND_HOST_TSG_EVENT is TRUE, or
1523 * - PBDMA_TARGET_NEEDS_HOST_TSG_EVENT is TRUE
1524 * The field is left NOT_PENDING and the interrupt is not raised if the PBDMA is
1525 * currently halted. This allows SW to unblock the PBDMA and recover.
1526 * SW may read METHOD0, CHANNEL_STATUS and TARGET to determine whether the
1527 * interrupt was due to an engine method, CTX_RELOAD, SHOULD_SEND_HOST_TSG_EVENT
1528 * or NEEDS_HOST_TSG_EVENT. If METHOD0 VALID is TRUE, lazy context creation
1529 * can be used or the TSG may be destroyed.
1530 * If METHOD0 VALID is FALSE, the error is likely a bug in SW, and the TSG
1531 * will have to be destroyed.
1532 */
1533
1534unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
1535 u32 pbdma_id, u32 pbdma_intr_1,
1536 u32 *handled, u32 *error_notifier)
1537{
1538 unsigned int rc_type = RC_TYPE_PBDMA_FAULT;
1539 u32 pbdma_intr_1_current = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
1540
1541 /* minimize race with the gpu clearing the pending interrupt */
1542 if (!(pbdma_intr_1_current &
1543 pbdma_intr_1_ctxnotvalid_pending_f()))
1544 pbdma_intr_1 &= ~pbdma_intr_1_ctxnotvalid_pending_f();
1545
1546 if (pbdma_intr_1 == 0)
1547 return RC_TYPE_NO_RC;
1548
1549 if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) {
1550 gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
1551 pbdma_id);
1552 nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ",
1553 pbdma_id, pbdma_intr_1);
1554 *handled |= pbdma_intr_1_ctxnotvalid_pending_f();
1555 } else{
1556 /*
1557 * rest of the interrupts in _intr_1 are "host copy engine"
1558 * related, which is not supported. For now just make them
1559 * channel fatal.
1560 */
1561 nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x",
1562 pbdma_id, pbdma_intr_1);
1563 *handled |= pbdma_intr_1;
1564 }
1565
1566 return rc_type;
1567}
1568
1569static void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g,
1570 struct channel_gk20a *ch, struct nvgpu_mem *mem)
1571{
1572 struct tsg_gk20a *tsg;
1573 struct nvgpu_mem *method_buffer_per_runque;
1574
1575 tsg = tsg_gk20a_from_ch(ch);
1576 if (tsg == NULL) {
1577 nvgpu_err(g, "channel is not part of tsg");
1578 return;
1579 }
1580 if (tsg->eng_method_buffers == NULL) {
1581 nvgpu_log_info(g, "eng method buffer NULL");
1582 return;
1583 }
1584 if (tsg->runlist_id == gk20a_fifo_get_fast_ce_runlist_id(g))
1585 method_buffer_per_runque =
1586 &tsg->eng_method_buffers[ASYNC_CE_RUNQUE];
1587 else
1588 method_buffer_per_runque =
1589 &tsg->eng_method_buffers[GR_RUNQUE];
1590
1591 nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_lo_w(),
1592 u64_lo32(method_buffer_per_runque->gpu_va));
1593 nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_hi_w(),
1594 u64_hi32(method_buffer_per_runque->gpu_va));
1595
1596 nvgpu_log_info(g, "init ramfc with method buffer");
1597}
1598
1599unsigned int gv11b_fifo_get_eng_method_buffer_size(struct gk20a *g)
1600{
1601 unsigned int buffer_size;
1602
1603 buffer_size = ((9 + 1 + 3) * g->ops.ce2.get_num_pce(g)) + 2;
1604 buffer_size = (27 * 5 * buffer_size);
1605 buffer_size = roundup(buffer_size, PAGE_SIZE);
1606 nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size);
1607
1608 return buffer_size;
1609}
1610
1611void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
1612 struct tsg_gk20a *tsg)
1613{
1614 struct vm_gk20a *vm = g->mm.bar2.vm;
1615 int err = 0;
1616 int i;
1617 unsigned int runque, method_buffer_size;
1618 unsigned int num_pbdma = g->fifo.num_pbdma;
1619
1620 if (tsg->eng_method_buffers != NULL)
1621 return;
1622
1623 method_buffer_size = gv11b_fifo_get_eng_method_buffer_size(g);
1624 if (method_buffer_size == 0) {
1625 nvgpu_info(g, "ce will hit MTHD_BUFFER_FAULT");
1626 return;
1627 }
1628
1629 tsg->eng_method_buffers = nvgpu_kzalloc(g,
1630 num_pbdma * sizeof(struct nvgpu_mem));
1631
1632 for (runque = 0; runque < num_pbdma; runque++) {
1633 err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size,
1634 &tsg->eng_method_buffers[runque]);
1635 if (err)
1636 break;
1637 }
1638 if (err) {
1639 for (i = (runque - 1); i >= 0; i--)
1640 nvgpu_dma_unmap_free(vm,
1641 &tsg->eng_method_buffers[i]);
1642
1643 nvgpu_kfree(g, tsg->eng_method_buffers);
1644 tsg->eng_method_buffers = NULL;
1645 nvgpu_err(g, "could not alloc eng method buffers");
1646 return;
1647 }
1648 nvgpu_log_info(g, "eng method buffers allocated");
1649
1650}
1651
1652void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
1653 struct tsg_gk20a *tsg)
1654{
1655 struct vm_gk20a *vm = g->mm.bar2.vm;
1656 unsigned int runque;
1657
1658 if (tsg->eng_method_buffers == NULL)
1659 return;
1660
1661 for (runque = 0; runque < g->fifo.num_pbdma; runque++)
1662 nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]);
1663
1664 nvgpu_kfree(g, tsg->eng_method_buffers);
1665 tsg->eng_method_buffers = NULL;
1666
1667 nvgpu_log_info(g, "eng method buffers de-allocated");
1668}
1669
1670#ifdef CONFIG_TEGRA_GK20A_NVHOST
1671int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
1672 u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
1673{
1674 u32 nr_pages;
1675 int err = 0;
1676 struct gk20a *g = c->g;
1677 struct vm_gk20a *vm = c->vm;
1678
1679 /*
1680 * Add ro map for complete sync point shim range in vm
1681 * All channels sharing same vm will share same ro mapping.
1682 * Create rw map for current channel sync point
1683 */
1684 if (!vm->syncpt_ro_map_gpu_va) {
1685 vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(c->vm,
1686 &g->syncpt_mem, g->syncpt_unit_size,
1687 0, gk20a_mem_flag_read_only,
1688 false, APERTURE_SYSMEM);
1689
1690 if (!vm->syncpt_ro_map_gpu_va) {
1691 nvgpu_err(g, "failed to ro map syncpt buffer");
1692 nvgpu_dma_free(g, &g->syncpt_mem);
1693 err = -ENOMEM;
1694 }
1695 }
1696
1697 nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE);
1698 __nvgpu_mem_create_from_phys(g, syncpt_buf,
1699 (g->syncpt_unit_base +
1700 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id)),
1701 nr_pages);
1702 syncpt_buf->gpu_va = nvgpu_gmmu_map(c->vm, syncpt_buf,
1703 g->syncpt_size, 0, gk20a_mem_flag_none,
1704 false, APERTURE_SYSMEM);
1705
1706 if (!syncpt_buf->gpu_va) {
1707 nvgpu_err(g, "failed to map syncpt buffer");
1708 nvgpu_dma_free(g, syncpt_buf);
1709 err = -ENOMEM;
1710 }
1711 return err;
1712}
1713
1714void gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
1715 struct nvgpu_mem *syncpt_buf)
1716{
1717 nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va);
1718 nvgpu_dma_free(c->g, syncpt_buf);
1719}
1720
1721void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
1722 struct priv_cmd_entry *cmd, u32 off,
1723 u32 id, u32 thresh, u64 gpu_va_base)
1724{
1725 u64 gpu_va = gpu_va_base +
1726 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id);
1727
1728 gk20a_dbg_fn("");
1729
1730 off = cmd->off + off;
1731
1732 /* semaphore_a */
1733 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
1734 nvgpu_mem_wr32(g, cmd->mem, off++,
1735 (gpu_va >> 32) & 0xff);
1736 /* semaphore_b */
1737 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005);
1738 /* offset */
1739 nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff);
1740
1741 /* semaphore_c */
1742 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
1743 /* payload */
1744 nvgpu_mem_wr32(g, cmd->mem, off++, thresh);
1745 /* semaphore_d */
1746 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
1747 /* operation: acq_geq, switch_en */
1748 nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12));
1749}
1750
1751u32 gv11b_fifo_get_syncpt_wait_cmd_size(void)
1752{
1753 return 8;
1754}
1755
1756void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
1757 bool wfi_cmd, struct priv_cmd_entry *cmd,
1758 u32 id, u64 gpu_va)
1759{
1760 u32 off = cmd->off;
1761
1762 gk20a_dbg_fn("");
1763
1764 /* semaphore_a */
1765 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004);
1766 nvgpu_mem_wr32(g, cmd->mem, off++,
1767 (gpu_va >> 32) & 0xff);
1768 /* semaphore_b */
1769 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005);
1770 /* offset */
1771 nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff);
1772
1773 /* semaphore_c */
1774 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006);
1775 /* payload */
1776 nvgpu_mem_wr32(g, cmd->mem, off++, 0x0);
1777 /* semaphore_d */
1778 nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007);
1779
1780 /* operation: release, wfi */
1781 nvgpu_mem_wr32(g, cmd->mem, off++,
1782 0x2 | ((wfi_cmd ? 0x0 : 0x1) << 20));
1783 /* ignored */
1784 nvgpu_mem_wr32(g, cmd->mem, off++, 0);
1785}
1786
1787u32 gv11b_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd)
1788{
1789 return 9;
1790}
1791#endif /* CONFIG_TEGRA_GK20A_NVHOST */
1792
1793int gv11b_init_fifo_setup_hw(struct gk20a *g)
1794{
1795 struct fifo_gk20a *f = &g->fifo;
1796
1797 f->t19x.max_subctx_count =
1798 gr_pri_fe_chip_def_info_max_veid_count_init_v();
1799 return 0;
1800}
1801
1802static u32 gv11b_mmu_fault_id_to_gr_veid(struct gk20a *g, u32 gr_eng_fault_id,
1803 u32 mmu_fault_id)
1804{
1805 struct fifo_gk20a *f = &g->fifo;
1806 u32 num_subctx;
1807 u32 veid = FIFO_INVAL_VEID;
1808
1809 num_subctx = f->t19x.max_subctx_count;
1810
1811 if (mmu_fault_id >= gr_eng_fault_id &&
1812 mmu_fault_id < (gr_eng_fault_id + num_subctx))
1813 veid = mmu_fault_id - gr_eng_fault_id;
1814
1815 return veid;
1816}
1817
1818static u32 gv11b_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
1819 u32 mmu_fault_id, u32 *veid)
1820{
1821 u32 engine_id;
1822 u32 active_engine_id;
1823 struct fifo_engine_info_gk20a *engine_info;
1824 struct fifo_gk20a *f = &g->fifo;
1825
1826
1827 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
1828 active_engine_id = f->active_engines_list[engine_id];
1829 engine_info = &g->fifo.engine_info[active_engine_id];
1830
1831 if (active_engine_id == ENGINE_GR_GK20A) {
1832 /* get faulted subctx id */
1833 *veid = gv11b_mmu_fault_id_to_gr_veid(g,
1834 engine_info->fault_id, mmu_fault_id);
1835 if (*veid != FIFO_INVAL_VEID)
1836 break;
1837 } else {
1838 if (engine_info->fault_id == mmu_fault_id)
1839 break;
1840 }
1841
1842 active_engine_id = FIFO_INVAL_ENGINE_ID;
1843 }
1844 return active_engine_id;
1845}
1846
1847static u32 gv11b_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
1848{
1849 u32 num_pbdma, reg_val, fault_id_pbdma0;
1850
1851 reg_val = gk20a_readl(g, fifo_cfg0_r());
1852 num_pbdma = fifo_cfg0_num_pbdma_v(reg_val);
1853 fault_id_pbdma0 = fifo_cfg0_pbdma_fault_id_v(reg_val);
1854
1855 if (mmu_fault_id >= fault_id_pbdma0 &&
1856 mmu_fault_id <= fault_id_pbdma0 + num_pbdma - 1)
1857 return mmu_fault_id - fault_id_pbdma0;
1858
1859 return FIFO_INVAL_PBDMA_ID;
1860}
1861
1862void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
1863 u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id)
1864{
1865 *active_engine_id = gv11b_mmu_fault_id_to_eng_id_and_veid(g,
1866 mmu_fault_id, veid);
1867
1868 if (*active_engine_id == FIFO_INVAL_ENGINE_ID)
1869 *pbdma_id = gv11b_mmu_fault_id_to_pbdma_id(g, mmu_fault_id);
1870 else
1871 *pbdma_id = FIFO_INVAL_PBDMA_ID;
1872}
1873
1874static bool gk20a_fifo_channel_status_is_eng_faulted(struct gk20a *g, u32 chid)
1875{
1876 u32 channel = gk20a_readl(g, ccsr_channel_r(chid));
1877
1878 return ccsr_channel_eng_faulted_v(channel) ==
1879 ccsr_channel_eng_faulted_true_v();
1880}
1881
/*
 * If @ch is flagged ENG_FAULTED and its chid matches the channel id saved
 * in the TSG's async-CE method buffer, invalidate that buffer by zeroing
 * its method count.  No-op when the channel is not faulted or the TSG has
 * no method buffers.
 */
void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];

	/*
	 * If channel has FAULTED set, clear the CE method buffer
	 * if saved out channel is same as faulted channel
	 */
	if (!gk20a_fifo_channel_status_is_eng_faulted(g, ch->chid))
		return;

	if (tsg->eng_method_buffers == NULL)
		return;

	/*
	 * CE method buffer format :
	 * DWord0 = method count
	 * DWord1 = channel id
	 *
	 * It is sufficient to write 0 to method count to invalidate
	 */
	if ((u32)ch->chid ==
		nvgpu_mem_rd32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 1))
		nvgpu_mem_wr32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 0, 0);
}
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
new file mode 100644
index 00000000..fc1ddf83
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h
@@ -0,0 +1,117 @@
1/*
2 * GV11B Fifo
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef FIFO_GV11B_H
#define FIFO_GV11B_H

/* All-ones sentinels: "fault id matched no PBDMA / no VEID". */
#define FIFO_INVAL_PBDMA_ID ((u32)~0)
#define FIFO_INVAL_VEID ((u32)~0)

/* engine context-switch request occurred while the engine was in reset */
#define SCHED_ERROR_CODE_ENGINE_RESET 0x00000005

/*
* ERROR_CODE_BAD_TSG indicates that Host encountered a badly formed TSG header
* or a badly formed channel type runlist entry in the runlist. This is typically
* caused by encountering a new TSG entry in the middle of a TSG definition.
* A channel type entry having wrong runqueue selector can also cause this.
* Additionally this error code can indicate when a channel is encountered on
* the runlist which is outside of a TSG.
*/
#define SCHED_ERROR_CODE_BAD_TSG 0x00000020

/* can be removed after runque support is added */

/* Fixed runqueue-to-PBDMA assignment used for the engine method buffers. */
#define GR_RUNQUE 0 /* pbdma 0 */
#define ASYNC_CE_RUNQUE 2 /* pbdma 2 */

#define CHANNEL_INFO_VEID0 0

struct gpu_ops;

/* MMU fault decoding helpers. */
void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
		struct channel_gk20a *refch,
		u32 faulted_pbdma, u32 faulted_engine);
void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
	u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id);

/* Runlist / RAMFC / USERD channel plumbing. */
void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist);
void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist);
int channel_gv11b_setup_ramfc(struct channel_gk20a *c,
		u64 gpfifo_base, u32 gpfifo_entries,
		unsigned long acquire_timeout, u32 flags);
u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c);
u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c);
void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c);
void channel_gv11b_unbind(struct channel_gk20a *ch);
u32 gv11b_fifo_get_num_fifos(struct gk20a *g);
bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid);
void gv11b_dump_channel_status_ramfc(struct gk20a *g,
		struct gk20a_debug_output *o,
		u32 chid,
		struct ch_state *ch_state);
void gv11b_dump_eng_status(struct gk20a *g,
		struct gk20a_debug_output *o);
u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g);

/* Preemption and recovery. */
int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
		unsigned int id_type, unsigned int timeout_rc_type);
int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid);
int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid);
int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg);
int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id,
		unsigned int id_type, unsigned int timeout_rc_type);
void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
		u32 id, unsigned int id_type, unsigned int rc_type,
		struct mmu_fault_info *mmfault);

/* Interrupt handling. */
void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f);
int gv11b_init_fifo_reset_enable_hw(struct gk20a *g);
bool gv11b_fifo_handle_sched_error(struct gk20a *g);
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr);
unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g,
			u32 pbdma_id, u32 pbdma_intr_0,
			u32 *handled, u32 *error_notifier);
unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
			u32 pbdma_id, u32 pbdma_intr_1,
			u32 *handled, u32 *error_notifier);

/* Engine method buffers (one per PBDMA, owned by the TSG). */
void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
					struct tsg_gk20a *tsg);
void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
					struct tsg_gk20a *tsg);

/* Syncpoint shim mapping and command generation. */
int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
			u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
void gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c,
			struct nvgpu_mem *syncpt_buf);
void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g,
		struct priv_cmd_entry *cmd, u32 off,
		u32 id, u32 thresh, u64 gpu_va_base);
u32 gv11b_fifo_get_syncpt_wait_cmd_size(void);
void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g,
		bool wfi_cmd, struct priv_cmd_entry *cmd,
		u32 id, u64 gpu_va_base);
u32 gv11b_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd);
int gv11b_init_fifo_setup_hw(struct gk20a *g);

void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch);
u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g);
#endif
diff --git a/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c
new file mode 100644
index 00000000..514aadb1
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c
@@ -0,0 +1,72 @@
1/*
2 *
3 * GV11B Graphics Context
4 *
5 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include "gk20a/gk20a.h"
27
28#include "gr_ctx_gv11b.h"
29
/*
 * Resolve the firmware netlist image name for a given netlist slot.
 *
 * @g:     GPU struct (unused; kept for the HAL signature)
 * @index: NETLIST_* slot to look up
 * @name:  out buffer receiving the image file name
 *
 * Returns 0 and fills @name when the slot has an image configured at build
 * time, -1 otherwise.
 *
 * Fixes: the image macro was passed to sprintf() as the format string
 * (a '%' in a name would corrupt the output); now copied via "%s".  The
 * unreachable trailing return after the exhaustive switch is removed.
 */
int gr_gv11b_get_netlist_name(struct gk20a *g, int index, char *name)
{
	switch (index) {
#ifdef GV11B_NETLIST_IMAGE_FW_NAME
	case NETLIST_FINAL:
		sprintf(name, "%s", GV11B_NETLIST_IMAGE_FW_NAME);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_A
	case NETLIST_SLOT_A:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_A);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_B
	case NETLIST_SLOT_B:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_B);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_C
	case NETLIST_SLOT_C:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_C);
		return 0;
#endif
#ifdef GK20A_NETLIST_IMAGE_D
	case NETLIST_SLOT_D:
		sprintf(name, "%s", GK20A_NETLIST_IMAGE_D);
		return 0;
#endif
	default:
		/* slot has no image configured at build time */
		return -1;
	}
}
64
/*
 * True when a GV11B netlist firmware image name is configured at build
 * time (i.e. GV11B_NETLIST_IMAGE_FW_NAME is defined).
 */
bool gr_gv11b_is_firmware_defined(void)
{
#ifdef GV11B_NETLIST_IMAGE_FW_NAME
	return true;
#else
	return false;
#endif
}
diff --git a/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h
new file mode 100644
index 00000000..0a95ab11
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h
@@ -0,0 +1,36 @@
1/*
2 * GV11B Graphics Context
3 *
4 * Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
#ifndef __GR_CTX_GV11B_H__
#define __GR_CTX_GV11B_H__

#include "gk20a/gr_ctx_gk20a.h"

/* Define netlist for silicon only */

/* GV11B reuses the GK20A slot-D image name as its final netlist. */
#define GV11B_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D

int gr_gv11b_get_netlist_name(struct gk20a *g, int index, char *name);
bool gr_gv11b_is_firmware_defined(void);

#endif /*__GR_CTX_GV11B_H__*/
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
new file mode 100644
index 00000000..3d817d7e
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -0,0 +1,3639 @@
1/*
2 * GV11b GPU GR
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/delay.h>
26#include <linux/version.h>
27#include <linux/vmalloc.h>
28#include <linux/tegra_gpu_t19x.h>
29#include <uapi/linux/nvgpu.h>
30
31#include <soc/tegra/fuse.h>
32
33#include <nvgpu/timers.h>
34#include <nvgpu/gmmu.h>
35#include <nvgpu/dma.h>
36#include <nvgpu/log.h>
37#include <nvgpu/debug.h>
38#include <nvgpu/enabled.h>
39
40#include "gk20a/gk20a.h"
41#include "gk20a/gr_gk20a.h"
42#include "gk20a/dbg_gpu_gk20a.h"
43#include "gk20a/regops_gk20a.h"
44#include "gk20a/gr_pri_gk20a.h"
45
46#include "gm20b/gr_gm20b.h"
47
48#include "gp10b/gr_gp10b.h"
49
50#include "gv11b/gr_gv11b.h"
51#include "gv11b/mm_gv11b.h"
52#include "gv11b/subctx_gv11b.h"
53
54#include <nvgpu/hw/gv11b/hw_gr_gv11b.h>
55#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
56#include <nvgpu/hw/gv11b/hw_proj_gv11b.h>
57#include <nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h>
58#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
59#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
60#include <nvgpu/hw/gv11b/hw_pbdma_gv11b.h>
61#include <nvgpu/hw/gv11b/hw_therm_gv11b.h>
62#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
63
64bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num)
65{
66 bool valid = false;
67
68 switch (class_num) {
69 case VOLTA_COMPUTE_A:
70 case VOLTA_A:
71 case VOLTA_DMA_COPY_A:
72 valid = true;
73 break;
74
75 case MAXWELL_COMPUTE_B:
76 case MAXWELL_B:
77 case FERMI_TWOD_A:
78 case KEPLER_DMA_COPY_A:
79 case MAXWELL_DMA_COPY_A:
80 case PASCAL_COMPUTE_A:
81 case PASCAL_A:
82 case PASCAL_DMA_COPY_A:
83 valid = true;
84 break;
85
86 default:
87 break;
88 }
89 gk20a_dbg_info("class=0x%x valid=%d", class_num, valid);
90 return valid;
91}
92
93bool gr_gv11b_is_valid_gfx_class(struct gk20a *g, u32 class_num)
94{
95 bool valid = false;
96
97 switch (class_num) {
98 case VOLTA_A:
99 case PASCAL_A:
100 case MAXWELL_B:
101 valid = true;
102 break;
103
104 default:
105 break;
106 }
107 return valid;
108}
109
110bool gr_gv11b_is_valid_compute_class(struct gk20a *g, u32 class_num)
111{
112 bool valid = false;
113
114 switch (class_num) {
115 case VOLTA_COMPUTE_A:
116 case PASCAL_COMPUTE_A:
117 case MAXWELL_COMPUTE_B:
118 valid = true;
119 break;
120
121 default:
122 break;
123 }
124 return valid;
125}
126
127static u32 gv11b_gr_sm_offset(struct gk20a *g, u32 sm)
128{
129
130 u32 sm_pri_stride = nvgpu_get_litter_value(g, GPU_LIT_SM_PRI_STRIDE);
131 u32 sm_offset = sm_pri_stride * sm;
132
133 return sm_offset;
134}
135
/*
 * Handle an SM L1-tag ECC exception for (gpc, tpc): read the ECC status,
 * accumulate corrected/uncorrected error counts into g->ecc, clear the HW
 * counters and reset the status register.  Always returns 0.
 * post_event, fault_ch and hww_global_esr are unused here (kept for the
 * common exception-handler signature).
 */
static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event, struct channel_gk20a *fault_ch,
		u32 *hww_global_esr)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
	u32 l1_tag_ecc_status, l1_tag_ecc_corrected_err_status = 0;
	u32 l1_tag_ecc_uncorrected_err_status = 0;
	u32 l1_tag_corrected_err_count_delta = 0;
	u32 l1_tag_uncorrected_err_count_delta = 0;
	bool is_l1_tag_ecc_corrected_total_err_overflow = 0;
	bool is_l1_tag_ecc_uncorrected_total_err_overflow = 0;

	/* Check for L1 tag ECC errors. */
	l1_tag_ecc_status = gk20a_readl(g,
		gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r() + offset);
	/* Mask out the per-source corrected / uncorrected error bits. */
	l1_tag_ecc_corrected_err_status = l1_tag_ecc_status &
		(gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m() |
		 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_1_m() |
		 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_pixrpf_m() |
		 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_miss_fifo_m());
	l1_tag_ecc_uncorrected_err_status = l1_tag_ecc_status &
		(gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m() |
		 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_1_m() |
		 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_pixrpf_m() |
		 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_miss_fifo_m());

	/* Nothing pending for this TPC. */
	if ((l1_tag_ecc_corrected_err_status == 0) && (l1_tag_ecc_uncorrected_err_status == 0))
		return 0;

	/* Snapshot the HW total-error counters. */
	l1_tag_corrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r() +
				offset));
	l1_tag_uncorrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r() +
				offset));
	is_l1_tag_ecc_corrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_total_counter_overflow_v(l1_tag_ecc_status);
	is_l1_tag_ecc_uncorrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status);

	if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
			l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow);

		/* HW uses 16-bits counter */
		l1_tag_corrected_err_count_delta +=
			(is_l1_tag_ecc_corrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s());
		/* NOTE(review): counter indexed by tpc only — confirm the
		 * array is laid out per-GPC at allocation time. */
		g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters[tpc] +=
			l1_tag_corrected_err_count_delta;
		/* Clear the HW counter so the next read is a fresh delta. */
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r() + offset,
			0);
	}
	if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"Uncorrected error (DBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]",
			l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow);

		/* HW uses 16-bits counter */
		l1_tag_uncorrected_err_count_delta +=
			(is_l1_tag_ecc_uncorrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s());
		g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count.counters[tpc] +=
			l1_tag_uncorrected_err_count_delta;
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r() + offset,
			0);
	}

	/* Acknowledge the exception by resetting the status register. */
	gk20a_writel(g, gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r() + offset,
			gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_reset_task_f());

	return 0;

}
219
/*
 * Handle an SM LRF (register file) ECC exception for (gpc, tpc): read the
 * ECC status, accumulate corrected/uncorrected error counts into g->ecc,
 * clear the HW counters and reset the status register.  Always returns 0.
 * post_event, fault_ch and hww_global_esr are unused here (kept for the
 * common exception-handler signature).
 */
static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event, struct channel_gk20a *fault_ch,
		u32 *hww_global_esr)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
	u32 lrf_ecc_status, lrf_ecc_corrected_err_status = 0;
	u32 lrf_ecc_uncorrected_err_status = 0;
	u32 lrf_corrected_err_count_delta = 0;
	u32 lrf_uncorrected_err_count_delta = 0;
	bool is_lrf_ecc_corrected_total_err_overflow = 0;
	bool is_lrf_ecc_uncorrected_total_err_overflow = 0;

	/* Check for LRF ECC errors. */
	lrf_ecc_status = gk20a_readl(g,
		gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset);
	/* Mask out corrected / uncorrected bits across all 8 QRFDP banks. */
	lrf_ecc_corrected_err_status = lrf_ecc_status &
		(gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m());
	lrf_ecc_uncorrected_err_status = lrf_ecc_status &
		(gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m() |
		 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m());

	/* Nothing pending for this TPC. */
	if ((lrf_ecc_corrected_err_status == 0) && (lrf_ecc_uncorrected_err_status == 0))
		return 0;

	/* Snapshot the HW total-error counters. */
	lrf_corrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r() +
				offset));
	lrf_uncorrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r() +
				offset));
	is_lrf_ecc_corrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(lrf_ecc_status);
	is_lrf_ecc_uncorrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status);

	if ((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
			lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow);

		/* HW uses 16-bits counter */
		lrf_corrected_err_count_delta +=
			(is_lrf_ecc_corrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s());
		/* Accumulated in the t18x counter block (shared with gp10b). */
		g->ecc.gr.t18x.sm_lrf_single_err_count.counters[tpc] +=
			lrf_corrected_err_count_delta;
		/* Clear the HW counter so the next read is a fresh delta. */
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r() + offset,
			0);
	}
	if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]",
			lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow);

		/* HW uses 16-bits counter */
		lrf_uncorrected_err_count_delta +=
			(is_lrf_ecc_uncorrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s());
		g->ecc.gr.t18x.sm_lrf_double_err_count.counters[tpc] +=
			lrf_uncorrected_err_count_delta;
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r() + offset,
			0);
	}

	/* Acknowledge the exception by resetting the status register. */
	gk20a_writel(g, gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset,
			gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f());

	return 0;

}
311
312void gr_gv11b_enable_hww_exceptions(struct gk20a *g)
313{
314 /* enable exceptions */
315 gk20a_writel(g, gr_fe_hww_esr_r(),
316 gr_fe_hww_esr_en_enable_f() |
317 gr_fe_hww_esr_reset_active_f());
318 gk20a_writel(g, gr_memfmt_hww_esr_r(),
319 gr_memfmt_hww_esr_en_enable_f() |
320 gr_memfmt_hww_esr_reset_active_f());
321}
322
323void gr_gv11b_enable_exceptions(struct gk20a *g)
324{
325 struct gr_gk20a *gr = &g->gr;
326 u32 reg_val;
327
328 /*
329 * clear exceptions :
330 * other than SM : hww_esr are reset in *enable_hww_excetpions*
331 * SM : cleared in *set_hww_esr_report_mask*
332 */
333
334 /* enable exceptions */
335 gk20a_writel(g, gr_exception2_en_r(), 0x0); /* BE not enabled */
336 gk20a_writel(g, gr_exception1_en_r(), (1 << gr->gpc_count) - 1);
337
338 reg_val = gr_exception_en_fe_enabled_f() |
339 gr_exception_en_memfmt_enabled_f() |
340 gr_exception_en_ds_enabled_f() |
341 gr_exception_en_gpc_enabled_f();
342 gk20a_writel(g, gr_exception_en_r(), reg_val);
343
344}
345
/*
 * Service a CBU ECC exception for one TPC.
 *
 * Reads the CBU ECC status register, folds the HW corrected/uncorrected
 * error counters into the driver's per-TPC running totals
 * (g->ecc.gr.t19x.sm_cbu_*_err_count), then zeroes the HW counters and
 * resets the status register.
 *
 * post_event, fault_ch and hww_global_esr keep the signature parallel to
 * the other SM exception handlers but are not used here.
 * Always returns 0.
 */
static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event, struct channel_gk20a *fault_ch,
		u32 *hww_global_esr)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	/* byte offset of this GPC/TPC's register block from the gpc0/tpc0 base */
	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
	u32 cbu_ecc_status, cbu_ecc_corrected_err_status = 0;
	u32 cbu_ecc_uncorrected_err_status = 0;
	u32 cbu_corrected_err_count_delta = 0;
	u32 cbu_uncorrected_err_count_delta = 0;
	bool is_cbu_ecc_corrected_total_err_overflow = 0;
	bool is_cbu_ecc_uncorrected_total_err_overflow = 0;

	/* Check for CBU ECC errors. */
	cbu_ecc_status = gk20a_readl(g,
		gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r() + offset);
	/* corrected errors: warp and barrier state, both SM halves */
	cbu_ecc_corrected_err_status = cbu_ecc_status &
		(gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m() |
		 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m() |
		 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m() |
		 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m());
	cbu_ecc_uncorrected_err_status = cbu_ecc_status &
		(gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m() |
		 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m() |
		 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m() |
		 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m());

	/* nothing pending for this TPC */
	if ((cbu_ecc_corrected_err_status == 0) && (cbu_ecc_uncorrected_err_status == 0))
		return 0;

	cbu_corrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r() +
				offset));
	cbu_uncorrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r() +
				offset));
	is_cbu_ecc_corrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(cbu_ecc_status);
	is_cbu_ecc_uncorrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status);

	if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"corrected error (SBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
			cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow);

		/* HW uses 16-bits counter */
		cbu_corrected_err_count_delta +=
			(is_cbu_ecc_corrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s());
		g->ecc.gr.t19x.sm_cbu_corrected_err_count.counters[tpc] +=
			cbu_corrected_err_count_delta;
		/* zero the HW counter now that it has been accumulated */
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r() + offset,
			0);
	}
	if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]",
			cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow);

		/* HW uses 16-bits counter */
		cbu_uncorrected_err_count_delta +=
			(is_cbu_ecc_uncorrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s());
		g->ecc.gr.t19x.sm_cbu_uncorrected_err_count.counters[tpc] +=
			cbu_uncorrected_err_count_delta;
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r() + offset,
			0);
	}

	/* acknowledge the exception by resetting the status register */
	gk20a_writel(g, gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r() + offset,
			gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f());

	return 0;

}
429
/*
 * Service an SM L1 data cache ECC exception for one TPC.
 *
 * Mirrors the CBU handler: read status, accumulate the 16-bit HW
 * corrected/uncorrected counters (widened on overflow) into
 * g->ecc.gr.t19x.sm_l1_data_*_err_count.counters[tpc], clear the HW
 * counters and reset the status register.
 *
 * post_event, fault_ch and hww_global_esr are unused; kept for signature
 * symmetry with the other SM exception handlers. Always returns 0.
 */
static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event, struct channel_gk20a *fault_ch,
		u32 *hww_global_esr)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	/* byte offset of this GPC/TPC's register block from the gpc0/tpc0 base */
	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
	u32 l1_data_ecc_status, l1_data_ecc_corrected_err_status = 0;
	u32 l1_data_ecc_uncorrected_err_status = 0;
	u32 l1_data_corrected_err_count_delta = 0;
	u32 l1_data_uncorrected_err_count_delta = 0;
	bool is_l1_data_ecc_corrected_total_err_overflow = 0;
	bool is_l1_data_ecc_uncorrected_total_err_overflow = 0;

	/* Check for L1 data ECC errors. */
	l1_data_ecc_status = gk20a_readl(g,
		gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r() + offset);
	/* errors are reported separately for the el1_0 and el1_1 banks */
	l1_data_ecc_corrected_err_status = l1_data_ecc_status &
		(gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m() |
		 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m());
	l1_data_ecc_uncorrected_err_status = l1_data_ecc_status &
		(gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m() |
		 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m());

	/* nothing pending for this TPC */
	if ((l1_data_ecc_corrected_err_status == 0) && (l1_data_ecc_uncorrected_err_status == 0))
		return 0;

	l1_data_corrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r() +
				offset));
	l1_data_uncorrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r() +
				offset));
	is_l1_data_ecc_corrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(l1_data_ecc_status);
	is_l1_data_ecc_uncorrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status);

	if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"corrected error (SBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
			l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow);

		/* HW uses 16-bits counter */
		l1_data_corrected_err_count_delta +=
			(is_l1_data_ecc_corrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s());
		g->ecc.gr.t19x.sm_l1_data_corrected_err_count.counters[tpc] +=
			l1_data_corrected_err_count_delta;
		/* zero the HW counter now that it has been accumulated */
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r() + offset,
			0);
	}
	if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]",
			l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow);

		/* HW uses 16-bits counter */
		l1_data_uncorrected_err_count_delta +=
			(is_l1_data_ecc_uncorrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s());
		g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count.counters[tpc] +=
			l1_data_uncorrected_err_count_delta;
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r() + offset,
			0);
	}

	/* acknowledge the exception by resetting the status register */
	gk20a_writel(g, gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r() + offset,
			gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f());

	return 0;

}
509
/*
 * Service an SM instruction-cache (L0 and L1) ECC exception for one TPC.
 *
 * Same flow as the other SM handlers: read status, accumulate the 16-bit
 * HW corrected/uncorrected counters (widened on overflow) into
 * g->ecc.gr.t19x.sm_icache_*_err_count.counters[tpc], clear the HW
 * counters and reset the status register.
 *
 * post_event, fault_ch and hww_global_esr are unused; kept for signature
 * symmetry with the other SM exception handlers. Always returns 0.
 */
static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event, struct channel_gk20a *fault_ch,
		u32 *hww_global_esr)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
	/* byte offset of this GPC/TPC's register block from the gpc0/tpc0 base */
	u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc;
	u32 icache_ecc_status, icache_ecc_corrected_err_status = 0;
	u32 icache_ecc_uncorrected_err_status = 0;
	u32 icache_corrected_err_count_delta = 0;
	u32 icache_uncorrected_err_count_delta = 0;
	bool is_icache_ecc_corrected_total_err_overflow = 0;
	bool is_icache_ecc_uncorrected_total_err_overflow = 0;

	/* Check for L0 && L1 icache ECC errors. */
	icache_ecc_status = gk20a_readl(g,
		gr_pri_gpc0_tpc0_sm_icache_ecc_status_r() + offset);
	/* data and predecode errors, for both the L0 and L1 levels */
	icache_ecc_corrected_err_status = icache_ecc_status &
		(gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_data_m() |
		 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_predecode_m() |
		 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_data_m() |
		 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_predecode_m());
	icache_ecc_uncorrected_err_status = icache_ecc_status &
		(gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_data_m() |
		 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_predecode_m() |
		 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_data_m() |
		 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_predecode_m());

	/* nothing pending for this TPC */
	if ((icache_ecc_corrected_err_status == 0) && (icache_ecc_uncorrected_err_status == 0))
		return 0;

	icache_corrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r() +
				offset));
	icache_uncorrected_err_count_delta =
		gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r() +
				offset));
	is_icache_ecc_corrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_total_counter_overflow_v(icache_ecc_status);
	is_icache_ecc_uncorrected_total_err_overflow =
		gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status);

	if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"corrected error (SBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
			icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow);

		/* HW uses 16-bits counter */
		icache_corrected_err_count_delta +=
			(is_icache_ecc_corrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s());
		g->ecc.gr.t19x.sm_icache_corrected_err_count.counters[tpc] +=
			icache_corrected_err_count_delta;
		/* zero the HW counter now that it has been accumulated */
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r() + offset,
			0);
	}
	if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) {
		gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr,
			"Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]",
			icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow);

		/* HW uses 16-bits counter */
		icache_uncorrected_err_count_delta +=
			(is_icache_ecc_uncorrected_total_err_overflow <<
			 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s());
		g->ecc.gr.t19x.sm_icache_uncorrected_err_count.counters[tpc] +=
			icache_uncorrected_err_count_delta;
		gk20a_writel(g,
			gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r() + offset,
			0);
	}

	/* acknowledge the exception by resetting the status register */
	gk20a_writel(g, gr_pri_gpc0_tpc0_sm_icache_ecc_status_r() + offset,
			gr_pri_gpc0_tpc0_sm_icache_ecc_status_reset_task_f());

	return 0;

}
593
594int gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g,
595 u32 gpc, u32 tpc,
596 bool *post_event, struct channel_gk20a *fault_ch,
597 u32 *hww_global_esr)
598{
599 int ret = 0;
600
601 /* Check for L1 tag ECC errors. */
602 gr_gv11b_handle_l1_tag_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr);
603
604 /* Check for LRF ECC errors. */
605 gr_gv11b_handle_lrf_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr);
606
607 /* Check for CBU ECC errors. */
608 gr_gv11b_handle_cbu_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr);
609
610 /* Check for L1 data ECC errors. */
611 gr_gv11b_handle_l1_data_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr);
612
613 /* Check for L0 && L1 icache ECC errors. */
614 gr_gv11b_handle_icache_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr);
615
616 return ret;
617}
618
/*
 * Service a GCC L1.5 cache ECC exception for one GPC.
 *
 * Reads the GCC L1.5 ECC status, accumulates the 16-bit HW
 * corrected/uncorrected counters (widened on overflow) into the per-GPC
 * totals g->ecc.gr.t19x.gcc_l15_*_err_count.counters[gpc], clears the
 * HW counters and resets the status register.
 *
 * tpc, post_event, fault_ch and hww_global_esr are unused here; the GCC
 * is a per-GPC unit and the offset depends only on the GPC stride.
 * Always returns 0.
 */
int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc,
		bool *post_event, struct channel_gk20a *fault_ch,
		u32 *hww_global_esr)
{
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	/* per-GPC register block offset; no TPC component for the GCC */
	u32 offset = gpc_stride * gpc;
	u32 gcc_l15_ecc_status, gcc_l15_ecc_corrected_err_status = 0;
	u32 gcc_l15_ecc_uncorrected_err_status = 0;
	u32 gcc_l15_corrected_err_count_delta = 0;
	u32 gcc_l15_uncorrected_err_count_delta = 0;
	bool is_gcc_l15_ecc_corrected_total_err_overflow = 0;
	bool is_gcc_l15_ecc_uncorrected_total_err_overflow = 0;

	/* Check for gcc l15 ECC errors. */
	gcc_l15_ecc_status = gk20a_readl(g,
		gr_pri_gpc0_gcc_l15_ecc_status_r() + offset);
	/* errors are reported per cache bank (bank0/bank1) */
	gcc_l15_ecc_corrected_err_status = gcc_l15_ecc_status &
		(gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank0_m() |
		 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank1_m());
	gcc_l15_ecc_uncorrected_err_status = gcc_l15_ecc_status &
		(gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank0_m() |
		 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank1_m());

	/* nothing pending for this GPC */
	if ((gcc_l15_ecc_corrected_err_status == 0) && (gcc_l15_ecc_uncorrected_err_status == 0))
		return 0;

	gcc_l15_corrected_err_count_delta =
		gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r() +
				offset));
	gcc_l15_uncorrected_err_count_delta =
		gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_v(
			gk20a_readl(g,
				gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r() +
				offset));
	is_gcc_l15_ecc_corrected_total_err_overflow =
		gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_total_counter_overflow_v(gcc_l15_ecc_status);
	is_gcc_l15_ecc_uncorrected_total_err_overflow =
		gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_total_counter_overflow_v(gcc_l15_ecc_status);

	if ((gcc_l15_corrected_err_count_delta > 0) || is_gcc_l15_ecc_corrected_total_err_overflow) {
		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
			"corrected error (SBE) detected in GCC L1.5! err_mask [%08x] is_overf [%d]",
			gcc_l15_ecc_corrected_err_status, is_gcc_l15_ecc_corrected_total_err_overflow);

		/* HW uses 16-bits counter */
		gcc_l15_corrected_err_count_delta +=
			(is_gcc_l15_ecc_corrected_total_err_overflow <<
			 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s());
		g->ecc.gr.t19x.gcc_l15_corrected_err_count.counters[gpc] +=
			gcc_l15_corrected_err_count_delta;
		/* zero the HW counter now that it has been accumulated */
		gk20a_writel(g,
			gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r() + offset,
			0);
	}
	if ((gcc_l15_uncorrected_err_count_delta > 0) || is_gcc_l15_ecc_uncorrected_total_err_overflow) {
		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr,
			"Uncorrected error (DBE) detected in GCC L1.5! err_mask [%08x] is_overf [%d]",
			gcc_l15_ecc_uncorrected_err_status, is_gcc_l15_ecc_uncorrected_total_err_overflow);

		/* HW uses 16-bits counter */
		gcc_l15_uncorrected_err_count_delta +=
			(is_gcc_l15_ecc_uncorrected_total_err_overflow <<
			 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s());
		g->ecc.gr.t19x.gcc_l15_uncorrected_err_count.counters[gpc] +=
			gcc_l15_uncorrected_err_count_delta;
		gk20a_writel(g,
			gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r() + offset,
			0);
	}

	/* acknowledge the exception by resetting the status register */
	gk20a_writel(g, gr_pri_gpc0_gcc_l15_ecc_status_r() + offset,
		gr_pri_gpc0_gcc_l15_ecc_status_reset_task_f());

	return 0;
}
696
/*
 * Service a GPCMMU L1TLB ECC exception for one GPC.
 *
 * Checks the GPCMMU global ESR for pending ECC bits, accumulates the HW
 * corrected/uncorrected counters (widened on overflow) into the per-GPC
 * totals g->ecc.gr.t19x.mmu_l1tlb_*_err_count.counters[gpc], clears the
 * HW counters, resets the status register, and logs which sub-unit
 * (SA/FA data) reported the error. Always returns 0.
 */
static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc,
		u32 exception)
{
	int ret = 0;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	/* per-GPC register block offset */
	u32 offset = gpc_stride * gpc;
	u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;
	/* NOTE(review): holds a u32 register value; consider making this u32 */
	int hww_esr;

	hww_esr = gk20a_readl(g, gr_gpc0_mmu_gpcmmu_global_esr_r() + offset);

	/* no ECC interrupt pending for this GPC */
	if (!(hww_esr & (gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m() |
			 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m())))
		return ret;

	ecc_status = gk20a_readl(g,
		gr_gpc0_mmu_l1tlb_ecc_status_r() + offset);
	ecc_addr = gk20a_readl(g,
		gr_gpc0_mmu_l1tlb_ecc_address_r() + offset);
	corrected_cnt = gk20a_readl(g,
		gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r() + offset);
	uncorrected_cnt = gk20a_readl(g,
		gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r() + offset);

	corrected_delta = gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_m();


	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g,
			gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r() +
			offset, 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g,
			gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r() +
			offset, 0);

	gk20a_writel(g, gr_gpc0_mmu_l1tlb_ecc_status_r() + offset,
		gr_gpc0_mmu_l1tlb_ecc_status_reset_task_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s());


	g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count.counters[gpc] +=
		corrected_delta;
	g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count.counters[gpc] +=
		uncorrected_delta;
	nvgpu_log(g, gpu_dbg_intr,
		"mmu l1tlb gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr);

	/* log which L1TLB sub-unit flagged the error */
	if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc fa data error");
	if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc fa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu l1tlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count.counters[gpc],
		g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count.counters[gpc]);

	return ret;
}
781
/*
 * Service a GPCCS falcon ECC exception for one GPC.
 *
 * Checks the GPCCS hww_esr for pending ECC bits, accumulates the HW
 * corrected/uncorrected counters (widened on overflow) into the per-GPC
 * totals g->ecc.gr.t19x.gpccs_*_err_count.counters[gpc], clears the HW
 * counters, resets the status register, and logs whether the error came
 * from the falcon's IMEM or DMEM. Always returns 0.
 */
static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc,
		u32 exception)
{
	int ret = 0;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	/* per-GPC register block offset */
	u32 offset = gpc_stride * gpc;
	u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;
	/* NOTE(review): holds a u32 register value; consider making this u32 */
	int hww_esr;

	hww_esr = gk20a_readl(g, gr_gpc0_gpccs_hww_esr_r() + offset);

	/* no ECC interrupt pending for this GPC */
	if (!(hww_esr & (gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m() |
			 gr_gpc0_gpccs_hww_esr_ecc_corrected_m())))
		return ret;

	ecc_status = gk20a_readl(g,
		gr_gpc0_gpccs_falcon_ecc_status_r() + offset);
	ecc_addr = gk20a_readl(g,
		gr_gpc0_gpccs_falcon_ecc_address_r() + offset);
	corrected_cnt = gk20a_readl(g,
		gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r() + offset);
	uncorrected_cnt = gk20a_readl(g,
		gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r() + offset);

	corrected_delta = gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m();


	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g,
			gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r() +
			offset, 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g,
			gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r() +
			offset, 0);

	gk20a_writel(g, gr_gpc0_gpccs_falcon_ecc_status_r() + offset,
		gr_gpc0_gpccs_falcon_ecc_status_reset_task_f());

	g->ecc.gr.t19x.gpccs_corrected_err_count.counters[gpc] +=
		corrected_delta;
	g->ecc.gr.t19x.gpccs_uncorrected_err_count.counters[gpc] +=
		uncorrected_delta;
	nvgpu_log(g, gpu_dbg_intr,
		"gppcs gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr);

	/* log which falcon memory flagged the error */
	if (ecc_status & gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m())
		nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected");
	if (ecc_status &
		gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m())
		nvgpu_log(g, gpu_dbg_intr, "imem ecc error uncorrected");
	if (ecc_status &
		gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m())
		nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected");
	if (ecc_status &
		gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m())
		nvgpu_log(g, gpu_dbg_intr, "dmem ecc error uncorrected");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "gpccs ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error row address: 0x%x",
		gr_gpc0_gpccs_falcon_ecc_address_row_address_v(ecc_addr));

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.gr.t19x.gpccs_corrected_err_count.counters[gpc],
		g->ecc.gr.t19x.gpccs_uncorrected_err_count.counters[gpc]);

	return ret;
}
864
865int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc,
866 u32 gpc_exception)
867{
868 if (gpc_exception & gr_gpc0_gpccs_gpc_exception_gpcmmu_m())
869 return gr_gv11b_handle_gpcmmu_ecc_exception(g, gpc,
870 gpc_exception);
871 return 0;
872}
873
874int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc,
875 u32 gpc_exception)
876{
877 if (gpc_exception & gr_gpc0_gpccs_gpc_exception_gpccs_m())
878 return gr_gv11b_handle_gpccs_ecc_exception(g, gpc,
879 gpc_exception);
880
881 return 0;
882}
883
884void gr_gv11b_enable_gpc_exceptions(struct gk20a *g)
885{
886 struct gr_gk20a *gr = &g->gr;
887 u32 tpc_mask;
888
889 gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(),
890 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f() |
891 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f());
892
893 tpc_mask =
894 gr_gpcs_gpccs_gpc_exception_en_tpc_f((1 << gr->tpc_count) - 1);
895
896 gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(),
897 (tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1) |
898 gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1) |
899 gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1)));
900}
901
902int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
903 bool *post_event)
904{
905 return 0;
906}
907
908int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr,
909 struct zbc_query_params *query_params)
910{
911 u32 index = query_params->index_size;
912
913 if (index >= GK20A_ZBC_TABLE_SIZE) {
914 nvgpu_err(g, "invalid zbc stencil table index");
915 return -EINVAL;
916 }
917 query_params->depth = gr->zbc_s_tbl[index].stencil;
918 query_params->format = gr->zbc_s_tbl[index].format;
919 query_params->ref_cnt = gr->zbc_s_tbl[index].ref_cnt;
920
921 return 0;
922}
923
924bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr,
925 struct zbc_entry *zbc_val, int *ret_val)
926{
927 struct zbc_s_table *s_tbl;
928 u32 i;
929 bool added = false;
930
931 *ret_val = -ENOMEM;
932
933 /* search existing tables */
934 for (i = 0; i < gr->max_used_s_index; i++) {
935
936 s_tbl = &gr->zbc_s_tbl[i];
937
938 if (s_tbl->ref_cnt &&
939 s_tbl->stencil == zbc_val->depth &&
940 s_tbl->format == zbc_val->format) {
941 added = true;
942 s_tbl->ref_cnt++;
943 *ret_val = 0;
944 break;
945 }
946 }
947 /* add new table */
948 if (!added &&
949 gr->max_used_s_index < GK20A_ZBC_TABLE_SIZE) {
950
951 s_tbl = &gr->zbc_s_tbl[gr->max_used_s_index];
952 WARN_ON(s_tbl->ref_cnt != 0);
953
954 *ret_val = g->ops.gr.add_zbc_s(g, gr,
955 zbc_val, gr->max_used_s_index);
956
957 if (!(*ret_val))
958 gr->max_used_s_index++;
959 }
960 return added;
961}
962
963int gr_gv11b_add_zbc_stencil(struct gk20a *g, struct gr_gk20a *gr,
964 struct zbc_entry *stencil_val, u32 index)
965{
966 u32 zbc_s;
967
968 /* update l2 table */
969 g->ops.ltc.set_zbc_s_entry(g, stencil_val, index);
970
971 /* update local copy */
972 gr->zbc_s_tbl[index].stencil = stencil_val->depth;
973 gr->zbc_s_tbl[index].format = stencil_val->format;
974 gr->zbc_s_tbl[index].ref_cnt++;
975
976 gk20a_writel(g, gr_gpcs_swdx_dss_zbc_s_r(index), stencil_val->depth);
977 zbc_s = gk20a_readl(g, gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r() +
978 (index & ~3));
979 zbc_s &= ~(0x7f << (index % 4) * 7);
980 zbc_s |= stencil_val->format << (index % 4) * 7;
981 gk20a_writel(g, gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r() +
982 (index & ~3), zbc_s);
983
984 return 0;
985}
986
987int gr_gv11b_load_stencil_default_tbl(struct gk20a *g,
988 struct gr_gk20a *gr)
989{
990 struct zbc_entry zbc_val;
991 u32 err;
992
993 /* load default stencil table */
994 zbc_val.type = GV11B_ZBC_TYPE_STENCIL;
995
996 zbc_val.depth = 0x0;
997 zbc_val.format = ZBC_STENCIL_CLEAR_FMT_U8;
998 err = gr_gk20a_add_zbc(g, gr, &zbc_val);
999
1000 zbc_val.depth = 0x1;
1001 zbc_val.format = ZBC_STENCIL_CLEAR_FMT_U8;
1002 err |= gr_gk20a_add_zbc(g, gr, &zbc_val);
1003
1004 zbc_val.depth = 0xff;
1005 zbc_val.format = ZBC_STENCIL_CLEAR_FMT_U8;
1006 err |= gr_gk20a_add_zbc(g, gr, &zbc_val);
1007
1008 if (!err) {
1009 gr->max_default_s_index = 3;
1010 } else {
1011 nvgpu_err(g, "fail to load default zbc stencil table");
1012 return err;
1013 }
1014
1015 return 0;
1016}
1017
1018int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr)
1019{
1020 int ret;
1021 u32 i;
1022
1023 for (i = 0; i < gr->max_used_s_index; i++) {
1024 struct zbc_s_table *s_tbl = &gr->zbc_s_tbl[i];
1025 struct zbc_entry zbc_val;
1026
1027 zbc_val.type = GV11B_ZBC_TYPE_STENCIL;
1028 zbc_val.depth = s_tbl->stencil;
1029 zbc_val.format = s_tbl->format;
1030
1031 ret = g->ops.gr.add_zbc_s(g, gr, &zbc_val, i);
1032 if (ret)
1033 return ret;
1034 }
1035 return 0;
1036}
1037
1038u32 gr_gv11b_pagepool_default_size(struct gk20a *g)
1039{
1040 return gr_scc_pagepool_total_pages_hwmax_value_v();
1041}
1042
1043int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g)
1044{
1045 struct gr_gk20a *gr = &g->gr;
1046 int size;
1047
1048 gr->attrib_cb_size = gr->attrib_cb_default_size;
1049 gr->alpha_cb_size = gr->alpha_cb_default_size;
1050
1051 gr->attrib_cb_size = min(gr->attrib_cb_size,
1052 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(~0) / g->gr.tpc_count);
1053 gr->alpha_cb_size = min(gr->alpha_cb_size,
1054 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(~0) / g->gr.tpc_count);
1055
1056 size = gr->attrib_cb_size *
1057 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() *
1058 gr->max_tpc_count;
1059
1060 size += gr->alpha_cb_size *
1061 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v() *
1062 gr->max_tpc_count;
1063
1064 size = ALIGN(size, 128);
1065
1066 return size;
1067}
1068
1069static void gr_gv11b_set_go_idle_timeout(struct gk20a *g, u32 data)
1070{
1071 gk20a_writel(g, gr_fe_go_idle_timeout_r(), data);
1072}
1073
1074static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data)
1075{
1076 u32 val;
1077
1078 gk20a_dbg_fn("");
1079
1080 val = gk20a_readl(g, gr_gpcs_tc_debug0_r());
1081 val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(),
1082 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data));
1083 gk20a_writel(g, gr_gpcs_tc_debug0_r(), val);
1084
1085 gk20a_dbg_fn("done");
1086}
1087
1088static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data)
1089{
1090 u32 val;
1091 bool flag;
1092
1093 gk20a_dbg_fn("");
1094
1095 val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r());
1096 flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 1 : 0;
1097 val = set_field(val, gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(),
1098 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(flag));
1099 gk20a_writel(g, gr_gpcs_tpcs_tex_in_dbg_r(), val);
1100
1101 val = gk20a_readl(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r());
1102 flag = (data &
1103 NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD) ? 1 : 0;
1104 val = set_field(val, gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(),
1105 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(flag));
1106 flag = (data &
1107 NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST) ? 1 : 0;
1108 val = set_field(val, gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(),
1109 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(flag));
1110 gk20a_writel(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r(), val);
1111}
1112
1113static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data)
1114{
1115 u32 reg_val;
1116
1117 reg_val = gk20a_readl(g, gr_sked_hww_esr_en_r());
1118
1119 if ((data & NVC397_SET_SKEDCHECK_18_MASK) ==
1120 NVC397_SET_SKEDCHECK_18_DISABLE) {
1121 reg_val = set_field(reg_val,
1122 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(),
1123 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f()
1124 );
1125 } else if ((data & NVC397_SET_SKEDCHECK_18_MASK) ==
1126 NVC397_SET_SKEDCHECK_18_ENABLE) {
1127 reg_val = set_field(reg_val,
1128 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(),
1129 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f()
1130 );
1131 }
1132 nvgpu_log_info(g, "sked_hww_esr_en = 0x%x", reg_val);
1133 gk20a_writel(g, gr_sked_hww_esr_en_r(), reg_val);
1134
1135}
1136
1137static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data)
1138{
1139 gk20a_dbg_fn("");
1140
1141 if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
1142 gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),
1143 0);
1144 gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(),
1145 0);
1146 } else {
1147 g->ops.gr.set_hww_esr_report_mask(g);
1148 }
1149}
1150
1151int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
1152 u32 class_num, u32 offset, u32 data)
1153{
1154 gk20a_dbg_fn("");
1155
1156 if (class_num == VOLTA_COMPUTE_A) {
1157 switch (offset << 2) {
1158 case NVC0C0_SET_SHADER_EXCEPTIONS:
1159 gv11b_gr_set_shader_exceptions(g, data);
1160 break;
1161 case NVC3C0_SET_SKEDCHECK:
1162 gr_gv11b_set_skedcheck(g, data);
1163 break;
1164 default:
1165 goto fail;
1166 }
1167 }
1168
1169 if (class_num == VOLTA_A) {
1170 switch (offset << 2) {
1171 case NVC397_SET_SHADER_EXCEPTIONS:
1172 gv11b_gr_set_shader_exceptions(g, data);
1173 break;
1174 case NVC397_SET_CIRCULAR_BUFFER_SIZE:
1175 g->ops.gr.set_circular_buffer_size(g, data);
1176 break;
1177 case NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE:
1178 g->ops.gr.set_alpha_circular_buffer_size(g, data);
1179 break;
1180 case NVC397_SET_GO_IDLE_TIMEOUT:
1181 gr_gv11b_set_go_idle_timeout(g, data);
1182 break;
1183 case NVC097_SET_COALESCE_BUFFER_SIZE:
1184 gr_gv11b_set_coalesce_buffer_size(g, data);
1185 break;
1186 case NVC397_SET_TEX_IN_DBG:
1187 gr_gv11b_set_tex_in_dbg(g, data);
1188 break;
1189 case NVC397_SET_SKEDCHECK:
1190 gr_gv11b_set_skedcheck(g, data);
1191 break;
1192 case NVC397_SET_BES_CROP_DEBUG3:
1193 g->ops.gr.set_bes_crop_debug3(g, data);
1194 break;
1195 default:
1196 goto fail;
1197 }
1198 }
1199 return 0;
1200
1201fail:
1202 return -EINVAL;
1203}
1204
1205void gr_gv11b_bundle_cb_defaults(struct gk20a *g)
1206{
1207 struct gr_gk20a *gr = &g->gr;
1208
1209 gr->bundle_cb_default_size =
1210 gr_scc_bundle_cb_size_div_256b__prod_v();
1211 gr->min_gpm_fifo_depth =
1212 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v();
1213 gr->bundle_cb_token_limit =
1214 gr_pd_ab_dist_cfg2_token_limit_init_v();
1215}
1216
1217void gr_gv11b_cb_size_default(struct gk20a *g)
1218{
1219 struct gr_gk20a *gr = &g->gr;
1220
1221 if (!gr->attrib_cb_default_size)
1222 gr->attrib_cb_default_size =
1223 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v();
1224 gr->alpha_cb_default_size =
1225 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
1226}
1227
/*
 * SW method handler: resize the alpha circular buffer.
 *
 * @data is the requested size in 4-byte units; it is clamped to the
 * maximum alpha CB size discovered at init.  Updates the DS constraint
 * logic size field, the PD a/b distribution max output, and every
 * per-PPC CBM alpha CB size register (scaled by the number of TPCs
 * behind that PES).
 */
void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
{
	struct gr_gk20a *gr = &g->gr;
	u32 gpc_index, ppc_index, stride, val;
	u32 pd_ab_max_output;
	u32 alpha_cb_size = data * 4;

	gk20a_dbg_fn("");

	if (alpha_cb_size > gr->alpha_cb_size)
		alpha_cb_size = gr->alpha_cb_size;

	/* Read-modify-write: replace only the cbsize field. */
	gk20a_writel(g, gr_ds_tga_constraintlogic_alpha_r(),
		(gk20a_readl(g, gr_ds_tga_constraintlogic_alpha_r()) &
		 ~gr_ds_tga_constraintlogic_alpha_cbsize_f(~0)) |
		 gr_ds_tga_constraintlogic_alpha_cbsize_f(alpha_cb_size));

	/* Convert CB size granularity into PD max-output granularity. */
	pd_ab_max_output = alpha_cb_size *
		gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v() /
		gr_pd_ab_dist_cfg1_max_output_granularity_v();

	gk20a_writel(g, gr_pd_ab_dist_cfg1_r(),
		gr_pd_ab_dist_cfg1_max_output_f(pd_ab_max_output) |
		gr_pd_ab_dist_cfg1_max_batches_init_f());

	/* Program each PPC's alpha CB size, addressed via GPC/PPC strides. */
	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
		stride = proj_gpc_stride_v() * gpc_index;

		for (ppc_index = 0; ppc_index < gr->gpc_ppc_count[gpc_index];
			ppc_index++) {

			val = gk20a_readl(g, gr_gpc0_ppc0_cbm_alpha_cb_size_r() +
				stride +
				proj_ppc_in_gpc_stride_v() * ppc_index);

			val = set_field(val, gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(),
					gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(alpha_cb_size *
						gr->pes_tpc_count[ppc_index][gpc_index]));

			gk20a_writel(g, gr_gpc0_ppc0_cbm_alpha_cb_size_r() +
				stride +
				proj_ppc_in_gpc_stride_v() * ppc_index, val);
		}
	}
}
1273
/*
 * SW method handler: resize the beta (attribute) circular buffer.
 *
 * @data is the requested steady-state size in 4-byte units, clamped to
 * the attrib CB size discovered at init.  When GfxP is active (the beta
 * CB size register differs from the steady-state register), the GfxP
 * headroom is preserved on top of the steady-state size.  Programs the
 * DS constraint logic, the per-PPC CBM beta and steady-state sizes, and
 * the SWDX TC beta CB size.
 */
void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data)
{
	struct gr_gk20a *gr = &g->gr;
	u32 gpc_index, ppc_index, stride, val;
	u32 cb_size_steady = data * 4, cb_size;

	gk20a_dbg_fn("");

	if (cb_size_steady > gr->attrib_cb_size)
		cb_size_steady = gr->attrib_cb_size;
	if (gk20a_readl(g, gr_gpc0_ppc0_cbm_beta_cb_size_r()) !=
		gk20a_readl(g,
			gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r())) {
		/* GfxP configured: keep the gfxp-vs-default delta. */
		cb_size = cb_size_steady +
			(gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() -
			 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v());
	} else {
		cb_size = cb_size_steady;
	}

	gk20a_writel(g, gr_ds_tga_constraintlogic_beta_r(),
		(gk20a_readl(g, gr_ds_tga_constraintlogic_beta_r()) &
		 ~gr_ds_tga_constraintlogic_beta_cbsize_f(~0)) |
		 gr_ds_tga_constraintlogic_beta_cbsize_f(cb_size_steady));

	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
		stride = proj_gpc_stride_v() * gpc_index;

		for (ppc_index = 0; ppc_index < gr->gpc_ppc_count[gpc_index];
			ppc_index++) {

			val = gk20a_readl(g, gr_gpc0_ppc0_cbm_beta_cb_size_r() +
				stride +
				proj_ppc_in_gpc_stride_v() * ppc_index);

			val = set_field(val,
				gr_gpc0_ppc0_cbm_beta_cb_size_v_m(),
				gr_gpc0_ppc0_cbm_beta_cb_size_v_f(cb_size *
					gr->pes_tpc_count[ppc_index][gpc_index]));

			gk20a_writel(g, gr_gpc0_ppc0_cbm_beta_cb_size_r() +
				stride +
				proj_ppc_in_gpc_stride_v() * ppc_index, val);

			gk20a_writel(g, proj_ppc_in_gpc_stride_v() * ppc_index +
				gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r() +
				stride,
				gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(
					cb_size_steady));

			/* NOTE(review): indexing the SWDX register by
			 * (ppc_index + gpc_index) looks suspicious -- adjacent
			 * (gpc, ppc) pairs collide on the same register.
			 * Confirm the intended SWDX TC addressing scheme. */
			val = gk20a_readl(g, gr_gpcs_swdx_tc_beta_cb_size_r(
						ppc_index + gpc_index));

			val = set_field(val,
				gr_gpcs_swdx_tc_beta_cb_size_v_m(),
				gr_gpcs_swdx_tc_beta_cb_size_v_f(
					cb_size_steady *
					gr->gpc_ppc_count[gpc_index]));

			gk20a_writel(g, gr_gpcs_swdx_tc_beta_cb_size_r(
						ppc_index + gpc_index), val);
		}
	}
}
1338
1339int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
1340 struct nvgpu_mem *mem)
1341{
1342 int err;
1343
1344 gk20a_dbg_fn("");
1345
1346 err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem);
1347 if (err)
1348 return err;
1349
1350 mem->gpu_va = nvgpu_gmmu_map(vm,
1351 mem,
1352 size,
1353 NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
1354 gk20a_mem_flag_none,
1355 false,
1356 mem->aperture);
1357
1358 if (!mem->gpu_va) {
1359 err = -ENOMEM;
1360 goto fail_free;
1361 }
1362
1363 return 0;
1364
1365fail_free:
1366 nvgpu_dma_free(vm->mm->g, mem);
1367 return err;
1368}
1369
/*
 * Dump the per-SM debug registers (warp/global ESR, report masks, DBGR
 * control/status) for one SM into debug output @o.
 *
 * @offset is the precomputed gpc + tpc + sm register offset; gpc/tpc/sm
 * are used only to label the output.
 */
static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g,
			struct gk20a_debug_output *o,
			u32 gpc, u32 tpc, u32 sm, u32 offset)
{

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_WARP_ESR: 0x%x\n",
		gpc, tpc, sm, gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset));

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_WARP_ESR_REPORT_MASK: 0x%x\n",
		gpc, tpc, sm, gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset));

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_GLOBAL_ESR: 0x%x\n",
		gpc, tpc, sm, gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset));

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_GLOBAL_ESR_REPORT_MASK: 0x%x\n",
		gpc, tpc, sm, gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset));

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_DBGR_CONTROL0: 0x%x\n",
		gpc, tpc, sm, gk20a_readl(g,
		gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset));

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_DBGR_STATUS0: 0x%x\n",
		gpc, tpc, sm, gk20a_readl(g,
		gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset));
}
1405
/*
 * Dump broadcast SM debug registers, then iterate every GPC/TPC/SM and
 * dump the per-SM registers.  Always returns 0.
 */
static int gr_gv11b_dump_gr_sm_regs(struct gk20a *g,
			struct gk20a_debug_output *o)
{
	u32 gpc, tpc, sm, sm_per_tpc;
	u32 gpc_offset, tpc_offset, offset;

	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_HWW_GLOBAL_ESR_REPORT_MASK: 0x%x\n",
		gk20a_readl(g,
		gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r()));
	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_HWW_WARP_ESR_REPORT_MASK: 0x%x\n",
		gk20a_readl(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r()));
	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_HWW_GLOBAL_ESR: 0x%x\n",
		gk20a_readl(g, gr_gpcs_tpcs_sms_hww_global_esr_r()));
	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_CONTROL0: 0x%x\n",
		gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_control0_r()));
	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_STATUS0: 0x%x\n",
		gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_status0_r()));
	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_BPT_PAUSE_MASK_0: 0x%x\n",
		gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r()));
	gk20a_debug_output(o,
		"NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_BPT_PAUSE_MASK_1: 0x%x\n",
		gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r()));

	sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
		gpc_offset = gk20a_gr_gpc_offset(g, gpc);

		/* NOTE(review): iterates g->gr.tpc_count (total TPCs) rather
		 * than gpc_tpc_count[gpc] -- on configs with uneven TPCs per
		 * GPC this may address nonexistent TPCs; confirm intent. */
		for (tpc = 0; tpc < g->gr.tpc_count; tpc++) {
			tpc_offset = gk20a_gr_tpc_offset(g, tpc);

			for (sm = 0; sm < sm_per_tpc; sm++) {
				offset = gpc_offset + tpc_offset +
						gv11b_gr_sm_offset(g, sm);

				gr_gv11b_dump_gr_per_sm_regs(g, o,
					gpc, tpc, sm, offset);
			}
		}
	}

	return 0;
}
1454
/*
 * Dump the GR engine status/activity/exception registers into debug
 * output @o, then the SM debug registers.  Used by the debugfs/ioctl
 * debug dump path.  Always returns 0.
 */
int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
			   struct gk20a_debug_output *o)
{
	struct gr_gk20a *gr = &g->gr;
	u32 gr_engine_id;

	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

	gk20a_debug_output(o, "NV_PGRAPH_STATUS: 0x%x\n",
		gk20a_readl(g, gr_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_STATUS1: 0x%x\n",
		gk20a_readl(g, gr_status_1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_STATUS2: 0x%x\n",
		gk20a_readl(g, gr_status_2_r()));
	gk20a_debug_output(o, "NV_PGRAPH_ENGINE_STATUS: 0x%x\n",
		gk20a_readl(g, gr_engine_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_GRFIFO_STATUS : 0x%x\n",
		gk20a_readl(g, gr_gpfifo_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_GRFIFO_CONTROL : 0x%x\n",
		gk20a_readl(g, gr_gpfifo_ctl_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_HOST_INT_STATUS : 0x%x\n",
		gk20a_readl(g, gr_fecs_host_int_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_EXCEPTION  : 0x%x\n",
		gk20a_readl(g, gr_exception_r()));
	gk20a_debug_output(o, "NV_PGRAPH_FECS_INTR  : 0x%x\n",
		gk20a_readl(g, gr_fecs_intr_r()));
	gk20a_debug_output(o, "NV_PFIFO_ENGINE_STATUS(GR) : 0x%x\n",
		gk20a_readl(g, fifo_engine_status_r(gr_engine_id)));
	/* Per-unit activity registers. */
	gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_activity_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY1: 0x%x\n",
		gk20a_readl(g, gr_activity_1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY2: 0x%x\n",
		gk20a_readl(g, gr_activity_2_r()));
	gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY4: 0x%x\n",
		gk20a_readl(g, gr_activity_4_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_SKED_ACTIVITY: 0x%x\n",
		gk20a_readl(g, gr_pri_sked_activity_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY1: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY2: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity2_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY3: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r()));
	/* TPC1 registers exist only on 2-TPC configurations. */
	if (gr->gpc_tpc_count[0] == 2)
		gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
			gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY1: 0x%x\n",
		gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY2: 0x%x\n",
		gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_2_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY3: 0x%x\n",
		gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r()));
	if (gr->gpc_tpc_count[0] == 2)
		gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n",
			gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_be0_becs_be_activity0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE1_BECS_BE_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_be1_becs_be_activity0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_BECS_BE_ACTIVITY0: 0x%x\n",
		gk20a_readl(g, gr_pri_bes_becs_be_activity0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_DS_MPIPE_STATUS: 0x%x\n",
		gk20a_readl(g, gr_pri_ds_mpipe_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_GO_IDLE_TIMEOUT : 0x%x\n",
		gk20a_readl(g, gr_fe_go_idle_timeout_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_GO_IDLE_INFO : 0x%x\n",
		gk20a_readl(g, gr_pri_fe_go_idle_info_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TEX_M_TEX_SUBUNITS_STATUS: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_FS: 0x%x\n",
		gk20a_readl(g, gr_cwd_fs_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_TPC_FS(0): 0x%x\n",
		gk20a_readl(g, gr_fe_tpc_fs_r(0)));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_GPC_TPC_ID: 0x%x\n",
		gk20a_readl(g, gr_cwd_gpc_tpc_id_r(0)));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_SM_ID(0): 0x%x\n",
		gk20a_readl(g, gr_cwd_sm_id_r(0)));
	/* Context-switch state. */
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CTXSW_STATUS_FE_0: 0x%x\n",
		gk20a_readl(g, gr_fecs_ctxsw_status_fe_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CTXSW_STATUS_1: 0x%x\n",
		gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_CTXSW_STATUS_GPC_0: 0x%x\n",
		gk20a_readl(g, gr_gpc0_gpccs_ctxsw_status_gpc_0_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_CTXSW_STATUS_1: 0x%x\n",
		gk20a_readl(g, gr_gpc0_gpccs_ctxsw_status_1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CTXSW_IDLESTATE : 0x%x\n",
		gk20a_readl(g, gr_fecs_ctxsw_idlestate_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_CTXSW_IDLESTATE : 0x%x\n",
		gk20a_readl(g, gr_gpc0_gpccs_ctxsw_idlestate_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CURRENT_CTX : 0x%x\n",
		gk20a_readl(g, gr_fecs_current_ctx_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_NEW_CTX : 0x%x\n",
		gk20a_readl(g, gr_fecs_new_ctx_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_HOST_INT_ENABLE : 0x%x\n",
		gk20a_readl(g, gr_fecs_host_int_enable_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_HOST_INT_STATUS : 0x%x\n",
		gk20a_readl(g, gr_fecs_host_int_status_r()));
	/* ROP/CROP/ZROP and exception state. */
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_CROP_STATUS1 : 0x%x\n",
		gk20a_readl(g, gr_pri_be0_crop_status1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_CROP_STATUS1 : 0x%x\n",
		gk20a_readl(g, gr_pri_bes_crop_status1_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_ZROP_STATUS : 0x%x\n",
		gk20a_readl(g, gr_pri_be0_zrop_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_ZROP_STATUS2 : 0x%x\n",
		gk20a_readl(g, gr_pri_be0_zrop_status2_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_ZROP_STATUS : 0x%x\n",
		gk20a_readl(g, gr_pri_bes_zrop_status_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_ZROP_STATUS2 : 0x%x\n",
		gk20a_readl(g, gr_pri_bes_zrop_status2_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_EXCEPTION: 0x%x\n",
		gk20a_readl(g, gr_pri_be0_becs_be_exception_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_EXCEPTION_EN: 0x%x\n",
		gk20a_readl(g, gr_pri_be0_becs_be_exception_en_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_EXCEPTION: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_exception_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_EXCEPTION_EN: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_exception_en_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_EXCEPTION: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_exception_r()));
	gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_EXCEPTION_EN: 0x%x\n",
		gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r()));

	gr_gv11b_dump_gr_sm_regs(g, o);

	return 0;
}
1594
1595static bool gr_activity_empty_or_preempted(u32 val)
1596{
1597 while (val) {
1598 u32 v = val & 7;
1599 if (v != gr_activity_4_gpc0_empty_v() &&
1600 v != gr_activity_4_gpc0_preempted_v())
1601 return false;
1602 val >>= 3;
1603 }
1604
1605 return true;
1606}
1607
/*
 * Poll until the GR engine is idle (all activity registers empty or
 * preempted and no context switch in flight), or @duration_ms elapses.
 *
 * @expect_delay is the initial poll interval in usec; it doubles each
 * iteration up to GR_IDLE_CHECK_MAX.  Returns 0 when idle, -EAGAIN on
 * timeout.
 */
int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
			u32 expect_delay)
{
	u32 delay = expect_delay;
	bool gr_enabled;
	bool ctxsw_active;
	bool gr_busy;
	u32 gr_status;
	u32 activity0, activity1, activity2, activity4;
	struct nvgpu_timeout timeout;

	gk20a_dbg_fn("");

	nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER);

	do {
		/* fmodel: host gets fifo_engine_status(gr) from gr
		   only when gr_status is read */
		gr_status = gk20a_readl(g, gr_status_r());

		gr_enabled = gk20a_readl(g, mc_enable_r()) &
			mc_enable_pgraph_enabled_f();

		/* Bit 7 of NV_PGRAPH_STATUS is taken as "ctxsw in progress"
		 * -- presumably the state field; a named accessor would be
		 * clearer than the magic 1<<7. */
		ctxsw_active = gr_status & 1<<7;

		activity0 = gk20a_readl(g, gr_activity_0_r());
		activity1 = gk20a_readl(g, gr_activity_1_r());
		activity2 = gk20a_readl(g, gr_activity_2_r());
		activity4 = gk20a_readl(g, gr_activity_4_r());

		/* activity2 has no empty/preempted encoding: must be zero. */
		gr_busy = !(gr_activity_empty_or_preempted(activity0) &&
			    gr_activity_empty_or_preempted(activity1) &&
			    activity2 == 0 &&
			    gr_activity_empty_or_preempted(activity4));

		if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
			gk20a_dbg_fn("done");
			return 0;
		}

		usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);

	} while (!nvgpu_timeout_expired(&timeout));

	nvgpu_err(g,
	    "timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x",
	    ctxsw_active, gr_busy, activity0, activity1, activity2, activity4);

	return -EAGAIN;
}
1659
1660void gr_gv11b_commit_global_attrib_cb(struct gk20a *g,
1661 struct channel_ctx_gk20a *ch_ctx,
1662 u64 addr, bool patch)
1663{
1664 struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx;
1665 int attrBufferSize;
1666
1667 if (gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va)
1668 attrBufferSize = gr_ctx->t18x.betacb_ctxsw_buffer.size;
1669 else
1670 attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g);
1671
1672 attrBufferSize /= gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f();
1673
1674 gr_gm20b_commit_global_attrib_cb(g, ch_ctx, addr, patch);
1675
1676 gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(),
1677 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(addr) |
1678 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(), patch);
1679
1680 gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_tex_rm_cb_0_r(),
1681 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(addr), patch);
1682
1683 gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_tex_rm_cb_1_r(),
1684 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(attrBufferSize) |
1685 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(), patch);
1686}
1687
/*
 * Apply the TPC floorsweeping mask for @gpc_index through the fuse
 * bypass interface.
 *
 * gpc_tpc_mask bit i set means TPC i is present; the TPC0_DISABLE fuse
 * takes the inverse (disable) mask, hence mask 0x1 -> write 0x2,
 * mask 0x2 -> write 0x1, otherwise disable nothing.
 */
void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
	tegra_fuse_writel(0x1, FUSE_FUSEBYPASS_0);
	tegra_fuse_writel(0x0, FUSE_WRITE_ACCESS_SW_0);
#else
	tegra_fuse_control_write(0x1, FUSE_FUSEBYPASS_0);
	tegra_fuse_control_write(0x0, FUSE_WRITE_ACCESS_SW_0);
#endif

	/* NOTE(review): the disable-fuse writes below use tegra_fuse_writel
	 * on all kernel versions, while the bypass writes above switch to
	 * tegra_fuse_control_write on >= 4.4 -- confirm this asymmetry is
	 * intentional. */
	if (g->gr.gpc_tpc_mask[gpc_index] == 0x1)
		tegra_fuse_writel(0x2, FUSE_OPT_GPU_TPC0_DISABLE_0);
	else if (g->gr.gpc_tpc_mask[gpc_index] == 0x2)
		tegra_fuse_writel(0x1, FUSE_OPT_GPU_TPC0_DISABLE_0);
	else
		tegra_fuse_writel(0x0, FUSE_OPT_GPU_TPC0_DISABLE_0);
}
1705
1706void gr_gv11b_get_access_map(struct gk20a *g,
1707 u32 **whitelist, int *num_entries)
1708{
1709 static u32 wl_addr_gv11b[] = {
1710 /* this list must be sorted (low to high) */
1711 0x404468, /* gr_pri_mme_max_instructions */
1712 0x418300, /* gr_pri_gpcs_rasterarb_line_class */
1713 0x418800, /* gr_pri_gpcs_setup_debug */
1714 0x418e00, /* gr_pri_gpcs_swdx_config */
1715 0x418e40, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
1716 0x418e44, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
1717 0x418e48, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
1718 0x418e4c, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
1719 0x418e50, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */
1720 0x418e58, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1721 0x418e5c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1722 0x418e60, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1723 0x418e64, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1724 0x418e68, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1725 0x418e6c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1726 0x418e70, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1727 0x418e74, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1728 0x418e78, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1729 0x418e7c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1730 0x418e80, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1731 0x418e84, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1732 0x418e88, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1733 0x418e8c, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1734 0x418e90, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1735 0x418e94, /* gr_pri_gpcs_swdx_tc_bundle_addr */
1736 0x419864, /* gr_pri_gpcs_tpcs_pe_l2_evict_policy */
1737 0x419a04, /* gr_pri_gpcs_tpcs_tex_lod_dbg */
1738 0x419a08, /* gr_pri_gpcs_tpcs_tex_samp_dbg */
1739 0x419e84, /* gr_pri_gpcs_tpcs_sms_dbgr_control0 */
1740 0x419ba4, /* gr_pri_gpcs_tpcs_sm_disp_ctrl */
1741 };
1742
1743 *whitelist = wl_addr_gv11b;
1744 *num_entries = ARRAY_SIZE(wl_addr_gv11b);
1745}
1746
/* @brief pre-process work on the SM exceptions to determine if we clear them or not.
 *
 * On Volta (gv11b), when the faulting channel uses CILP compute preemption
 * and an SM debugger is attached: breakpoint/single-step interrupts are
 * acked directly (write-1-clear), and any warp error or masked global error
 * triggers the CILP path -- lock down the SM(s), clear the HWW errors, mark
 * a CILP preempt pending and resume the SM.  *early_exit / *ignore_debugger
 * tell the common handler to skip its normal processing.
 */
int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
		bool sm_debugger_attached, struct channel_gk20a *fault_ch,
		bool *early_exit, bool *ignore_debugger)
{
	int ret;
	bool cilp_enabled = false;
	u32 global_mask = 0, dbgr_control0, global_esr_copy;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	*early_exit = false;
	*ignore_debugger = false;

	if (fault_ch)
		cilp_enabled = (fault_ch->ch_ctx.gr_ctx->compute_preempt_mode ==
			NVGPU_PREEMPTION_MODE_COMPUTE_CILP);

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"SM Exception received on gpc %d tpc %d sm %d = 0x%08x",
			gpc, tpc, sm, global_esr);

	if (cilp_enabled && sm_debugger_attached) {
		/* Ack breakpoint / single-step-complete immediately. */
		if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f())
			gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
					gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f());

		if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f())
			gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
					gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f());

		global_mask = gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f() |
			gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f();

		if (warp_esr != 0 || (global_esr & global_mask) != 0) {
			*ignore_debugger = true;

			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
					"CILP: starting wait for LOCKED_DOWN on "
					"gpc %d tpc %d sm %d",
					gpc, tpc, sm);

			/* Broadcast stop-trigger when requested, else stop
			 * only the faulting SM. */
			if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) {
				gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
						"CILP: Broadcasting STOP_TRIGGER from "
						"gpc %d tpc %d sm %d",
						gpc, tpc, sm);
				g->ops.gr.suspend_all_sms(g,
						global_mask, false);

				gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch);
			} else {
				gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
						"CILP: STOP_TRIGGER from "
						"gpc %d tpc %d sm %d",
						gpc, tpc, sm);
				g->ops.gr.suspend_single_sm(g,
					gpc, tpc, sm, global_mask, true);
			}

			/* reset the HWW errors after locking down */
			global_esr_copy = g->ops.gr.get_sm_hww_global_esr(g,
							gpc, tpc, sm);
			g->ops.gr.clear_sm_hww(g,
					gpc, tpc, sm, global_esr_copy);
			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
					"CILP: HWWs cleared for "
					"gpc %d tpc %d sm %d",
					gpc, tpc, sm);

			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n");
			ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch);
			if (ret) {
				nvgpu_err(g, "CILP: error while setting CILP preempt pending!");
				return ret;
			}

			/* Single-step must be off before resuming, or the SM
			 * would immediately halt again. */
			dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
			if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) {
				gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
						"CILP: clearing SINGLE_STEP_MODE "
						"before resume for gpc %d tpc %d sm %d",
						gpc, tpc, sm);
				dbgr_control0 = set_field(dbgr_control0,
						gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(),
						gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f());
				gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0);
			}

			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
					"CILP: resume for gpc %d tpc %d sm %d",
					gpc, tpc, sm);
			g->ops.gr.resume_single_sm(g, gpc, tpc, sm);

			*ignore_debugger = true;
			gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
					"CILP: All done on gpc %d, tpc %d sm %d",
					gpc, tpc, sm);
		}

		*early_exit = true;
	}
	return 0;
}
1856
1857static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr)
1858{
1859 u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
1860 u32 corrected_delta, uncorrected_delta;
1861 u32 corrected_overflow, uncorrected_overflow;
1862
1863 if (intr & (gr_fecs_host_int_status_ecc_uncorrected_m() |
1864 gr_fecs_host_int_status_ecc_corrected_m())) {
1865 ecc_status = gk20a_readl(g, gr_fecs_falcon_ecc_status_r());
1866 ecc_addr = gk20a_readl(g,
1867 gr_fecs_falcon_ecc_address_r());
1868 corrected_cnt = gk20a_readl(g,
1869 gr_fecs_falcon_ecc_corrected_err_count_r());
1870 uncorrected_cnt = gk20a_readl(g,
1871 gr_fecs_falcon_ecc_uncorrected_err_count_r());
1872
1873 corrected_delta =
1874 gr_fecs_falcon_ecc_corrected_err_count_total_v(
1875 corrected_cnt);
1876 uncorrected_delta =
1877 gr_fecs_falcon_ecc_uncorrected_err_count_total_v(
1878 uncorrected_cnt);
1879
1880 corrected_overflow = ecc_status &
1881 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_m();
1882 uncorrected_overflow = ecc_status &
1883 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m();
1884
1885 /* clear the interrupt */
1886 if ((corrected_delta > 0) || corrected_overflow)
1887 gk20a_writel(g,
1888 gr_fecs_falcon_ecc_corrected_err_count_r(), 0);
1889 if ((uncorrected_delta > 0) || uncorrected_overflow)
1890 gk20a_writel(g,
1891 gr_fecs_falcon_ecc_uncorrected_err_count_r(),
1892 0);
1893
1894
1895 /* clear the interrupt */
1896 gk20a_writel(g, gr_fecs_falcon_ecc_uncorrected_err_count_r(),
1897 0);
1898 gk20a_writel(g, gr_fecs_falcon_ecc_corrected_err_count_r(), 0);
1899
1900 /* clear the interrupt */
1901 gk20a_writel(g, gr_fecs_falcon_ecc_status_r(),
1902 gr_fecs_falcon_ecc_status_reset_task_f());
1903
1904 g->ecc.gr.t19x.fecs_corrected_err_count.counters[0] +=
1905 corrected_delta;
1906 g->ecc.gr.t19x.fecs_uncorrected_err_count.counters[0] +=
1907 uncorrected_delta;
1908
1909 nvgpu_log(g, gpu_dbg_intr,
1910 "fecs ecc interrupt intr: 0x%x", intr);
1911
1912 if (ecc_status &
1913 gr_fecs_falcon_ecc_status_corrected_err_imem_m())
1914 nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected");
1915 if (ecc_status &
1916 gr_fecs_falcon_ecc_status_uncorrected_err_imem_m())
1917 nvgpu_log(g, gpu_dbg_intr,
1918 "imem ecc error uncorrected");
1919 if (ecc_status &
1920 gr_fecs_falcon_ecc_status_corrected_err_dmem_m())
1921 nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected");
1922 if (ecc_status &
1923 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m())
1924 nvgpu_log(g, gpu_dbg_intr,
1925 "dmem ecc error uncorrected");
1926 if (corrected_overflow || uncorrected_overflow)
1927 nvgpu_info(g, "fecs ecc counter overflow!");
1928
1929 nvgpu_log(g, gpu_dbg_intr,
1930 "ecc error row address: 0x%x",
1931 gr_fecs_falcon_ecc_address_row_address_v(ecc_addr));
1932
1933 nvgpu_log(g, gpu_dbg_intr,
1934 "ecc error count corrected: %d, uncorrected %d",
1935 g->ecc.gr.t19x.fecs_corrected_err_count.counters[0],
1936 g->ecc.gr.t19x.fecs_uncorrected_err_count.counters[0]);
1937 }
1938}
1939
1940int gr_gv11b_handle_fecs_error(struct gk20a *g,
1941 struct channel_gk20a *__ch,
1942 struct gr_gk20a_isr_data *isr_data)
1943{
1944 u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r());
1945 int ret;
1946
1947 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
1948
1949 ret = gr_gp10b_handle_fecs_error(g, __ch, isr_data);
1950
1951 /* Handle ECC errors */
1952 gr_gv11b_handle_fecs_ecc_error(g, gr_fecs_intr);
1953
1954 return ret;
1955}
1956
/*
 * Program the screen-tile to GPC mapping tables (CRSTR, WWDX, RSTR2D)
 * from the map computed in gr->map_tiles.
 *
 * Returns 0 on success, -1 if the map has not been built yet.
 */
int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 map;
	u32 i, j, mapregs;
	u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
	u32 num_tpc_per_gpc = nvgpu_get_litter_value(g,
				GPU_LIT_NUM_TPC_PER_GPC);

	gk20a_dbg_fn("");

	if (!gr->map_tiles)
		return -1;

	gk20a_writel(g, gr_crstr_map_table_cfg_r(),
		gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) |
		gr_crstr_map_table_cfg_num_entries_f(gr->tpc_count));

	/* 6 tpc can be stored in one map register */
	mapregs = (num_gpcs * num_tpc_per_gpc + 5) / 6;

	for (i = 0, j = 0; i < mapregs; i++, j = j + 6) {
		map =  gr_crstr_gpc_map_tile0_f(gr->map_tiles[j]) |
			gr_crstr_gpc_map_tile1_f(gr->map_tiles[j + 1]) |
			gr_crstr_gpc_map_tile2_f(gr->map_tiles[j + 2]) |
			gr_crstr_gpc_map_tile3_f(gr->map_tiles[j + 3]) |
			gr_crstr_gpc_map_tile4_f(gr->map_tiles[j + 4]) |
			gr_crstr_gpc_map_tile5_f(gr->map_tiles[j + 5]);

		/* same map value feeds all three mapping units */
		gk20a_writel(g, gr_crstr_gpc_map_r(i), map);
		gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map_r(i), map);
		gk20a_writel(g, gr_rstr2d_gpc_map_r(i), map);
	}

	gk20a_writel(g, gr_ppcs_wwdx_map_table_cfg_r(),
		gr_ppcs_wwdx_map_table_cfg_row_offset_f(gr->map_row_offset) |
		gr_ppcs_wwdx_map_table_cfg_num_entries_f(gr->tpc_count));

	/* coefficients are (2^j mod tpc_count) for consecutive powers;
	 * j starts at 1 and advances 4 per register */
	for (i = 0, j = 1; i < gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v();
					i++, j = j + 4) {
		gk20a_writel(g, gr_ppcs_wwdx_map_table_cfg_coeff_r(i),
			gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(
						((1 << j) % gr->tpc_count)) |
			gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(
					((1 << (j + 1)) % gr->tpc_count)) |
			gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(
					((1 << (j + 2)) % gr->tpc_count)) |
			gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(
					((1 << (j + 3)) % gr->tpc_count)));
	}

	gk20a_writel(g, gr_rstr2d_map_table_cfg_r(),
		gr_rstr2d_map_table_cfg_row_offset_f(gr->map_row_offset) |
		gr_rstr2d_map_table_cfg_num_entries_f(gr->tpc_count));

	return 0;
}
2013
2014static int gv11b_write_bundle_veid_state(struct gk20a *g, u32 index)
2015{
2016 struct av_list_gk20a *sw_veid_bundle_init =
2017 &g->gr.ctx_vars.sw_veid_bundle_init;
2018 u32 j;
2019 u32 num_subctx, err = 0;
2020
2021 num_subctx = g->fifo.t19x.max_subctx_count;
2022
2023 for (j = 0; j < num_subctx; j++) {
2024 nvgpu_log_fn(g, "write bundle_address_r for subctx: %d", j);
2025 gk20a_writel(g, gr_pipe_bundle_address_r(),
2026 sw_veid_bundle_init->l[index].addr |
2027 gr_pipe_bundle_address_veid_f(j));
2028
2029 err = gr_gk20a_wait_fe_idle(g, gk20a_get_gr_idle_timeout(g),
2030 GR_IDLE_CHECK_DEFAULT);
2031 }
2032 return err;
2033}
2034
/*
 * Load the SW VEID bundle list into the graphics pipe.
 *
 * Walks every entry of ctx_vars.sw_veid_bundle_init. The data register
 * is only rewritten when the value changes from the previous entry
 * (the HW latches the last data written). GO_IDLE bundles are written
 * once and followed by a GR idle wait; all other bundles are replayed
 * per-VEID via gv11b_write_bundle_veid_state().
 *
 * Returns 0 on success, non-zero on the first idle-wait failure
 * (remaining bundles are not programmed).
 */
int gr_gv11b_init_sw_veid_bundle(struct gk20a *g)
{
	struct av_list_gk20a *sw_veid_bundle_init =
			&g->gr.ctx_vars.sw_veid_bundle_init;
	u32 i;
	u32 last_bundle_data = 0;
	u32 err = 0;

	for (i = 0; i < sw_veid_bundle_init->count; i++) {
		nvgpu_log_fn(g, "veid bundle count: %d", i);

		/* skip redundant data writes when the value repeats */
		if (i == 0 || last_bundle_data !=
				sw_veid_bundle_init->l[i].value) {
			gk20a_writel(g, gr_pipe_bundle_data_r(),
				sw_veid_bundle_init->l[i].value);
			last_bundle_data = sw_veid_bundle_init->l[i].value;
			nvgpu_log_fn(g, "last_bundle_data : 0x%08x",
						last_bundle_data);
		}

		if (gr_pipe_bundle_address_value_v(
			sw_veid_bundle_init->l[i].addr) == GR_GO_IDLE_BUNDLE) {
			/* GO_IDLE: single write, then wait for GR idle */
			nvgpu_log_fn(g, "go idle bundle");
			gk20a_writel(g, gr_pipe_bundle_address_r(),
				sw_veid_bundle_init->l[i].addr);
			err |= gr_gk20a_wait_idle(g,
				gk20a_get_gr_idle_timeout(g),
				GR_IDLE_CHECK_DEFAULT);
		} else
			/* regular bundle: replay once per subcontext */
			err = gv11b_write_bundle_veid_state(g, i);

		if (err) {
			nvgpu_err(g, "failed to init sw veid bundle");
			break;
		}
	}
	return err;
}
2073
2074void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
2075 u32 *zcull_map_tiles)
2076{
2077 u32 val, i, j;
2078
2079 gk20a_dbg_fn("");
2080
2081 for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) {
2082 val =
2083 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(
2084 zcull_map_tiles[j+0]) |
2085 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(
2086 zcull_map_tiles[j+1]) |
2087 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(
2088 zcull_map_tiles[j+2]) |
2089 gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(
2090 zcull_map_tiles[j+3]) |
2091 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(
2092 zcull_map_tiles[j+4]) |
2093 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(
2094 zcull_map_tiles[j+5]) |
2095 gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(
2096 zcull_map_tiles[j+6]) |
2097 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(
2098 zcull_map_tiles[j+7]);
2099
2100 gk20a_writel(g, gr_gpcs_zcull_sm_in_gpc_number_map_r(i), val);
2101 }
2102}
2103
2104void gr_gv11b_detect_sm_arch(struct gk20a *g)
2105{
2106 u32 v = gk20a_readl(g, gr_gpc0_tpc0_sm_arch_r());
2107
2108 g->params.sm_arch_spa_version =
2109 gr_gpc0_tpc0_sm_arch_spa_version_v(v);
2110 g->params.sm_arch_sm_version =
2111 gr_gpc0_tpc0_sm_arch_sm_version_v(v);
2112 g->params.sm_arch_warp_count =
2113 gr_gpc0_tpc0_sm_arch_warp_count_v(v);
2114}
2115
2116void gr_gv11b_program_sm_id_numbering(struct gk20a *g,
2117 u32 gpc, u32 tpc, u32 smid)
2118{
2119 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
2120 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
2121 GPU_LIT_TPC_IN_GPC_STRIDE);
2122 u32 gpc_offset = gpc_stride * gpc;
2123 u32 tpc_offset = tpc_in_gpc_stride * tpc;
2124 u32 global_tpc_index = g->gr.sm_to_cluster[smid].global_tpc_index;
2125
2126 gk20a_writel(g, gr_gpc0_tpc0_sm_cfg_r() + gpc_offset + tpc_offset,
2127 gr_gpc0_tpc0_sm_cfg_tpc_id_f(global_tpc_index));
2128 gk20a_writel(g, gr_gpc0_gpm_pd_sm_id_r(tpc) + gpc_offset,
2129 gr_gpc0_gpm_pd_sm_id_id_f(global_tpc_index));
2130 gk20a_writel(g, gr_gpc0_tpc0_pe_cfg_smid_r() + gpc_offset + tpc_offset,
2131 gr_gpc0_tpc0_pe_cfg_smid_value_f(global_tpc_index));
2132}
2133
/*
 * Program the CWD (compute work distributor) SM-ID tables.
 *
 * Packs the GPC/TPC identity of every TPC into the
 * NV_PGRAPH_PRI_CWD_GPC_TPC_ID registers (four TPCs per register) and
 * builds the NV_PGRAPH_PRI_CWD_SM_ID table in a scratch buffer before
 * writing it out.
 *
 * Returns 0 on success, -ENOMEM if the scratch table allocation fails.
 */
int gr_gv11b_load_smid_config(struct gk20a *g)
{
	u32 *tpc_sm_id;
	u32 i, j;
	u32 tpc_index, gpc_index, tpc_id;
	u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	int num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);

	/* zero-initialized scratch table, one slot per CWD SM_ID register */
	tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32));
	if (!tpc_sm_id)
		return -ENOMEM;

	/* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/
	for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) {
		u32 reg = 0;
		u32 bit_stride = gr_cwd_gpc_tpc_id_gpc0_s() +
				 gr_cwd_gpc_tpc_id_tpc0_s();

		for (j = 0; j < 4; j++) {
			u32 sm_id;
			u32 bits;

			tpc_id = (i << 2) + j;
			/* use the TPC's first SM to look up its cluster */
			sm_id = tpc_id * sm_per_tpc;

			if (sm_id >= g->gr.no_of_sm)
				break;

			gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index;
			tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index;

			bits = gr_cwd_gpc_tpc_id_gpc0_f(gpc_index) |
				gr_cwd_gpc_tpc_id_tpc0_f(tpc_index);
			reg |= bits << (j * bit_stride);

			/*
			 * NOTE(review): "tpc_id << tpc_index * bit_stride"
			 * exceeds 32 bits once tpc_index * bit_stride >= 32,
			 * and the "(tpc_index & 4) >> 2" bank selection is
			 * hard to verify from here — confirm against the
			 * CWD_SM_ID register layout for this chip.
			 */
			tpc_sm_id[gpc_index + (num_gpcs * ((tpc_index & 4)
				 >> 2))] |= tpc_id << tpc_index * bit_stride;
		}
		gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg);
	}

	for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++)
		gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]);
	nvgpu_kfree(g, tpc_sm_id);

	return 0;
}
2181
/*
 * Commit the graphics context to the channel's instance block.
 *
 * Allocates and fills the per-channel subcontext (VEID) header, then
 * points the instance block's engine_wfi_ptr at the subcontext header
 * rather than directly at the gr context at @gpu_va.
 *
 * Returns 0 on success or a negative error from the subctx header
 * alloc/update helpers.
 *
 * NOTE(review): if gv11b_update_subctx_header() fails, the header
 * allocated by gv11b_alloc_subctx_header() is not released here —
 * confirm the caller (or channel teardown) owns that cleanup.
 */
int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
{
	u32 addr_lo;
	u32 addr_hi;
	struct ctx_header_desc *ctx;
	int err;

	gk20a_dbg_fn("");

	err = gv11b_alloc_subctx_header(c);
	if (err)
		return err;

	err = gv11b_update_subctx_header(c, gpu_va);
	if (err)
		return err;

	ctx = &c->ch_ctx.ctx_header;
	/* instance-block pointer format: low bits pre-shifted by the base */
	addr_lo = u64_lo32(ctx->mem.gpu_va) >> ram_in_base_shift_v();
	addr_hi = u64_hi32(ctx->mem.gpu_va);

	/* point this address to engine_wfi_ptr */
	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_target_w(),
		ram_in_engine_cs_wfi_v() |
		ram_in_engine_wfi_mode_f(ram_in_engine_wfi_mode_virtual_v()) |
		ram_in_engine_wfi_ptr_lo_f(addr_lo));

	nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_ptr_hi_w(),
		ram_in_engine_wfi_ptr_hi_f(addr_hi));

	return 0;
}
2214
2215
2216
/*
 * Enable global timeslicing: read-modify-write the fast-mode-switch,
 * timeslice-enable and timeslice-mode bits into the PE, PD, DS and MPC
 * config registers.
 *
 * ch_ctx is deliberately NULL here, so the patch writes are issued
 * with no channel context (presumably gr_gk20a_ctx_patch_write()
 * writes the registers directly in that case — confirm against its
 * implementation). The channel parameter @c is unused.
 *
 * Always returns 0.
 */
int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c)
{
	struct channel_ctx_gk20a *ch_ctx = NULL;
	u32 pd_ab_dist_cfg0;
	u32 ds_debug;
	u32 mpc_vtg_debug;
	u32 pe_vaf;
	u32 pe_vsc_vpc;

	gk20a_dbg_fn("");

	/* read current values of all registers to be modified */
	pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
	ds_debug = gk20a_readl(g, gr_ds_debug_r());
	mpc_vtg_debug = gk20a_readl(g, gr_gpcs_tpcs_mpc_vtg_debug_r());

	pe_vaf = gk20a_readl(g, gr_gpcs_tpcs_pe_vaf_r());
	pe_vsc_vpc = gk20a_readl(g, gr_gpcs_tpcs_pes_vsc_vpc_r());

	/* OR in the enable bits without disturbing other fields */
	pe_vaf = gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f() | pe_vaf;
	pe_vsc_vpc = gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f() |
			pe_vsc_vpc;
	pd_ab_dist_cfg0 = gr_pd_ab_dist_cfg0_timeslice_enable_en_f() |
			pd_ab_dist_cfg0;
	ds_debug = gr_ds_debug_timeslice_mode_enable_f() | ds_debug;
	mpc_vtg_debug = gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f() |
			mpc_vtg_debug;

	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_pe_vaf_r(), pe_vaf,
		false);
	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_pes_vsc_vpc_r(),
		pe_vsc_vpc, false);
	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg0_r(),
		pd_ab_dist_cfg0, false);
	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_ds_debug_r(), ds_debug, false);
	gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_mpc_vtg_debug_r(),
		mpc_vtg_debug, false);

	return 0;
}
2256
2257void gr_gv11b_write_zcull_ptr(struct gk20a *g,
2258 struct nvgpu_mem *mem, u64 gpu_va)
2259{
2260 u32 va_lo, va_hi;
2261
2262 gpu_va = gpu_va >> 8;
2263 va_lo = u64_lo32(gpu_va);
2264 va_hi = u64_hi32(gpu_va);
2265 nvgpu_mem_wr(g, mem,
2266 ctxsw_prog_main_image_zcull_ptr_o(), va_lo);
2267 nvgpu_mem_wr(g, mem,
2268 ctxsw_prog_main_image_zcull_ptr_hi_o(), va_hi);
2269}
2270
2271
2272void gr_gv11b_write_pm_ptr(struct gk20a *g,
2273 struct nvgpu_mem *mem, u64 gpu_va)
2274{
2275 u32 va_lo, va_hi;
2276
2277 gpu_va = gpu_va >> 8;
2278 va_lo = u64_lo32(gpu_va);
2279 va_hi = u64_hi32(gpu_va);
2280 nvgpu_mem_wr(g, mem,
2281 ctxsw_prog_main_image_pm_ptr_o(), va_lo);
2282 nvgpu_mem_wr(g, mem,
2283 ctxsw_prog_main_image_pm_ptr_hi_o(), va_hi);
2284}
2285
2286void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
2287{
2288 u32 gate_ctrl;
2289
2290 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
2291 return;
2292
2293 gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
2294
2295 switch (mode) {
2296 case ELCG_RUN:
2297 gate_ctrl = set_field(gate_ctrl,
2298 therm_gate_ctrl_eng_clk_m(),
2299 therm_gate_ctrl_eng_clk_run_f());
2300 gate_ctrl = set_field(gate_ctrl,
2301 therm_gate_ctrl_idle_holdoff_m(),
2302 therm_gate_ctrl_idle_holdoff_on_f());
2303 break;
2304 case ELCG_STOP:
2305 gate_ctrl = set_field(gate_ctrl,
2306 therm_gate_ctrl_eng_clk_m(),
2307 therm_gate_ctrl_eng_clk_stop_f());
2308 break;
2309 case ELCG_AUTO:
2310 gate_ctrl = set_field(gate_ctrl,
2311 therm_gate_ctrl_eng_clk_m(),
2312 therm_gate_ctrl_eng_clk_auto_f());
2313 break;
2314 default:
2315 nvgpu_err(g, "invalid elcg mode %d", mode);
2316 }
2317
2318 gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
2319}
2320
2321void gr_gv11b_load_tpc_mask(struct gk20a *g)
2322{
2323 u32 pes_tpc_mask = 0, fuse_tpc_mask;
2324 u32 gpc, pes, val;
2325 u32 num_tpc_per_gpc = nvgpu_get_litter_value(g,
2326 GPU_LIT_NUM_TPC_PER_GPC);
2327
2328 /* gv11b has 1 GPC and 4 TPC/GPC, so mask will not overflow u32 */
2329 for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
2330 for (pes = 0; pes < g->gr.pe_count_per_gpc; pes++) {
2331 pes_tpc_mask |= g->gr.pes_tpc_mask[pes][gpc] <<
2332 num_tpc_per_gpc * gpc;
2333 }
2334 }
2335
2336 gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask);
2337 fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
2338 if (g->tpc_fs_mask_user &&
2339 g->tpc_fs_mask_user != fuse_tpc_mask &&
2340 fuse_tpc_mask == (0x1U << g->gr.max_tpc_count) - 1U) {
2341 val = g->tpc_fs_mask_user;
2342 val &= (0x1U << g->gr.max_tpc_count) - 1U;
2343 val = (0x1U << hweight32(val)) - 1U;
2344 gk20a_writel(g, gr_fe_tpc_fs_r(0), val);
2345 } else {
2346 gk20a_writel(g, gr_fe_tpc_fs_r(0), pes_tpc_mask);
2347 }
2348
2349}
2350
2351void gr_gv11b_set_preemption_buffer_va(struct gk20a *g,
2352 struct nvgpu_mem *mem, u64 gpu_va)
2353{
2354 u32 addr_lo, addr_hi;
2355
2356 addr_lo = u64_lo32(gpu_va);
2357 addr_hi = u64_hi32(gpu_va);
2358
2359 nvgpu_mem_wr(g, mem,
2360 ctxsw_prog_main_image_full_preemption_ptr_o(), addr_lo);
2361 nvgpu_mem_wr(g, mem,
2362 ctxsw_prog_main_image_full_preemption_ptr_hi_o(), addr_hi);
2363
2364 nvgpu_mem_wr(g, mem,
2365 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(), addr_lo);
2366 nvgpu_mem_wr(g, mem,
2367 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(),
2368 addr_hi);
2369
2370}
2371
/*
 * gv11b-specific floorsweep state init.
 *
 * Configures SM texture-I/O out-of-range address checking, disables
 * the SM dispatch re-suppress feature, applies the FECS ECC feature
 * override if one was requested, then falls through to the common
 * gm20b init.
 *
 * Returns the result of gr_gm20b_init_fs_state().
 */
int gr_gv11b_init_fs_state(struct gk20a *g)
{
	u32 data;

	gk20a_dbg_fn("");

	/* OOR address check: match on address bits [63:48] */
	data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
	data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
			gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f());
	gk20a_writel(g, gr_gpcs_tpcs_sm_texio_control_r(), data);

	/* disable re-suppress in the SM dispatch control */
	data = gk20a_readl(g, gr_gpcs_tpcs_sm_disp_ctrl_r());
	data = set_field(data, gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(),
			gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f());
	gk20a_writel(g, gr_gpcs_tpcs_sm_disp_ctrl_r(), data);

	/* apply the debug/test ECC feature override, if any */
	if (g->gr.t18x.fecs_feature_override_ecc_val != 0) {
		gk20a_writel(g,
			gr_fecs_feature_override_ecc_r(),
			g->gr.t18x.fecs_feature_override_ecc_val);
	}

	return gr_gm20b_init_fs_state(g);
}
2396
2397void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
2398 u32 *esr_sm_sel)
2399{
2400 u32 reg_val;
2401 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
2402
2403 reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset);
2404 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
2405 "sm tpc esr sm sel reg val: 0x%x", reg_val);
2406 *esr_sm_sel = 0;
2407 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val))
2408 *esr_sm_sel = 1;
2409 if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val))
2410 *esr_sm_sel |= 1 << 1;
2411 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
2412 "esr_sm_sel bitmask: 0x%x", *esr_sm_sel);
2413}
2414
2415int gv11b_gr_sm_trigger_suspend(struct gk20a *g)
2416{
2417 u32 dbgr_control0;
2418
2419 /* assert stop trigger. uniformity assumption: all SMs will have
2420 * the same state in dbg_control0.
2421 */
2422 dbgr_control0 =
2423 gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
2424 dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();
2425
2426 /* broadcast write */
2427 gk20a_writel(g,
2428 gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);
2429
2430 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
2431 "stop trigger enable: broadcast dbgr_control0: 0x%x ",
2432 dbgr_control0);
2433
2434 return 0;
2435}
2436
/*
 * Collect per-SM warp state (valid / paused-at-breakpoint / trapped
 * masks) into @w_state, one entry per SM. Each 64-bit mask is read as
 * two 32-bit register accesses.
 *
 * NOTE(review): only valid_warps[0] / trapped_warps[0] /
 * paused_warps[0] are written here, yet the debug loop below also
 * prints the [1] entries — presumably the caller zero-initializes
 * w_state; confirm, otherwise those prints show uninitialized data.
 */
void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
{
	/* Check if we have at least one valid warp
	 * get paused state on maxwell
	 */
	struct gr_gk20a *gr = &g->gr;
	u32 gpc, tpc, sm, sm_id;
	u32 offset;
	u64 warps_valid = 0, warps_paused = 0, warps_trapped = 0;

	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
		gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
		tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
		sm = g->gr.sm_to_cluster[sm_id].sm_index;

		offset = gk20a_gr_gpc_offset(g, gpc) +
			 gk20a_gr_tpc_offset(g, tpc) +
			 gv11b_gr_sm_offset(g, sm);

		/* 64 bit read */
		warps_valid = (u64)gk20a_readl(g,
				gr_gpc0_tpc0_sm0_warp_valid_mask_1_r() +
				offset) << 32;
		warps_valid |= gk20a_readl(g,
				gr_gpc0_tpc0_sm0_warp_valid_mask_0_r() +
				offset);

		/* 64 bit read */
		warps_paused = (u64)gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r() +
				offset) << 32;
		warps_paused |= gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r() +
				offset);

		/* 64 bit read */
		warps_trapped = (u64)gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r() +
				offset) << 32;
		warps_trapped |= gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r() +
				offset);

		w_state[sm_id].valid_warps[0] = warps_valid;
		w_state[sm_id].trapped_warps[0] = warps_trapped;
		w_state[sm_id].paused_warps[0] = warps_paused;
	}


	/* Only for debug purpose */
	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
		gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n",
					sm_id, w_state[sm_id].valid_warps[0]);
		gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n",
					sm_id, w_state[sm_id].valid_warps[1]);

		gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n",
				sm_id, w_state[sm_id].trapped_warps[0]);
		gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n",
				sm_id, w_state[sm_id].trapped_warps[1]);

		gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n",
				sm_id, w_state[sm_id].paused_warps[0]);
		gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n",
				sm_id, w_state[sm_id].paused_warps[1]);
	}
}
2504
/*
 * Update the cached SM error state for @sm_id and push it to hardware.
 *
 * Holds dbg_sessions_lock for the whole update; stops ctxsw before
 * touching registers. If the channel's context is resident, the values
 * are written directly to the unicast SM registers; otherwise the
 * report masks are patched into the channel's context buffer.
 *
 * Returns 0 on success; the error from disabling/re-enabling ctxsw or
 * from beginning the patch write otherwise.
 *
 * NOTE(review): the non-resident branch adds the unicast SM @offset to
 * the broadcast gr_gpcs_tpcs_sms_* register addresses — verify this is
 * the intended patch address and not a copy of the unicast path.
 */
int gv11b_gr_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_gr_sm_error_state *sm_error_state)
{
	u32 gpc, tpc, sm, offset;
	struct gr_gk20a *gr = &g->gr;
	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
	int err = 0;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* refresh the SW copy of the error state first */
	gr->sm_error_states[sm_id].hww_global_esr =
			sm_error_state->hww_global_esr;
	gr->sm_error_states[sm_id].hww_warp_esr =
			sm_error_state->hww_warp_esr;
	gr->sm_error_states[sm_id].hww_warp_esr_pc =
			sm_error_state->hww_warp_esr_pc;
	gr->sm_error_states[sm_id].hww_global_esr_report_mask =
			sm_error_state->hww_global_esr_report_mask;
	gr->sm_error_states[sm_id].hww_warp_esr_report_mask =
			sm_error_state->hww_warp_esr_report_mask;

	/* registers/context must not be switched out from under us */
	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		goto fail;
	}

	gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
	tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
	sm = g->gr.sm_to_cluster[sm_id].sm_index;

	offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	if (gk20a_is_channel_ctx_resident(ch)) {
		/* context is on the GPU: write the SM registers directly */
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
			gr->sm_error_states[sm_id].hww_global_esr);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset,
			gr->sm_error_states[sm_id].hww_warp_esr);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r() + offset,
			gr->sm_error_states[sm_id].hww_warp_esr_pc);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset,
			gr->sm_error_states[sm_id].hww_global_esr_report_mask);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset,
			gr->sm_error_states[sm_id].hww_warp_esr_report_mask);
	} else {
		/* context saved off-GPU: patch the report masks into it */
		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
		if (err)
			goto enable_ctxsw;

		gr_gk20a_ctx_patch_write(g, ch_ctx,
			gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r() +
			offset,
			gr->sm_error_states[sm_id].hww_global_esr_report_mask,
			true);
		gr_gk20a_ctx_patch_write(g, ch_ctx,
			gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r() +
			offset,
			gr->sm_error_states[sm_id].hww_warp_esr_report_mask,
			true);

		gr_gk20a_ctx_patch_write_end(g, ch_ctx, false);
	}

enable_ctxsw:
	err = gr_gk20a_enable_ctxsw(g);

fail:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}
2583
2584int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
2585 struct channel_gk20a *ch, u64 sms, bool enable)
2586{
2587 struct nvgpu_dbg_gpu_reg_op *ops;
2588 unsigned int i = 0, sm_id;
2589 int err;
2590
2591 ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops));
2592 if (!ops)
2593 return -ENOMEM;
2594 for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
2595 u32 gpc, tpc, sm;
2596 u32 reg_offset, reg_mask, reg_val;
2597
2598 if (!(sms & (1 << sm_id)))
2599 continue;
2600
2601 gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
2602 tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
2603 sm = g->gr.sm_to_cluster[sm_id].sm_index;
2604
2605 reg_offset = gk20a_gr_gpc_offset(g, gpc) +
2606 gk20a_gr_tpc_offset(g, tpc) +
2607 gv11b_gr_sm_offset(g, sm);
2608
2609 ops[i].op = REGOP(WRITE_32);
2610 ops[i].type = REGOP(TYPE_GR_CTX);
2611 ops[i].offset = gr_gpc0_tpc0_sm0_dbgr_control0_r() + reg_offset;
2612
2613 reg_mask = 0;
2614 reg_val = 0;
2615 if (enable) {
2616 nvgpu_log(g, gpu_dbg_gpu_dbg,
2617 "SM:%d debuggger mode ON", sm);
2618 reg_mask |=
2619 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m();
2620 reg_val |=
2621 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f();
2622 } else {
2623 nvgpu_log(g, gpu_dbg_gpu_dbg,
2624 "SM:%d debuggger mode Off", sm);
2625 reg_mask |=
2626 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m();
2627 reg_val |=
2628 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f();
2629 }
2630
2631 ops[i].and_n_mask_lo = reg_mask;
2632 ops[i].value_lo = reg_val;
2633 i++;
2634 }
2635
2636 err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
2637 if (err)
2638 nvgpu_err(g, "Failed to access register\n");
2639 nvgpu_kfree(g, ops);
2640 return err;
2641}
2642
/*
 * Snapshot the hardware error state of the SM identified by @gpc/@tpc
 * into gr->sm_error_states, under dbg_sessions_lock.
 *
 * The SM id is read back from the SM config register (programmed with
 * the global TPC index), and the SM-within-TPC index is derived from
 * it modulo sm_per_tpc.
 *
 * Always returns 0.
 */
int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc)
{
	int sm_id;
	struct gr_gk20a *gr = &g->gr;
	u32 offset, sm, sm_per_tpc;
	u32 gpc_tpc_offset;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	gpc_tpc_offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc);

	/* recover the SW sm_id from what was programmed into SM config */
	sm_id = gr_gpc0_tpc0_sm_cfg_tpc_id_v(gk20a_readl(g,
			gr_gpc0_tpc0_sm_cfg_r() + gpc_tpc_offset));

	sm = sm_id % sm_per_tpc;

	offset = gpc_tpc_offset + gv11b_gr_sm_offset(g, sm);

	gr->sm_error_states[sm_id].hww_global_esr = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset);

	gr->sm_error_states[sm_id].hww_warp_esr = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset);

	gr->sm_error_states[sm_id].hww_warp_esr_pc = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r() + offset);

	gr->sm_error_states[sm_id].hww_global_esr_report_mask = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset);

	gr->sm_error_states[sm_id].hww_warp_esr_report_mask = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset);

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;
}
2682
/*
 * Clear any latched SM HWW errors and program which warp/global error
 * conditions get reported, via the broadcast (all-SMs) registers.
 */
void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g)
{

	/* clear hww */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffff);
	/*
	 * NOTE(review): this second write repeats the global_esr clear.
	 * It looks like it may have been intended to clear the warp ESR
	 * instead — confirm against the intended clear sequence.
	 */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffff);

	/* setup sm warp esr report masks */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f());

	/* setup sm global esr report mask. vat_alarm_report is not enabled */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(),
		 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f());
}
2712
2713bool gv11b_gr_sm_debugger_attached(struct gk20a *g)
2714{
2715 u32 debugger_mode;
2716 u32 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
2717
2718 /* check if sm debugger is attached.
2719 * assumption: all SMs will have debug mode enabled/disabled
2720 * uniformly.
2721 */
2722 debugger_mode =
2723 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0);
2724 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
2725 "SM Debugger Mode: %d", debugger_mode);
2726 if (debugger_mode ==
2727 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v())
2728 return true;
2729
2730 return false;
2731}
2732
2733void gv11b_gr_suspend_single_sm(struct gk20a *g,
2734 u32 gpc, u32 tpc, u32 sm,
2735 u32 global_esr_mask, bool check_errors)
2736{
2737 int err;
2738 u32 dbgr_control0;
2739 u32 offset = gk20a_gr_gpc_offset(g, gpc) +
2740 gk20a_gr_tpc_offset(g, tpc) +
2741 gv11b_gr_sm_offset(g, sm);
2742
2743 /* if an SM debugger isn't attached, skip suspend */
2744 if (!g->ops.gr.sm_debugger_attached(g)) {
2745 nvgpu_err(g,
2746 "SM debugger not attached, skipping suspend!");
2747 return;
2748 }
2749
2750 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
2751 "suspending gpc:%d, tpc:%d, sm%d", gpc, tpc, sm);
2752
2753 /* assert stop trigger. */
2754 dbgr_control0 = gk20a_readl(g,
2755 gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
2756 dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();
2757 gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset,
2758 dbgr_control0);
2759
2760 err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm,
2761 global_esr_mask, check_errors);
2762 if (err) {
2763 nvgpu_err(g,
2764 "SuspendSm failed");
2765 return;
2766 }
2767}
2768
2769void gv11b_gr_suspend_all_sms(struct gk20a *g,
2770 u32 global_esr_mask, bool check_errors)
2771{
2772 struct gr_gk20a *gr = &g->gr;
2773 u32 gpc, tpc, sm;
2774 int err;
2775 u32 dbgr_control0;
2776 u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
2777
2778 /* if an SM debugger isn't attached, skip suspend */
2779 if (!g->ops.gr.sm_debugger_attached(g)) {
2780 nvgpu_err(g,
2781 "SM debugger not attached, skipping suspend!");
2782 return;
2783 }
2784
2785 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "suspending all sms");
2786
2787 /* assert stop trigger. uniformity assumption: all SMs will have
2788 * the same state in dbg_control0.
2789 */
2790 dbgr_control0 =
2791 gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
2792 dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();
2793
2794 /* broadcast write */
2795 gk20a_writel(g,
2796 gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);
2797
2798 for (gpc = 0; gpc < gr->gpc_count; gpc++) {
2799 for (tpc = 0; tpc < gr_gk20a_get_tpc_count(gr, gpc); tpc++) {
2800 for (sm = 0; sm < sm_per_tpc; sm++) {
2801 err = g->ops.gr.wait_for_sm_lock_down(g,
2802 gpc, tpc, sm,
2803 global_esr_mask, check_errors);
2804 if (err) {
2805 nvgpu_err(g,
2806 "SuspendAllSms failed");
2807 return;
2808 }
2809 }
2810 }
2811 }
2812}
2813
/*
 * Resume a single paused SM: first clear its stop trigger, then pulse
 * the run trigger. See the in-body comment for why these must be two
 * separate writes in this order.
 */
void gv11b_gr_resume_single_sm(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm)
{
	u32 dbgr_control0, dbgr_status0;
	u32 offset;
	/*
	 * The following requires some clarification. Despite the fact that both
	 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their
	 * names, only one is actually a trigger, and that is the STOP_TRIGGER.
	 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to
	 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0
	 * (_DISABLE) as well.

	 * Advice from the arch group: Disable the stop trigger first, as a
	 * separate operation, in order to ensure that the trigger has taken
	 * effect, before enabling the run trigger.
	 */

	offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"resuming gpc:%d, tpc:%d, sm%d", gpc, tpc, sm);
	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_status0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before stop trigger disable: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);

	/*De-assert stop trigger */
	dbgr_control0 = set_field(dbgr_control0,
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(),
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f());
	gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() +
				 offset, dbgr_control0);

	/* re-read for debug visibility of the intermediate state */
	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_status0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);
	/* Run trigger */
	dbgr_control0 |=
		gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f();
	gk20a_writel(g,
		gr_gpc0_tpc0_sm0_dbgr_control0_r() +
		offset, dbgr_control0);

	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_status0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);
	/* run trigger is not sticky bit. SM clears it immediately */
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"after run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);

}
2881
/*
 * Resume all paused SMs: clear the stop trigger then pulse the run
 * trigger, both via broadcast writes, reading back state via unicast
 * registers for debug logging. See the in-body comment for why the
 * two writes must be separate and ordered.
 */
void gv11b_gr_resume_all_sms(struct gk20a *g)
{
	u32 dbgr_control0, dbgr_status0;
	/*
	 * The following requires some clarification. Despite the fact that both
	 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their
	 * names, only one is actually a trigger, and that is the STOP_TRIGGER.
	 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to
	 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0
	 * (_DISABLE) as well.

	 * Advice from the arch group: Disable the stop trigger first, as a
	 * separate operation, in order to ensure that the trigger has taken
	 * effect, before enabling the run trigger.
	 */

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "resuming all sms");

	/* Read from unicast registers */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_status0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_status0_r());

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before stop trigger disable: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);

	dbgr_control0 = set_field(dbgr_control0,
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(),
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f());
	/* Write to broadcast registers */
	gk20a_writel(g,
		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);

	/* Read from unicast registers */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_status0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_status0_r());

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);
	/* Run trigger */
	dbgr_control0 |=
		gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f();
	/* Write to broadcast registers */
	gk20a_writel(g,
		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);

	/* Read from unicast registers */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_status0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_status0_r());
	/* run trigger is not sticky bit. SM clears it immediately */
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"after run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);
}
2946
2947int gv11b_gr_resume_from_pause(struct gk20a *g)
2948{
2949 int err = 0;
2950 u32 reg_val;
2951
2952 /* Clear the pause mask to tell the GPU we want to resume everyone */
2953 gk20a_writel(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(), 0);
2954
2955 /* explicitly re-enable forwarding of SM interrupts upon any resume */
2956 reg_val = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
2957 reg_val |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();
2958
2959 gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), reg_val);
2960
2961 g->ops.gr.resume_all_sms(g);
2962
2963 return err;
2964}
2965
2966u32 gv11b_gr_get_sm_hww_warp_esr(struct gk20a *g,
2967 u32 gpc, u32 tpc, u32 sm)
2968{
2969 u32 offset = gk20a_gr_gpc_offset(g, gpc) +
2970 gk20a_gr_tpc_offset(g, tpc) +
2971 gv11b_gr_sm_offset(g, sm);
2972
2973 u32 hww_warp_esr = gk20a_readl(g,
2974 gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset);
2975 return hww_warp_esr;
2976}
2977
2978u32 gv11b_gr_get_sm_hww_global_esr(struct gk20a *g,
2979 u32 gpc, u32 tpc, u32 sm)
2980{
2981 u32 offset = gk20a_gr_gpc_offset(g, gpc) +
2982 gk20a_gr_tpc_offset(g, tpc) +
2983 gv11b_gr_sm_offset(g, sm);
2984
2985 u32 hww_global_esr = gk20a_readl(g,
2986 gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset);
2987
2988 return hww_global_esr;
2989}
2990
2991u32 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)
2992{
2993 /*
2994 * These three interrupts don't require locking down the SM. They can
2995 * be handled by usermode clients as they aren't fatal. Additionally,
2996 * usermode clients may wish to allow some warps to execute while others
2997 * are at breakpoints, as opposed to fatal errors where all warps should
2998 * halt.
2999 */
3000 u32 global_esr_mask =
3001 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f() |
3002 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f() |
3003 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f();
3004
3005 return global_esr_mask;
3006}
3007
/*
 * Dump one SM's debugger state (control/status registers plus the
 * 64-bit valid/pause/trap warp masks, each assembled from two 32-bit
 * reads). When @timeout is set the dump goes to the error log,
 * otherwise to the debug log.
 */
static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g,
					u32 offset, bool timeout)
{
	u64 warps_valid = 0, warps_paused = 0, warps_trapped = 0;
	u32 dbgr_control0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	u32 dbgr_status0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);
	/* 64 bit read */
	warps_valid =
		(u64)gk20a_readl(g, gr_gpc0_tpc0_sm0_warp_valid_mask_1_r() +
						offset) << 32;
	warps_valid |= gk20a_readl(g,
			gr_gpc0_tpc0_sm0_warp_valid_mask_0_r() + offset);

	/* 64 bit read */
	warps_paused =
		(u64)gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r() +
						offset) << 32;
	warps_paused |= gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r() + offset);

	/* 64 bit read */
	warps_trapped =
		(u64)gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r() +
						offset) << 32;
	warps_trapped |= gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r() + offset);
	if (timeout)
		nvgpu_err(g,
		"STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
		"PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
		dbgr_status0, dbgr_control0, warps_valid,
		warps_paused, warps_trapped);
	else
		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
		"STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
		"PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
		dbgr_status0, dbgr_control0, warps_valid,
		warps_paused, warps_trapped);
}
3049
/*
 * Poll until the SM at (gpc, tpc, sm) reports the locked-down state.
 *
 * Returns 0 when the SM locks down or — when @check_errors is set — when
 * no warp error and no global ESR bit from @global_esr_mask is pending;
 * -EFAULT when a pending MMU fault (with MMU debug mode disabled) makes
 * lock-down impossible; -ETIMEDOUT when the GR idle timeout expires.
 */
int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm,
		u32 global_esr_mask, bool check_errors)
{
	bool locked_down;
	bool no_error_pending;
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	bool mmu_debug_mode_enabled = g->ops.fb.is_debug_mode_enabled(g);
	u32 dbgr_status0 = 0;
	u32 warp_esr, global_esr;
	struct nvgpu_timeout timeout;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
		"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);

	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);

	/* wait for the sm to lock down */
	do {
		global_esr = g->ops.gr.get_sm_hww_global_esr(g, gpc, tpc, sm);
		dbgr_status0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);

		warp_esr = g->ops.gr.get_sm_hww_warp_esr(g, gpc, tpc, sm);

		locked_down =
		    (gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(dbgr_status0) ==
		     gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v());
		/* only meaningful when the caller asked to check errors */
		no_error_pending =
			check_errors &&
			(gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(warp_esr) ==
			 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v()) &&
			((global_esr & global_esr_mask) == 0);

		if (locked_down) {
		/*
		 * if SM reports locked down, it means that SM is idle and
		 * trapped and also that one of the these conditions are true
		 * 1) sm is nonempty and all valid warps are paused
		 * 2) sm is empty and held in trapped state due to stop trigger
		 * 3) sm is nonempty and some warps are not paused, but are
		 *    instead held at RTT due to an "active" stop trigger
		 * Check for Paused warp mask != Valid
		 * warp mask after SM reports it is locked down in order to
		 * distinguish case 1 from case 3. When case 3 is detected,
		 * it implies a misprogrammed trap handler code, as all warps
		 * in the handler must promise to BPT.PAUSE instead of RTT
		 * whenever SR64 read in trap mode indicates stop trigger
		 * is asserted.
		 */
			gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(g,
					offset, false);
		}

		if (locked_down || no_error_pending) {
			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
				"GPC%d TPC%d: locked down SM%d", gpc, tpc, sm);
			return 0;
		}

		/* if an mmu fault is pending and mmu debug mode is not
		 * enabled, the sm will never lock down.
		 */
		if (!mmu_debug_mode_enabled &&
		     (g->ops.mm.mmu_fault_pending(g))) {
			nvgpu_err(g,
				"GPC%d TPC%d: mmu fault pending,"
				" SM%d will never lock down!", gpc, tpc, sm);
			return -EFAULT;
		}

		/* exponential backoff between polls, capped at
		 * GR_IDLE_CHECK_MAX */
		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	nvgpu_err(g, "GPC%d TPC%d: timed out while trying to "
			"lock down SM%d", gpc, tpc, sm);
	gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(g, offset, true);

	return -ETIMEDOUT;
}
3135
3136int gv11b_gr_lock_down_sm(struct gk20a *g,
3137 u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask,
3138 bool check_errors)
3139{
3140 u32 dbgr_control0;
3141 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
3142 gv11b_gr_sm_offset(g, sm);
3143
3144 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
3145 "GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);
3146
3147 /* assert stop trigger */
3148 dbgr_control0 =
3149 gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
3150 dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();
3151 gk20a_writel(g,
3152 gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0);
3153
3154 return g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, global_esr_mask,
3155 check_errors);
3156}
3157
/*
 * Clear the HWW error state of one SM: acknowledge the handled
 * @global_esr bits by writing them back, then zero the warp ESR.
 * The post-clear register values are read back only for debug logging.
 */
void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
				u32 global_esr)
{
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
			global_esr);
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"Cleared HWW global esr, current reg val: 0x%x",
			gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() +
						offset));

	gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0);
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"Cleared HWW warp esr, current reg val: 0x%x",
			gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() +
						offset));
}
3177
3178int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g,
3179 u32 gpc, u32 tpc, bool *post_event)
3180{
3181 u32 esr;
3182 u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
3183 u32 tpc_exception = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_r()
3184 + offset);
3185
3186 if (!(tpc_exception & gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m()))
3187 return 0;
3188
3189 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3190 "GPC%d TPC%d MPC exception", gpc, tpc);
3191
3192 esr = gk20a_readl(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset);
3193 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "mpc hww esr 0x%08x", esr);
3194
3195 esr = gk20a_readl(g, gr_gpc0_tpc0_mpc_hww_esr_info_r() + offset);
3196 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
3197 "mpc hww esr info: veid 0x%08x",
3198 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(esr));
3199
3200 gk20a_writel(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset,
3201 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f());
3202
3203 return 0;
3204}
3205
/* Number of valid entries in _ovr_perf_regs[]; keep in sync with the
 * array size below. */
static const u32 _num_ovr_perf_regs = 20;
/* SM DSM perf override register addresses, filled lazily by
 * gv11b_gr_init_ovr_sm_dsm_perf(); entry 0 == 0 means "not initialized". */
static u32 _ovr_perf_regs[20] = { 0, };
3208
/*
 * Lazily populate _ovr_perf_regs[] with the EGPC/ETPC SM DSM perf
 * counter control and counter register addresses. Idempotent: a
 * non-zero first entry means the table is already filled.
 */
void gv11b_gr_init_ovr_sm_dsm_perf(void)
{
	if (_ovr_perf_regs[0] != 0)
		return;

	/* counter control selects and control registers */
	_ovr_perf_regs[0] = gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel0_r();
	_ovr_perf_regs[1] = gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel1_r();
	_ovr_perf_regs[2] = gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r();
	_ovr_perf_regs[3] = gr_egpc0_etpc0_sm_dsm_perf_counter_control1_r();
	_ovr_perf_regs[4] = gr_egpc0_etpc0_sm_dsm_perf_counter_control2_r();
	_ovr_perf_regs[5] = gr_egpc0_etpc0_sm_dsm_perf_counter_control3_r();
	_ovr_perf_regs[6] = gr_egpc0_etpc0_sm_dsm_perf_counter_control4_r();
	_ovr_perf_regs[7] = gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r();
	/* per-counter control registers */
	_ovr_perf_regs[8] = gr_egpc0_etpc0_sm_dsm_perf_counter0_control_r();
	_ovr_perf_regs[9] = gr_egpc0_etpc0_sm_dsm_perf_counter1_control_r();
	_ovr_perf_regs[10] = gr_egpc0_etpc0_sm_dsm_perf_counter2_control_r();
	_ovr_perf_regs[11] = gr_egpc0_etpc0_sm_dsm_perf_counter3_control_r();
	_ovr_perf_regs[12] = gr_egpc0_etpc0_sm_dsm_perf_counter4_control_r();
	_ovr_perf_regs[13] = gr_egpc0_etpc0_sm_dsm_perf_counter5_control_r();
	_ovr_perf_regs[14] = gr_egpc0_etpc0_sm_dsm_perf_counter6_control_r();
	_ovr_perf_regs[15] = gr_egpc0_etpc0_sm_dsm_perf_counter7_control_r();

	/* per-SM raw counters 4..7 */
	_ovr_perf_regs[16] = gr_egpc0_etpc0_sm0_dsm_perf_counter4_r();
	_ovr_perf_regs[17] = gr_egpc0_etpc0_sm0_dsm_perf_counter5_r();
	_ovr_perf_regs[18] = gr_egpc0_etpc0_sm0_dsm_perf_counter6_r();
	_ovr_perf_regs[19] = gr_egpc0_etpc0_sm0_dsm_perf_counter7_r();
}
3236
/* Following are the blocks of registers that the ucode
 * stores in the extended region.
 */
/* == ctxsw_extended_sm_dsm_perf_counter_register_stride_v() ? */
/* GV11B exposes no SM DSM perf counter registers here: the count below
 * is zero-initialized and the list pointer stays NULL. */
static const u32 _num_sm_dsm_perf_regs;
/* == ctxsw_extended_sm_dsm_perf_counter_control_register_stride_v() ?*/
static const u32 _num_sm_dsm_perf_ctrl_regs = 2;
static u32 *_sm_dsm_perf_regs;
/* Control register addresses, filled lazily by
 * gv11b_gr_init_sm_dsm_reg_info(). */
static u32 _sm_dsm_perf_ctrl_regs[2];
3246
/*
 * Lazily populate _sm_dsm_perf_ctrl_regs[] with the two EGPC/ETPC SM DSM
 * perf counter control register addresses. Idempotent: a non-zero first
 * entry means the table is already initialized.
 */
void gv11b_gr_init_sm_dsm_reg_info(void)
{
	if (_sm_dsm_perf_ctrl_regs[0] != 0)
		return;

	_sm_dsm_perf_ctrl_regs[0] =
			      gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r();
	_sm_dsm_perf_ctrl_regs[1] =
			      gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r();
}
3257
/*
 * Report the SM DSM perf register list (empty on GV11B: count 0, NULL
 * list — see the file-scope declarations) and the ctxsw extended-region
 * stride for perf counter registers.
 */
void gv11b_gr_get_sm_dsm_perf_regs(struct gk20a *g,
		u32 *num_sm_dsm_perf_regs,
		u32 **sm_dsm_perf_regs,
		u32 *perf_register_stride)
{
	*num_sm_dsm_perf_regs = _num_sm_dsm_perf_regs;
	*sm_dsm_perf_regs = _sm_dsm_perf_regs;
	*perf_register_stride =
		ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
}
3268
/*
 * Report the SM DSM perf control register list (filled lazily by
 * gv11b_gr_init_sm_dsm_reg_info()) and the ctxsw extended-region stride
 * for control registers.
 */
void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
		u32 *num_sm_dsm_perf_ctrl_regs,
		u32 **sm_dsm_perf_ctrl_regs,
		u32 *ctrl_register_stride)
{
	*num_sm_dsm_perf_ctrl_regs = _num_sm_dsm_perf_ctrl_regs;
	*sm_dsm_perf_ctrl_regs = _sm_dsm_perf_ctrl_regs;
	*ctrl_register_stride =
		ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v();
}
3279
/*
 * Expose the SM DSM override register table (filled lazily by
 * gv11b_gr_init_ovr_sm_dsm_perf()) and its entry count.
 */
void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
					       u32 **ovr_perf_regs)
{
	*num_ovr_perf_regs = _num_ovr_perf_regs;
	*ovr_perf_regs = _ovr_perf_regs;
}
3286
/*
 * Steer SMPC register reads to a specific quad/half of the SM that owns
 * @offset.
 *
 * @quad encoding: bit 0 selects the quad, bit 1 selects the half. The
 * GPC/TPC indices are decoded from @offset; the halfctl and debug sfe
 * control values are read from the unicast registers and written back
 * through the broadcast registers.
 */
void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
{
	u32 reg_val;
	u32 quad_ctrl;
	u32 half_ctrl;
	u32 tpc, gpc;
	u32 gpc_tpc_addr;
	u32 gpc_tpc_stride;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
					GPU_LIT_TPC_IN_GPC_STRIDE);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "offset=0x%x", offset);

	gpc = pri_get_gpc_num(g, offset);
	gpc_tpc_addr = pri_gpccs_addr_mask(offset);
	tpc = g->ops.gr.get_tpc_num(g, gpc_tpc_addr);

	quad_ctrl = quad & 0x1; /* first bit tells us quad */
	half_ctrl = (quad >> 1) & 0x1; /* second bit tells us half */

	gpc_tpc_stride = gpc * gpc_stride + tpc * tpc_in_gpc_stride;
	gpc_tpc_addr = gr_gpc0_tpc0_sm_halfctl_ctrl_r() + gpc_tpc_stride;

	/* read from unicast reg */
	reg_val = gk20a_readl(g, gpc_tpc_addr);
	reg_val = set_field(reg_val,
		gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(),
		gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(quad_ctrl));

	/* write to broadcast reg */
	gk20a_writel(g, gr_gpcs_tpcs_sm_halfctl_ctrl_r(), reg_val);

	/* repeat for the debug sfe control register (half select) */
	gpc_tpc_addr = gr_gpc0_tpc0_sm_debug_sfe_control_r() + gpc_tpc_stride;
	reg_val = gk20a_readl(g, gpc_tpc_addr);
	reg_val = set_field(reg_val,
		gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(),
		gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(half_ctrl));

	/* write to broadcast reg */
	gk20a_writel(g, gr_gpcs_tpcs_sm_debug_sfe_control_r(), reg_val);
}
3329
3330static bool pri_is_egpc_addr_shared(struct gk20a *g, u32 addr)
3331{
3332 u32 egpc_shared_base = EGPC_PRI_SHARED_BASE;
3333 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
3334
3335 return (addr >= egpc_shared_base) &&
3336 (addr < egpc_shared_base + gpc_stride);
3337}
3338
3339bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr)
3340{
3341 u32 egpc_base = g->ops.gr.get_egpc_base(g);
3342 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
3343 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
3344
3345 return ((addr >= egpc_base) &&
3346 (addr < egpc_base + num_gpcs * gpc_stride)) ||
3347 pri_is_egpc_addr_shared(g, addr);
3348}
3349
3350static inline u32 pri_smpc_in_etpc_addr_mask(struct gk20a *g, u32 addr)
3351{
3352 u32 smpc_stride = nvgpu_get_litter_value(g,
3353 GPU_LIT_SMPC_PRI_STRIDE);
3354
3355 return (addr & (smpc_stride - 1));
3356}
3357
3358static u32 pri_smpc_ext_addr(struct gk20a *g, u32 sm_offset, u32 gpc_num,
3359 u32 tpc_num, u32 sm_num)
3360{
3361 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
3362 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g,
3363 GPU_LIT_TPC_IN_GPC_BASE);
3364 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
3365 GPU_LIT_TPC_IN_GPC_STRIDE);
3366 u32 egpc_base = g->ops.gr.get_egpc_base(g);
3367 u32 smpc_unique_base = nvgpu_get_litter_value(g,
3368 GPU_LIT_SMPC_PRI_UNIQUE_BASE);
3369 u32 smpc_stride = nvgpu_get_litter_value(g,
3370 GPU_LIT_SMPC_PRI_STRIDE);
3371
3372 return (egpc_base + (gpc_num * gpc_stride) + tpc_in_gpc_base +
3373 (tpc_num * tpc_in_gpc_stride) +
3374 (sm_num * smpc_stride) +
3375 (smpc_unique_base + sm_offset));
3376}
3377
3378static bool pri_is_smpc_addr_in_etpc_shared(struct gk20a *g, u32 addr)
3379{
3380 u32 smpc_shared_base = nvgpu_get_litter_value(g,
3381 GPU_LIT_SMPC_PRI_SHARED_BASE);
3382 u32 smpc_stride = nvgpu_get_litter_value(g,
3383 GPU_LIT_SMPC_PRI_STRIDE);
3384
3385 return (addr >= smpc_shared_base) &&
3386 (addr < smpc_shared_base + smpc_stride);
3387}
3388
3389bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr)
3390{
3391 u32 egpc_addr = 0;
3392
3393 if (g->ops.gr.is_egpc_addr(g, addr)) {
3394 egpc_addr = pri_gpccs_addr_mask(addr);
3395 if (g->ops.gr.is_tpc_addr(g, egpc_addr))
3396 return true;
3397 }
3398
3399 return false;
3400}
3401
3402static u32 pri_get_egpc_num(struct gk20a *g, u32 addr)
3403{
3404 u32 i, start;
3405 u32 egpc_base = g->ops.gr.get_egpc_base(g);
3406 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
3407 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
3408
3409 for (i = 0; i < num_gpcs; i++) {
3410 start = egpc_base + (i * gpc_stride);
3411 if ((addr >= start) && (addr < (start + gpc_stride)))
3412 return i;
3413 }
3414 return 0;
3415}
3416
3417static u32 pri_egpc_addr(struct gk20a *g, u32 addr, u32 gpc)
3418{
3419 u32 egpc_base = g->ops.gr.get_egpc_base(g);
3420 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
3421
3422 return egpc_base + (gpc * gpc_stride) + addr;
3423}
3424
3425static u32 pri_etpc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 tpc)
3426{
3427 u32 egpc_base = g->ops.gr.get_egpc_base(g);
3428 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
3429 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g,
3430 GPU_LIT_TPC_IN_GPC_BASE);
3431 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
3432 GPU_LIT_TPC_IN_GPC_STRIDE);
3433
3434 return egpc_base + (gpc * gpc_stride) +
3435 tpc_in_gpc_base + (tpc * tpc_in_gpc_stride) +
3436 addr;
3437}
3438
3439void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr,
3440 u32 *egpc_num, u32 *etpc_num)
3441{
3442 u32 egpc_addr = 0;
3443
3444 *egpc_num = pri_get_egpc_num(g, addr);
3445 egpc_addr = pri_gpccs_addr_mask(addr);
3446 *etpc_num = g->ops.gr.get_tpc_num(g, egpc_addr);
3447
3448 nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
3449 "egpc_num = %d etpc_num = %d", *egpc_num, *etpc_num);
3450}
3451
/*
 * Classify @addr within the EGPC/ETPC PRI space.
 *
 * On success sets *addr_type (CTXSW_ADDR_TYPE_EGPC or _ETPC), the
 * unicast *gpc_num/*tpc_num (0 for shared/broadcast ranges) and ORs the
 * matching PRI_BROADCAST_FLAGS_* bits into *broadcast_flags.
 * Returns 0 on success, -EINVAL when @addr is not an EGPC address.
 */
int gv11b_gr_decode_egpc_addr(struct gk20a *g, u32 addr, int *addr_type,
	u32 *gpc_num, u32 *tpc_num, u32 *broadcast_flags)
{
	u32 gpc_addr;
	u32 tpc_addr;

	if (g->ops.gr.is_egpc_addr(g, addr)) {
		nvgpu_log_info(g, "addr=0x%x is egpc", addr);

		*addr_type = CTXSW_ADDR_TYPE_EGPC;
		gpc_addr = pri_gpccs_addr_mask(addr);
		if (pri_is_egpc_addr_shared(g, addr)) {
			*broadcast_flags |= PRI_BROADCAST_FLAGS_EGPC;
			*gpc_num = 0;
			nvgpu_log_info(g, "shared egpc");
		} else {
			*gpc_num = pri_get_egpc_num(g, addr);
			nvgpu_log_info(g, "gpc=0x%x", *gpc_num);
		}
		if (g->ops.gr.is_tpc_addr(g, gpc_addr)) {
			nvgpu_log_info(g, "addr=0x%x is etpc", addr);
			*addr_type = CTXSW_ADDR_TYPE_ETPC;
			if (pri_is_tpc_addr_shared(g, gpc_addr)) {
				*broadcast_flags |= PRI_BROADCAST_FLAGS_ETPC;
				*tpc_num = 0;
				nvgpu_log_info(g, "shared etpc");
			} else {
				*tpc_num = g->ops.gr.get_tpc_num(g, gpc_addr);
				nvgpu_log_info(g, "tpc=0x%x", *tpc_num);
			}
			/* flag addresses landing in the SMPC shared range */
			tpc_addr = pri_tpccs_addr_mask(addr);
			if (pri_is_smpc_addr_in_etpc_shared(g, tpc_addr))
				*broadcast_flags |= PRI_BROADCAST_FLAGS_SMPC;
		}

		nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"addr_type = %d, broadcast_flags = %#08x",
			*addr_type, *broadcast_flags);
		return 0;
	}
	return -EINVAL;
}
3494
3495static void gv11b_gr_update_priv_addr_table_smpc(struct gk20a *g, u32 gpc_num,
3496 u32 tpc_num, u32 addr,
3497 u32 *priv_addr_table, u32 *t)
3498{
3499 u32 sm_per_tpc, sm_num;
3500
3501 nvgpu_log_info(g, "broadcast flags smpc");
3502
3503 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
3504 for (sm_num = 0; sm_num < sm_per_tpc; sm_num++) {
3505 priv_addr_table[*t] = pri_smpc_ext_addr(g,
3506 pri_smpc_in_etpc_addr_mask(g, addr),
3507 gpc_num, tpc_num, sm_num);
3508 nvgpu_log_info(g, "priv_addr_table[%d]:%#08x",
3509 *t, priv_addr_table[*t]);
3510 (*t)++;
3511 }
3512}
3513
3514void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr,
3515 u32 gpc, u32 broadcast_flags, u32 *priv_addr_table, u32 *t)
3516{
3517 u32 gpc_num, tpc_num;
3518
3519 nvgpu_log_info(g, "addr=0x%x", addr);
3520
3521 /* The GPC/TPC unicast registers are included in the compressed PRI
3522 * tables. Convert a GPC/TPC broadcast address to unicast addresses so
3523 * that we can look up the offsets.
3524 */
3525 if (broadcast_flags & PRI_BROADCAST_FLAGS_EGPC) {
3526 nvgpu_log_info(g, "broadcast flags egpc");
3527 for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) {
3528
3529 if (broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) {
3530 nvgpu_log_info(g, "broadcast flags etpc");
3531 for (tpc_num = 0;
3532 tpc_num < g->gr.gpc_tpc_count[gpc_num];
3533 tpc_num++) {
3534 if (broadcast_flags &
3535 PRI_BROADCAST_FLAGS_SMPC) {
3536 gv11b_gr_update_priv_addr_table_smpc(
3537 g, gpc_num, tpc_num, addr,
3538 priv_addr_table, t);
3539 } else {
3540 priv_addr_table[*t] =
3541 pri_etpc_addr(g,
3542 pri_tpccs_addr_mask(addr),
3543 gpc_num, tpc_num);
3544 nvgpu_log_info(g,
3545 "priv_addr_table[%d]:%#08x",
3546 *t, priv_addr_table[*t]);
3547 (*t)++;
3548 }
3549 }
3550 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) {
3551 tpc_num = 0;
3552 gv11b_gr_update_priv_addr_table_smpc(
3553 g, gpc_num, tpc_num, addr,
3554 priv_addr_table, t);
3555 } else {
3556 priv_addr_table[*t] =
3557 pri_egpc_addr(g,
3558 pri_gpccs_addr_mask(addr),
3559 gpc_num);
3560 nvgpu_log_info(g, "priv_addr_table[%d]:%#08x",
3561 *t, priv_addr_table[*t]);
3562 (*t)++;
3563 }
3564 }
3565 } else if (!(broadcast_flags & PRI_BROADCAST_FLAGS_EGPC)) {
3566 if (broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) {
3567 nvgpu_log_info(g, "broadcast flags etpc but not egpc");
3568 gpc_num = 0;
3569 for (tpc_num = 0;
3570 tpc_num < g->gr.gpc_tpc_count[gpc];
3571 tpc_num++) {
3572 if (broadcast_flags &
3573 PRI_BROADCAST_FLAGS_SMPC)
3574 gv11b_gr_update_priv_addr_table_smpc(
3575 g, gpc_num, tpc_num, addr,
3576 priv_addr_table, t);
3577 else {
3578 priv_addr_table[*t] =
3579 pri_etpc_addr(g,
3580 pri_tpccs_addr_mask(addr),
3581 gpc, tpc_num);
3582 nvgpu_log_info(g,
3583 "priv_addr_table[%d]:%#08x",
3584 *t, priv_addr_table[*t]);
3585 (*t)++;
3586 }
3587 }
3588 } else if (broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) {
3589 tpc_num = 0;
3590 gpc_num = 0;
3591 gv11b_gr_update_priv_addr_table_smpc(
3592 g, gpc_num, tpc_num, addr,
3593 priv_addr_table, t);
3594 } else {
3595 priv_addr_table[*t] = addr;
3596 nvgpu_log_info(g, "priv_addr_table[%d]:%#08x",
3597 *t, priv_addr_table[*t]);
3598 (*t)++;
3599 }
3600 }
3601}
3602
/* PRI base address of the extended GPC (EGPC) register space. */
u32 gv11b_gr_get_egpc_base(struct gk20a *g)
{
	return EGPC_PRI_BASE;
}
3607
/*
 * Mirror the BAR0 FB MMU configuration into the GPC MMU.
 *
 * fb_mmu_ctrl is masked down to the fields the GPC MMU copy carries,
 * the PM unit/request masks are cleared, and the three MMU debug
 * registers are copied verbatim from their FB counterparts.
 */
void gr_gv11b_init_gpc_mmu(struct gk20a *g)
{
	u32 temp;

	nvgpu_log_info(g, "initialize gpc mmu");

	if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
		/* Bypass MMU check for non-secure boot. For
		 * secure-boot,this register write has no-effect */
		gk20a_writel(g, fb_priv_mmu_phy_secure_r(), 0xffffffff);
	}
	temp = gk20a_readl(g, fb_mmu_ctrl_r());
	temp &= gr_gpcs_pri_mmu_ctrl_vm_pg_size_m() |
		gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m() |
		gr_gpcs_pri_mmu_ctrl_vol_fault_m() |
		gr_gpcs_pri_mmu_ctrl_comp_fault_m() |
		gr_gpcs_pri_mmu_ctrl_miss_gran_m() |
		gr_gpcs_pri_mmu_ctrl_cache_mode_m() |
		gr_gpcs_pri_mmu_ctrl_mmu_aperture_m() |
		gr_gpcs_pri_mmu_ctrl_mmu_vol_m() |
		gr_gpcs_pri_mmu_ctrl_mmu_disable_m();
	gk20a_writel(g, gr_gpcs_pri_mmu_ctrl_r(), temp);
	gk20a_writel(g, gr_gpcs_pri_mmu_pm_unit_mask_r(), 0);
	gk20a_writel(g, gr_gpcs_pri_mmu_pm_req_mask_r(), 0);

	gk20a_writel(g, gr_gpcs_pri_mmu_debug_ctrl_r(),
			gk20a_readl(g, fb_mmu_debug_ctrl_r()));
	gk20a_writel(g, gr_gpcs_pri_mmu_debug_wr_r(),
			gk20a_readl(g, fb_mmu_debug_wr_r()));
	gk20a_writel(g, gr_gpcs_pri_mmu_debug_rd_r(),
			gk20a_readl(g, fb_mmu_debug_rd_r()));
}
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.h b/drivers/gpu/nvgpu/gv11b/gr_gv11b.h
new file mode 100644
index 00000000..b6ba231e
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.h
@@ -0,0 +1,215 @@
1/*
2 * GV11B GPU GR
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GR_GV11B_H_
26#define _NVGPU_GR_GV11B_H_
27
/* PRI base of the extended GPC (EGPC) register space */
#define EGPC_PRI_BASE        0x580000
/* PRI base of the shared (broadcast) EGPC range */
#define EGPC_PRI_SHARED_BASE 0x480000

/* broadcast-decode flag: address targets the shared SMPC range */
#define PRI_BROADCAST_FLAGS_SMPC  BIT(17)

/* stencil ZBC table type and clear formats; the "INVAILD" typo is kept
 * because the identifier is referenced elsewhere */
#define GV11B_ZBC_TYPE_STENCIL            T19X_ZBC
#define ZBC_STENCIL_CLEAR_FMT_INVAILD     0
#define ZBC_STENCIL_CLEAR_FMT_U8          1
36
/* One entry of the stencil zero-bandwidth-clear (ZBC) table. */
struct zbc_s_table {
	u32 stencil;	/* stencil clear value */
	u32 format;	/* one of ZBC_STENCIL_CLEAR_FMT_* */
	u32 ref_cnt;	/* reference count for this entry */
};
42
43struct gk20a;
44struct zbc_entry;
45struct zbc_query_params;
46struct channel_ctx_gk20a;
47struct nvgpu_warpstate;
48struct nvgpu_gr_sm_error_state;
49
/* Volta (GV11B) class IDs */
enum {
	VOLTA_CHANNEL_GPFIFO_A	= 0xC36F,
	VOLTA_A			= 0xC397,
	VOLTA_COMPUTE_A		= 0xC3C0,
	VOLTA_DMA_COPY_A	= 0xC3B5,
};
56
/* NVC397 (VOLTA_A) software method offsets — presumably dispatched via
 * gr_gv11b_handle_sw_method(); confirm against the implementation. */
#define NVC397_SET_SHADER_EXCEPTIONS		0x1528
#define NVC397_SET_CIRCULAR_BUFFER_SIZE		0x1280
#define NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE	0x02dc
#define NVC397_SET_GO_IDLE_TIMEOUT		0x022c
#define NVC397_SET_TEX_IN_DBG			0x10bc
#define NVC397_SET_SKEDCHECK			0x10c0
#define NVC397_SET_BES_CROP_DEBUG3		0x10c4

/* bit fields of the SET_TEX_IN_DBG method data */
#define NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE		0x1
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD	0x2
#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST	0x4

/* SET_SKEDCHECK_18 field mask and values */
#define NVC397_SET_SKEDCHECK_18_MASK		0x3
#define NVC397_SET_SKEDCHECK_18_DEFAULT		0x0
#define NVC397_SET_SKEDCHECK_18_DISABLE		0x1
#define NVC397_SET_SKEDCHECK_18_ENABLE		0x2

/* NVC3C0 (VOLTA_COMPUTE_A) software method offset */
#define NVC3C0_SET_SKEDCHECK			0x23c

#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE	0
77
78int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size,
79 struct nvgpu_mem *mem);
80/*zcull*/
81void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
82 u32 *zcull_map_tiles);
83void gr_gv11b_create_sysfs(struct gk20a *g);
84
85bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num);
86bool gr_gv11b_is_valid_gfx_class(struct gk20a *g, u32 class_num);
87bool gr_gv11b_is_valid_compute_class(struct gk20a *g, u32 class_num);
88void gr_gv11b_enable_hww_exceptions(struct gk20a *g);
89void gr_gv11b_enable_exceptions(struct gk20a *g);
90int gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g,
91 u32 gpc, u32 tpc,
92 bool *post_event, struct channel_gk20a *fault_ch,
93 u32 *hww_global_esr);
94int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc,
95 bool *post_event, struct channel_gk20a *fault_ch,
96 u32 *hww_global_esr);
97int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc,
98 u32 gpc_exception);
99int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc,
100 u32 gpc_exception);
101void gr_gv11b_enable_gpc_exceptions(struct gk20a *g);
102int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc,
103 bool *post_event);
104int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr,
105 struct zbc_query_params *query_params);
106bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr,
107 struct zbc_entry *zbc_val, int *ret_val);
108int gr_gv11b_add_zbc_stencil(struct gk20a *g, struct gr_gk20a *gr,
109 struct zbc_entry *stencil_val, u32 index);
110int gr_gv11b_load_stencil_default_tbl(struct gk20a *g,
111 struct gr_gk20a *gr);
112int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr);
113u32 gr_gv11b_pagepool_default_size(struct gk20a *g);
114int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g);
115int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr,
116 u32 class_num, u32 offset, u32 data);
117void gr_gv11b_bundle_cb_defaults(struct gk20a *g);
118void gr_gv11b_cb_size_default(struct gk20a *g);
119void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
120void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data);
121int gr_gv11b_dump_gr_status_regs(struct gk20a *g,
122 struct gk20a_debug_output *o);
123int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms,
124 u32 expect_delay);
125void gr_gv11b_commit_global_attrib_cb(struct gk20a *g,
126 struct channel_ctx_gk20a *ch_ctx,
127 u64 addr, bool patch);
128void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
129void gr_gv11b_get_access_map(struct gk20a *g,
130 u32 **whitelist, int *num_entries);
131int gr_gv11b_pre_process_sm_exception(struct gk20a *g,
132 u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr,
133 bool sm_debugger_attached, struct channel_gk20a *fault_ch,
134 bool *early_exit, bool *ignore_debugger);
135int gr_gv11b_handle_fecs_error(struct gk20a *g,
136 struct channel_gk20a *__ch,
137 struct gr_gk20a_isr_data *isr_data);
138int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr);
139int gr_gv11b_init_sw_veid_bundle(struct gk20a *g);
140void gr_gv11b_detect_sm_arch(struct gk20a *g);
141void gr_gv11b_program_sm_id_numbering(struct gk20a *g,
142 u32 gpc, u32 tpc, u32 smid);
143int gr_gv11b_load_smid_config(struct gk20a *g);
144int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va);
145int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c);
146void gr_gv11b_write_zcull_ptr(struct gk20a *g,
147 struct nvgpu_mem *mem, u64 gpu_va);
148void gr_gv11b_write_pm_ptr(struct gk20a *g,
149 struct nvgpu_mem *mem, u64 gpu_va);
150void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine);
151void gr_gv11b_load_tpc_mask(struct gk20a *g);
152void gr_gv11b_set_preemption_buffer_va(struct gk20a *g,
153 struct nvgpu_mem *mem, u64 gpu_va);
154int gr_gv11b_init_fs_state(struct gk20a *g);
155void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
156 u32 *esr_sm_sel);
157int gv11b_gr_sm_trigger_suspend(struct gk20a *g);
158void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
159int gv11b_gr_update_sm_error_state(struct gk20a *g,
160 struct channel_gk20a *ch, u32 sm_id,
161 struct nvgpu_gr_sm_error_state *sm_error_state);
162int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
163 struct channel_gk20a *ch, u64 sms, bool enable);
164int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc);
165void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g);
166bool gv11b_gr_sm_debugger_attached(struct gk20a *g);
167void gv11b_gr_suspend_single_sm(struct gk20a *g,
168 u32 gpc, u32 tpc, u32 sm,
169 u32 global_esr_mask, bool check_errors);
170void gv11b_gr_suspend_all_sms(struct gk20a *g,
171 u32 global_esr_mask, bool check_errors);
172void gv11b_gr_resume_single_sm(struct gk20a *g,
173 u32 gpc, u32 tpc, u32 sm);
174void gv11b_gr_resume_all_sms(struct gk20a *g);
175int gv11b_gr_resume_from_pause(struct gk20a *g);
176u32 gv11b_gr_get_sm_hww_warp_esr(struct gk20a *g,
177 u32 gpc, u32 tpc, u32 sm);
178u32 gv11b_gr_get_sm_hww_global_esr(struct gk20a *g,
179 u32 gpc, u32 tpc, u32 sm);
180u32 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
181int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
182 u32 gpc, u32 tpc, u32 sm,
183 u32 global_esr_mask, bool check_errors);
184int gv11b_gr_lock_down_sm(struct gk20a *g,
185 u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask,
186 bool check_errors);
187void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
188 u32 global_esr);
189int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g,
190 u32 gpc, u32 tpc, bool *post_event);
191void gv11b_gr_init_ovr_sm_dsm_perf(void);
192void gv11b_gr_init_sm_dsm_reg_info(void);
193void gv11b_gr_get_sm_dsm_perf_regs(struct gk20a *g,
194 u32 *num_sm_dsm_perf_regs,
195 u32 **sm_dsm_perf_regs,
196 u32 *perf_register_stride);
197void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
198 u32 *num_sm_dsm_perf_ctrl_regs,
199 u32 **sm_dsm_perf_ctrl_regs,
200 u32 *ctrl_register_stride);
201void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
202 u32 **ovr_perf_regs);
203void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset);
204bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr);
205bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr);
206void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr,
207 u32 *egpc_num, u32 *etpc_num);
208int gv11b_gr_decode_egpc_addr(struct gk20a *g, u32 addr, int *addr_type,
209 u32 *gpc_num, u32 *tpc_num, u32 *broadcast_flags);
210void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr,
211 u32 gpc, u32 broadcast_flags, u32 *priv_addr_table, u32 *t);
212u32 gv11b_gr_get_egpc_base(struct gk20a *g);
213void gr_gv11b_init_gpc_mmu(struct gk20a *g);
214
215#endif
diff --git a/drivers/gpu/nvgpu/gv11b/gv11b.c b/drivers/gpu/nvgpu/gv11b/gv11b.c
new file mode 100644
index 00000000..211755e5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gv11b.c
@@ -0,0 +1,38 @@
1/*
2 * GV11B Graphics
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/enabled.h>
26#include <nvgpu/enabled_t19x.h>
27
28#include "gk20a/gk20a.h"
29
30#include "gv11b/gv11b.h"
31
32int gv11b_init_gpu_characteristics(struct gk20a *g)
33{
34 gk20a_init_gpu_characteristics(g);
35 __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
36 __nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
37 return 0;
38}
diff --git a/drivers/gpu/nvgpu/gv11b/gv11b.h b/drivers/gpu/nvgpu/gv11b/gv11b.h
new file mode 100644
index 00000000..3d5490e6
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gv11b.h
@@ -0,0 +1,32 @@
1/*
2 * GV11B Graphics
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef GV11B_H
#define GV11B_H

/*
 * A forward declaration is enough for a pointer parameter; avoid
 * pulling all of gk20a.h into every includer of this header.
 */
struct gk20a;

/*
 * Set the gv11b-specific GPU characteristic flags on top of the
 * common gk20a ones.  Returns 0 on success.
 */
int gv11b_init_gpu_characteristics(struct gk20a *g);

#endif /* GV11B_H */
diff --git a/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
new file mode 100644
index 00000000..9f6057ae
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
@@ -0,0 +1,748 @@
1/*
2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 *
22 * This file is autogenerated. Do not edit.
23 */
24
25#ifndef __gv11b_gating_reglist_h__
26#define __gv11b_gating_reglist_h__
27
28#include <linux/types.h>
29#include "gv11b_gating_reglist.h"
30#include <nvgpu/enabled.h>
31
32struct gating_desc {
33 u32 addr;
34 u32 prod;
35 u32 disable;
36};
37/* slcg bus */
38static const struct gating_desc gv11b_slcg_bus[] = {
39 {.addr = 0x00001c04, .prod = 0x00000000, .disable = 0x000003fe},
40};
41
42/* slcg ce2 */
43static const struct gating_desc gv11b_slcg_ce2[] = {
44 {.addr = 0x00104204, .prod = 0x00000040, .disable = 0x000007fe},
45};
46
47/* slcg chiplet */
48static const struct gating_desc gv11b_slcg_chiplet[] = {
49 {.addr = 0x0010c07c, .prod = 0x00000000, .disable = 0x00000007},
50 {.addr = 0x0010e07c, .prod = 0x00000000, .disable = 0x00000007},
51 {.addr = 0x0010d07c, .prod = 0x00000000, .disable = 0x00000007},
52 {.addr = 0x0010e17c, .prod = 0x00000000, .disable = 0x00000007},
53};
54
55/* slcg fb */
56static const struct gating_desc gv11b_slcg_fb[] = {
57 {.addr = 0x00100d14, .prod = 0x00000000, .disable = 0xfffffffe},
58 {.addr = 0x00100c9c, .prod = 0x00000000, .disable = 0x000001fe},
59};
60
61/* slcg fifo */
62static const struct gating_desc gv11b_slcg_fifo[] = {
63 {.addr = 0x000026ec, .prod = 0x00000000, .disable = 0x0001fffe},
64};
65
66/* slcg gr */
67static const struct gating_desc gv11b_slcg_gr[] = {
68 {.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x07fffffe},
69 {.addr = 0x00409134, .prod = 0x00020008, .disable = 0x0003fffe},
70 {.addr = 0x00409894, .prod = 0x00000000, .disable = 0x0000fffe},
71 {.addr = 0x004078c4, .prod = 0x00000000, .disable = 0x000001fe},
72 {.addr = 0x00406004, .prod = 0x00000200, .disable = 0x0001fffe},
73 {.addr = 0x00405864, .prod = 0x00000000, .disable = 0x000001fe},
74 {.addr = 0x00405910, .prod = 0xfffffff0, .disable = 0xfffffffe},
75 {.addr = 0x00408044, .prod = 0x00000000, .disable = 0x000007fe},
76 {.addr = 0x00407004, .prod = 0x00000000, .disable = 0x000001fe},
77 {.addr = 0x00405bf4, .prod = 0x00000000, .disable = 0x00000002},
78 {.addr = 0x0041a134, .prod = 0x00020008, .disable = 0x0003fffe},
79 {.addr = 0x0041a894, .prod = 0x00000000, .disable = 0x0000fffe},
80 {.addr = 0x00418504, .prod = 0x00000000, .disable = 0x0007fffe},
81 {.addr = 0x0041860c, .prod = 0x00000000, .disable = 0x000001fe},
82 {.addr = 0x0041868c, .prod = 0x00000000, .disable = 0x0000001e},
83 {.addr = 0x0041871c, .prod = 0x00000000, .disable = 0x000003fe},
84 {.addr = 0x00418388, .prod = 0x00000000, .disable = 0x00000001},
85 {.addr = 0x0041882c, .prod = 0x00000000, .disable = 0x0001fffe},
86 {.addr = 0x00418bc0, .prod = 0x00000000, .disable = 0x000001fe},
87 {.addr = 0x00418974, .prod = 0x00000000, .disable = 0x0001fffe},
88 {.addr = 0x00418c74, .prod = 0xffffff80, .disable = 0xfffffffe},
89 {.addr = 0x00418cf4, .prod = 0xfffffff8, .disable = 0xfffffffe},
90 {.addr = 0x00418d74, .prod = 0xffffffe0, .disable = 0xfffffffe},
91 {.addr = 0x00418f10, .prod = 0xffffffe0, .disable = 0xfffffffe},
92 {.addr = 0x00418e10, .prod = 0xfffffffe, .disable = 0xfffffffe},
93 {.addr = 0x00419024, .prod = 0x000001fe, .disable = 0x000001fe},
94 {.addr = 0x0041889c, .prod = 0x00000000, .disable = 0x000001fe},
95 {.addr = 0x00419d24, .prod = 0x00000000, .disable = 0x000000ff},
96 {.addr = 0x0041986c, .prod = 0x00000104, .disable = 0x00fffffe},
97 {.addr = 0x00419c74, .prod = 0x0000001e, .disable = 0x0000001e},
98 {.addr = 0x00419c84, .prod = 0x0003fff8, .disable = 0x0003fffe},
99 {.addr = 0x00419c8c, .prod = 0xffffff84, .disable = 0xfffffffe},
100 {.addr = 0x00419c94, .prod = 0x00080040, .disable = 0x000ffffe},
101 {.addr = 0x00419ca4, .prod = 0x00003ffe, .disable = 0x00003ffe},
102 {.addr = 0x00419cac, .prod = 0x0001fffe, .disable = 0x0001fffe},
103 {.addr = 0x00419a44, .prod = 0x00000008, .disable = 0x0000000e},
104 {.addr = 0x00419a4c, .prod = 0x000001f8, .disable = 0x000001fe},
105 {.addr = 0x00419a54, .prod = 0x0000003c, .disable = 0x0000003e},
106 {.addr = 0x00419a5c, .prod = 0x0000000c, .disable = 0x0000000e},
107 {.addr = 0x00419a64, .prod = 0x000001ba, .disable = 0x000001fe},
108 {.addr = 0x00419a7c, .prod = 0x0000003c, .disable = 0x0000003e},
109 {.addr = 0x00419a84, .prod = 0x0000000c, .disable = 0x0000000e},
110 {.addr = 0x0041be2c, .prod = 0x04115fc0, .disable = 0xfffffffe},
111 {.addr = 0x0041bfec, .prod = 0xfffffff0, .disable = 0xfffffffe},
112 {.addr = 0x0041bed4, .prod = 0xfffffff8, .disable = 0xfffffffe},
113 {.addr = 0x00408814, .prod = 0x00000000, .disable = 0x0001fffe},
114 {.addr = 0x00408a84, .prod = 0x00000000, .disable = 0x0001fffe},
115 {.addr = 0x004089ac, .prod = 0x00000000, .disable = 0x0001fffe},
116 {.addr = 0x00408a24, .prod = 0x00000000, .disable = 0x000000ff},
117};
118
119/* slcg ltc */
120static const struct gating_desc gv11b_slcg_ltc[] = {
121 {.addr = 0x0017e050, .prod = 0x00000000, .disable = 0xfffffffe},
122 {.addr = 0x0017e35c, .prod = 0x00000000, .disable = 0xfffffffe},
123};
124
125/* slcg perf */
126static const struct gating_desc gv11b_slcg_perf[] = {
127 {.addr = 0x00248018, .prod = 0xffffffff, .disable = 0x00000000},
128 {.addr = 0x00248018, .prod = 0xffffffff, .disable = 0x00000000},
129 {.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000},
130 {.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000},
131 {.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000},
132 {.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000},
133 {.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000},
134 {.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000},
135 {.addr = 0x0024a124, .prod = 0x00000001, .disable = 0x00000000},
136};
137
138/* slcg PriRing */
139static const struct gating_desc gv11b_slcg_priring[] = {
140 {.addr = 0x001200a8, .prod = 0x00000000, .disable = 0x00000001},
141};
142
143/* slcg pwr_csb */
144static const struct gating_desc gv11b_slcg_pwr_csb[] = {
145 {.addr = 0x00000134, .prod = 0x00020008, .disable = 0x0003fffe},
146 {.addr = 0x00000e74, .prod = 0x00000000, .disable = 0x0000000f},
147 {.addr = 0x00000a74, .prod = 0x00004040, .disable = 0x00007ffe},
148 {.addr = 0x000206b8, .prod = 0x00000008, .disable = 0x0000000f},
149};
150
151/* slcg pmu */
152static const struct gating_desc gv11b_slcg_pmu[] = {
153 {.addr = 0x0010a134, .prod = 0x00020008, .disable = 0x0003fffe},
154 {.addr = 0x0010aa74, .prod = 0x00004040, .disable = 0x00007ffe},
155 {.addr = 0x0010ae74, .prod = 0x00000000, .disable = 0x0000000f},
156};
157
158/* therm gr */
159static const struct gating_desc gv11b_slcg_therm[] = {
160 {.addr = 0x000206b8, .prod = 0x00000008, .disable = 0x0000000f},
161};
162
163/* slcg Xbar */
164static const struct gating_desc gv11b_slcg_xbar[] = {
165 {.addr = 0x0013c824, .prod = 0x00000000, .disable = 0x7ffffffe},
166 {.addr = 0x0013dc08, .prod = 0x00000000, .disable = 0xfffffffe},
167 {.addr = 0x0013c924, .prod = 0x00000000, .disable = 0x7ffffffe},
168 {.addr = 0x0013cbe4, .prod = 0x00000000, .disable = 0x1ffffffe},
169 {.addr = 0x0013cc04, .prod = 0x00000000, .disable = 0x1ffffffe},
170};
171
172/* blcg bus */
173static const struct gating_desc gv11b_blcg_bus[] = {
174 {.addr = 0x00001c00, .prod = 0x00000042, .disable = 0x00000000},
175};
176
177/* blcg ce */
178static const struct gating_desc gv11b_blcg_ce[] = {
179 {.addr = 0x00104200, .prod = 0x0000c242, .disable = 0x00000000},
180};
181
182/* blcg ctxsw prog */
183static const struct gating_desc gv11b_blcg_ctxsw_prog[] = {
184};
185
186/* blcg fb */
187static const struct gating_desc gv11b_blcg_fb[] = {
188 {.addr = 0x00100d10, .prod = 0x0000c242, .disable = 0x00000000},
189 {.addr = 0x00100d30, .prod = 0x0000c242, .disable = 0x00000000},
190 {.addr = 0x00100d3c, .prod = 0x00000242, .disable = 0x00000000},
191 {.addr = 0x00100d48, .prod = 0x0000c242, .disable = 0x00000000},
192 {.addr = 0x00100d1c, .prod = 0x00000042, .disable = 0x00000000},
193 {.addr = 0x00100c98, .prod = 0x00004242, .disable = 0x00000000},
194};
195
196/* blcg fifo */
197static const struct gating_desc gv11b_blcg_fifo[] = {
198 {.addr = 0x000026e0, .prod = 0x0000c244, .disable = 0x00000000},
199};
200
201/* blcg gr */
202static const struct gating_desc gv11b_blcg_gr[] = {
203 {.addr = 0x004041f0, .prod = 0x0000c646, .disable = 0x00000000},
204 {.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000},
205 {.addr = 0x004098b0, .prod = 0x0000007f, .disable = 0x00000000},
206 {.addr = 0x004078c0, .prod = 0x00004242, .disable = 0x00000000},
207 {.addr = 0x00406000, .prod = 0x0000c444, .disable = 0x00000000},
208 {.addr = 0x00405860, .prod = 0x0000c242, .disable = 0x00000000},
209 {.addr = 0x0040590c, .prod = 0x0000c444, .disable = 0x00000000},
210 {.addr = 0x00408040, .prod = 0x0000c444, .disable = 0x00000000},
211 {.addr = 0x00407000, .prod = 0x4000c242, .disable = 0x00000000},
212 {.addr = 0x00405bf0, .prod = 0x0000c444, .disable = 0x00000000},
213 {.addr = 0x0041a890, .prod = 0x0000427f, .disable = 0x00000000},
214 {.addr = 0x0041a8b0, .prod = 0x0000007f, .disable = 0x00000000},
215 {.addr = 0x00418500, .prod = 0x0000c244, .disable = 0x00000000},
216 {.addr = 0x00418608, .prod = 0x0000c242, .disable = 0x00000000},
217 {.addr = 0x00418688, .prod = 0x0000c242, .disable = 0x00000000},
218 {.addr = 0x00418718, .prod = 0x00000042, .disable = 0x00000000},
219 {.addr = 0x00418828, .prod = 0x00008444, .disable = 0x00000000},
220 {.addr = 0x00418bbc, .prod = 0x0000c242, .disable = 0x00000000},
221 {.addr = 0x00418970, .prod = 0x0000c242, .disable = 0x00000000},
222 {.addr = 0x00418c70, .prod = 0x0000c444, .disable = 0x00000000},
223 {.addr = 0x00418cf0, .prod = 0x0000c444, .disable = 0x00000000},
224 {.addr = 0x00418d70, .prod = 0x0000c444, .disable = 0x00000000},
225 {.addr = 0x00418f0c, .prod = 0x0000c444, .disable = 0x00000000},
226 {.addr = 0x00418e0c, .prod = 0x0000c444, .disable = 0x00000000},
227 {.addr = 0x00419020, .prod = 0x0000c242, .disable = 0x00000000},
228 {.addr = 0x00419038, .prod = 0x00000042, .disable = 0x00000000},
229 {.addr = 0x00418898, .prod = 0x00004242, .disable = 0x00000000},
230 {.addr = 0x00419868, .prod = 0x00008243, .disable = 0x00000000},
231 {.addr = 0x00419c70, .prod = 0x0000c444, .disable = 0x00000000},
232 {.addr = 0x00419c80, .prod = 0x00004045, .disable = 0x00000000},
233 {.addr = 0x00419c88, .prod = 0x00004043, .disable = 0x00000000},
234 {.addr = 0x00419c90, .prod = 0x0000004a, .disable = 0x00000000},
235 {.addr = 0x00419c98, .prod = 0x00000042, .disable = 0x00000000},
236 {.addr = 0x00419ca0, .prod = 0x00000043, .disable = 0x00000000},
237 {.addr = 0x00419ca8, .prod = 0x00000003, .disable = 0x00000000},
238 {.addr = 0x00419cb0, .prod = 0x00000002, .disable = 0x00000000},
239 {.addr = 0x00419a40, .prod = 0x00000242, .disable = 0x00000000},
240 {.addr = 0x00419a48, .prod = 0x00000242, .disable = 0x00000000},
241 {.addr = 0x00419a50, .prod = 0x00000242, .disable = 0x00000000},
242 {.addr = 0x00419a58, .prod = 0x00000242, .disable = 0x00000000},
243 {.addr = 0x00419a60, .prod = 0x00000202, .disable = 0x00000000},
244 {.addr = 0x00419a68, .prod = 0x00000202, .disable = 0x00000000},
245 {.addr = 0x00419a78, .prod = 0x00000242, .disable = 0x00000000},
246 {.addr = 0x00419a80, .prod = 0x00000242, .disable = 0x00000000},
247 {.addr = 0x0041be28, .prod = 0x00008242, .disable = 0x00000000},
248 {.addr = 0x0041bfe8, .prod = 0x0000c444, .disable = 0x00000000},
249 {.addr = 0x0041bed0, .prod = 0x0000c444, .disable = 0x00000000},
250 {.addr = 0x00408810, .prod = 0x0000c242, .disable = 0x00000000},
251 {.addr = 0x00408a80, .prod = 0x0000c242, .disable = 0x00000000},
252 {.addr = 0x004089a8, .prod = 0x0000c242, .disable = 0x00000000},
253};
254
255/* blcg ltc */
256static const struct gating_desc gv11b_blcg_ltc[] = {
257 {.addr = 0x0017e030, .prod = 0x00000044, .disable = 0x00000000},
258 {.addr = 0x0017e040, .prod = 0x00000044, .disable = 0x00000000},
259 {.addr = 0x0017e3e0, .prod = 0x00000044, .disable = 0x00000000},
260 {.addr = 0x0017e3c8, .prod = 0x00000044, .disable = 0x00000000},
261};
262
263/* blcg pwr_csb */
264static const struct gating_desc gv11b_blcg_pwr_csb[] = {
265 {.addr = 0x00000a70, .prod = 0x00000045, .disable = 0x00000000},
266};
267
268/* blcg pmu */
269static const struct gating_desc gv11b_blcg_pmu[] = {
270 {.addr = 0x0010aa70, .prod = 0x00000045, .disable = 0x00000000},
271};
272
273/* blcg Xbar */
274static const struct gating_desc gv11b_blcg_xbar[] = {
275 {.addr = 0x0013c820, .prod = 0x0001004a, .disable = 0x00000000},
276 {.addr = 0x0013dc04, .prod = 0x0001004a, .disable = 0x00000000},
277 {.addr = 0x0013c920, .prod = 0x0000004a, .disable = 0x00000000},
278 {.addr = 0x0013cbe0, .prod = 0x00000042, .disable = 0x00000000},
279 {.addr = 0x0013cc00, .prod = 0x00000042, .disable = 0x00000000},
280};
281
282/* pg gr */
283static const struct gating_desc gv11b_pg_gr[] = {
284};
285
286/* inline functions */
287void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
288 bool prod)
289{
290 u32 i;
291 u32 size = sizeof(gv11b_slcg_bus) / sizeof(struct gating_desc);
292
293 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
294 return;
295
296 for (i = 0; i < size; i++) {
297 if (prod)
298 gk20a_writel(g, gv11b_slcg_bus[i].addr,
299 gv11b_slcg_bus[i].prod);
300 else
301 gk20a_writel(g, gv11b_slcg_bus[i].addr,
302 gv11b_slcg_bus[i].disable);
303 }
304}
305
306void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
307 bool prod)
308{
309 u32 i;
310 u32 size = sizeof(gv11b_slcg_ce2) / sizeof(struct gating_desc);
311
312 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
313 return;
314
315 for (i = 0; i < size; i++) {
316 if (prod)
317 gk20a_writel(g, gv11b_slcg_ce2[i].addr,
318 gv11b_slcg_ce2[i].prod);
319 else
320 gk20a_writel(g, gv11b_slcg_ce2[i].addr,
321 gv11b_slcg_ce2[i].disable);
322 }
323}
324
325void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
326 bool prod)
327{
328 u32 i;
329 u32 size = sizeof(gv11b_slcg_chiplet) / sizeof(struct gating_desc);
330
331 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
332 return;
333
334 for (i = 0; i < size; i++) {
335 if (prod)
336 gk20a_writel(g, gv11b_slcg_chiplet[i].addr,
337 gv11b_slcg_chiplet[i].prod);
338 else
339 gk20a_writel(g, gv11b_slcg_chiplet[i].addr,
340 gv11b_slcg_chiplet[i].disable);
341 }
342}
343
344void gv11b_slcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
345 bool prod)
346{
347}
348
349void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
350 bool prod)
351{
352 u32 i;
353 u32 size = sizeof(gv11b_slcg_fb) / sizeof(struct gating_desc);
354
355 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
356 return;
357
358 for (i = 0; i < size; i++) {
359 if (prod)
360 gk20a_writel(g, gv11b_slcg_fb[i].addr,
361 gv11b_slcg_fb[i].prod);
362 else
363 gk20a_writel(g, gv11b_slcg_fb[i].addr,
364 gv11b_slcg_fb[i].disable);
365 }
366}
367
368void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
369 bool prod)
370{
371 u32 i;
372 u32 size = sizeof(gv11b_slcg_fifo) / sizeof(struct gating_desc);
373
374 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
375 return;
376
377 for (i = 0; i < size; i++) {
378 if (prod)
379 gk20a_writel(g, gv11b_slcg_fifo[i].addr,
380 gv11b_slcg_fifo[i].prod);
381 else
382 gk20a_writel(g, gv11b_slcg_fifo[i].addr,
383 gv11b_slcg_fifo[i].disable);
384 }
385}
386
387void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
388 bool prod)
389{
390 u32 i;
391 u32 size = sizeof(gv11b_slcg_gr) / sizeof(struct gating_desc);
392
393 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
394 return;
395
396 for (i = 0; i < size; i++) {
397 if (prod)
398 gk20a_writel(g, gv11b_slcg_gr[i].addr,
399 gv11b_slcg_gr[i].prod);
400 else
401 gk20a_writel(g, gv11b_slcg_gr[i].addr,
402 gv11b_slcg_gr[i].disable);
403 }
404}
405
406void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
407 bool prod)
408{
409 u32 i;
410 u32 size = sizeof(gv11b_slcg_ltc) / sizeof(struct gating_desc);
411
412 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
413 return;
414
415 for (i = 0; i < size; i++) {
416 if (prod)
417 gk20a_writel(g, gv11b_slcg_ltc[i].addr,
418 gv11b_slcg_ltc[i].prod);
419 else
420 gk20a_writel(g, gv11b_slcg_ltc[i].addr,
421 gv11b_slcg_ltc[i].disable);
422 }
423}
424
425void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
426 bool prod)
427{
428 u32 i;
429 u32 size = sizeof(gv11b_slcg_perf) / sizeof(struct gating_desc);
430
431 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
432 return;
433
434 for (i = 0; i < size; i++) {
435 if (prod)
436 gk20a_writel(g, gv11b_slcg_perf[i].addr,
437 gv11b_slcg_perf[i].prod);
438 else
439 gk20a_writel(g, gv11b_slcg_perf[i].addr,
440 gv11b_slcg_perf[i].disable);
441 }
442}
443
444void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
445 bool prod)
446{
447 u32 i;
448 u32 size = sizeof(gv11b_slcg_priring) / sizeof(struct gating_desc);
449
450 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
451 return;
452
453 for (i = 0; i < size; i++) {
454 if (prod)
455 gk20a_writel(g, gv11b_slcg_priring[i].addr,
456 gv11b_slcg_priring[i].prod);
457 else
458 gk20a_writel(g, gv11b_slcg_priring[i].addr,
459 gv11b_slcg_priring[i].disable);
460 }
461}
462
463void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
464 bool prod)
465{
466 u32 i;
467 u32 size = sizeof(gv11b_slcg_pwr_csb) / sizeof(struct gating_desc);
468
469 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
470 return;
471
472 for (i = 0; i < size; i++) {
473 if (prod)
474 gk20a_writel(g, gv11b_slcg_pwr_csb[i].addr,
475 gv11b_slcg_pwr_csb[i].prod);
476 else
477 gk20a_writel(g, gv11b_slcg_pwr_csb[i].addr,
478 gv11b_slcg_pwr_csb[i].disable);
479 }
480}
481
482void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
483 bool prod)
484{
485 u32 i;
486 u32 size = sizeof(gv11b_slcg_pmu) / sizeof(struct gating_desc);
487
488 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
489 return;
490
491 for (i = 0; i < size; i++) {
492 if (prod)
493 gk20a_writel(g, gv11b_slcg_pmu[i].addr,
494 gv11b_slcg_pmu[i].prod);
495 else
496 gk20a_writel(g, gv11b_slcg_pmu[i].addr,
497 gv11b_slcg_pmu[i].disable);
498 }
499}
500
501void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
502 bool prod)
503{
504 u32 i;
505 u32 size = sizeof(gv11b_slcg_therm) / sizeof(struct gating_desc);
506
507 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
508 return;
509
510 for (i = 0; i < size; i++) {
511 if (prod)
512 gk20a_writel(g, gv11b_slcg_therm[i].addr,
513 gv11b_slcg_therm[i].prod);
514 else
515 gk20a_writel(g, gv11b_slcg_therm[i].addr,
516 gv11b_slcg_therm[i].disable);
517 }
518}
519
520void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
521 bool prod)
522{
523 u32 i;
524 u32 size = sizeof(gv11b_slcg_xbar) / sizeof(struct gating_desc);
525
526 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
527 return;
528
529 for (i = 0; i < size; i++) {
530 if (prod)
531 gk20a_writel(g, gv11b_slcg_xbar[i].addr,
532 gv11b_slcg_xbar[i].prod);
533 else
534 gk20a_writel(g, gv11b_slcg_xbar[i].addr,
535 gv11b_slcg_xbar[i].disable);
536 }
537}
538
539void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
540 bool prod)
541{
542 u32 i;
543 u32 size = sizeof(gv11b_blcg_bus) / sizeof(struct gating_desc);
544
545 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
546 return;
547
548 for (i = 0; i < size; i++) {
549 if (prod)
550 gk20a_writel(g, gv11b_blcg_bus[i].addr,
551 gv11b_blcg_bus[i].prod);
552 else
553 gk20a_writel(g, gv11b_blcg_bus[i].addr,
554 gv11b_blcg_bus[i].disable);
555 }
556}
557
558void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
559 bool prod)
560{
561 u32 i;
562 u32 size = sizeof(gv11b_blcg_ce) / sizeof(struct gating_desc);
563
564 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
565 return;
566
567 for (i = 0; i < size; i++) {
568 if (prod)
569 gk20a_writel(g, gv11b_blcg_ce[i].addr,
570 gv11b_blcg_ce[i].prod);
571 else
572 gk20a_writel(g, gv11b_blcg_ce[i].addr,
573 gv11b_blcg_ce[i].disable);
574 }
575}
576
577void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
578 bool prod)
579{
580 u32 i;
581 u32 size = sizeof(gv11b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
582
583 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
584 return;
585
586 for (i = 0; i < size; i++) {
587 if (prod)
588 gk20a_writel(g, gv11b_blcg_ctxsw_prog[i].addr,
589 gv11b_blcg_ctxsw_prog[i].prod);
590 else
591 gk20a_writel(g, gv11b_blcg_ctxsw_prog[i].addr,
592 gv11b_blcg_ctxsw_prog[i].disable);
593 }
594}
595
596void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
597 bool prod)
598{
599 u32 i;
600 u32 size = sizeof(gv11b_blcg_fb) / sizeof(struct gating_desc);
601
602 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
603 return;
604
605 for (i = 0; i < size; i++) {
606 if (prod)
607 gk20a_writel(g, gv11b_blcg_fb[i].addr,
608 gv11b_blcg_fb[i].prod);
609 else
610 gk20a_writel(g, gv11b_blcg_fb[i].addr,
611 gv11b_blcg_fb[i].disable);
612 }
613}
614
615void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
616 bool prod)
617{
618 u32 i;
619 u32 size = sizeof(gv11b_blcg_fifo) / sizeof(struct gating_desc);
620
621 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
622 return;
623
624 for (i = 0; i < size; i++) {
625 if (prod)
626 gk20a_writel(g, gv11b_blcg_fifo[i].addr,
627 gv11b_blcg_fifo[i].prod);
628 else
629 gk20a_writel(g, gv11b_blcg_fifo[i].addr,
630 gv11b_blcg_fifo[i].disable);
631 }
632}
633
634void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
635 bool prod)
636{
637 u32 i;
638 u32 size = sizeof(gv11b_blcg_gr) / sizeof(struct gating_desc);
639
640 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
641 return;
642
643 for (i = 0; i < size; i++) {
644 if (prod)
645 gk20a_writel(g, gv11b_blcg_gr[i].addr,
646 gv11b_blcg_gr[i].prod);
647 else
648 gk20a_writel(g, gv11b_blcg_gr[i].addr,
649 gv11b_blcg_gr[i].disable);
650 }
651}
652
653void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
654 bool prod)
655{
656 u32 i;
657 u32 size = sizeof(gv11b_blcg_ltc) / sizeof(struct gating_desc);
658
659 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
660 return;
661
662 for (i = 0; i < size; i++) {
663 if (prod)
664 gk20a_writel(g, gv11b_blcg_ltc[i].addr,
665 gv11b_blcg_ltc[i].prod);
666 else
667 gk20a_writel(g, gv11b_blcg_ltc[i].addr,
668 gv11b_blcg_ltc[i].disable);
669 }
670}
671
672void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
673 bool prod)
674{
675 u32 i;
676 u32 size = sizeof(gv11b_blcg_pwr_csb) / sizeof(struct gating_desc);
677
678 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
679 return;
680
681 for (i = 0; i < size; i++) {
682 if (prod)
683 gk20a_writel(g, gv11b_blcg_pwr_csb[i].addr,
684 gv11b_blcg_pwr_csb[i].prod);
685 else
686 gk20a_writel(g, gv11b_blcg_pwr_csb[i].addr,
687 gv11b_blcg_pwr_csb[i].disable);
688 }
689}
690
691void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
692 bool prod)
693{
694 u32 i;
695 u32 size = sizeof(gv11b_blcg_pmu) / sizeof(struct gating_desc);
696
697 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
698 return;
699
700 for (i = 0; i < size; i++) {
701 if (prod)
702 gk20a_writel(g, gv11b_blcg_pmu[i].addr,
703 gv11b_blcg_pmu[i].prod);
704 else
705 gk20a_writel(g, gv11b_blcg_pmu[i].addr,
706 gv11b_blcg_pmu[i].disable);
707 }
708}
709
710void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
711 bool prod)
712{
713 u32 i;
714 u32 size = sizeof(gv11b_blcg_xbar) / sizeof(struct gating_desc);
715
716 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
717 return;
718
719 for (i = 0; i < size; i++) {
720 if (prod)
721 gk20a_writel(g, gv11b_blcg_xbar[i].addr,
722 gv11b_blcg_xbar[i].prod);
723 else
724 gk20a_writel(g, gv11b_blcg_xbar[i].addr,
725 gv11b_blcg_xbar[i].disable);
726 }
727}
728
729void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
730 bool prod)
731{
732 u32 i;
733 u32 size = sizeof(gv11b_pg_gr) / sizeof(struct gating_desc);
734
735 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
736 return;
737
738 for (i = 0; i < size; i++) {
739 if (prod)
740 gk20a_writel(g, gv11b_pg_gr[i].addr,
741 gv11b_pg_gr[i].prod);
742 else
743 gk20a_writel(g, gv11b_pg_gr[i].addr,
744 gv11b_pg_gr[i].disable);
745 }
746}
747
748#endif /* __gv11b_gating_reglist_h__ */
diff --git a/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h
new file mode 100644
index 00000000..233189e0
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2016, NVIDIA Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24
25void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
26 bool prod);
27
28void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
29 bool prod);
30
31void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
32 bool prod);
33
34void gv11b_slcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
35 bool prod);
36
37void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
38 bool prod);
39
40void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
41 bool prod);
42
43void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
44 bool prod);
45
46void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
47 bool prod);
48
49void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
50 bool prod);
51
52void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
53 bool prod);
54
55void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
56 bool prod);
57
58void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
59 bool prod);
60
61void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
62 bool prod);
63
64void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
65 bool prod);
66
67void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
68 bool prod);
69
70void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
71 bool prod);
72
73void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
74 bool prod);
75
76void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
77 bool prod);
78
79void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
80 bool prod);
81
82void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
83 bool prod);
84
85void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
86 bool prod);
87
88void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
89 bool prod);
90
91void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
92 bool prod);
93
94void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
95 bool prod);
96
97void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
98 bool prod);
99
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
new file mode 100644
index 00000000..fc059caa
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -0,0 +1,778 @@
1/*
2 * GV11B Tegra HAL interface
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/types.h>
26#include <linux/printk.h>
27
28#include <linux/types.h>
29#include <linux/tegra_gpu_t19x.h>
30
31#include "gk20a/gk20a.h"
32#include "gk20a/fifo_gk20a.h"
33#include "gk20a/fecs_trace_gk20a.h"
34#include "gk20a/css_gr_gk20a.h"
35#include "gk20a/mc_gk20a.h"
36#include "gk20a/mm_gk20a.h"
37#include "gk20a/dbg_gpu_gk20a.h"
38#include "gk20a/bus_gk20a.h"
39#include "gk20a/flcn_gk20a.h"
40#include "gk20a/regops_gk20a.h"
41#include "gk20a/fb_gk20a.h"
42#include "gk20a/pmu_gk20a.h"
43#include "gk20a/gr_gk20a.h"
44
45#include "gm20b/ltc_gm20b.h"
46#include "gm20b/gr_gm20b.h"
47#include "gm20b/fb_gm20b.h"
48#include "gm20b/fifo_gm20b.h"
49#include "gm20b/mm_gm20b.h"
50#include "gm20b/acr_gm20b.h"
51#include "gm20b/pmu_gm20b.h"
52
53#include "gp10b/ltc_gp10b.h"
54#include "gp10b/therm_gp10b.h"
55#include "gp10b/mc_gp10b.h"
56#include "gp10b/ce_gp10b.h"
57#include "gp10b/priv_ring_gp10b.h"
58#include "gp10b/fifo_gp10b.h"
59#include "gp10b/fecs_trace_gp10b.h"
60#include "gp10b/fb_gp10b.h"
61#include "gp10b/mm_gp10b.h"
62#include "gp10b/pmu_gp10b.h"
63#include "gp10b/gr_gp10b.h"
64
65#include "gp106/pmu_gp106.h"
66#include "gp106/acr_gp106.h"
67
68#include "gv100/gr_gv100.h"
69
70#include "dbg_gpu_gv11b.h"
71#include "hal_gv11b.h"
72#include "css_gr_gv11b.h"
73#include "gr_gv11b.h"
74#include "mc_gv11b.h"
75#include "ltc_gv11b.h"
76#include "gv11b.h"
77#include "ce_gv11b.h"
78#include "gr_ctx_gv11b.h"
79#include "mm_gv11b.h"
80#include "pmu_gv11b.h"
81#include "acr_gv11b.h"
82#include "fb_gv11b.h"
83#include "fifo_gv11b.h"
84#include "gv11b_gating_reglist.h"
85#include "regops_gv11b.h"
86#include "subctx_gv11b.h"
87#include "therm_gv11b.h"
88
89#include <nvgpu/bus.h>
90#include <nvgpu/debug.h>
91#include <nvgpu/enabled.h>
92#include <nvgpu/ctxsw_trace.h>
93
94#include <nvgpu/hw/gv11b/hw_proj_gv11b.h>
95#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
96#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
97#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
98#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
99#include <nvgpu/hw/gv11b/hw_fuse_gv11b.h>
100
101int gv11b_get_litter_value(struct gk20a *g, int value)
102{
103 int ret = EINVAL;
104 switch (value) {
105 case GPU_LIT_NUM_GPCS:
106 ret = proj_scal_litter_num_gpcs_v();
107 break;
108 case GPU_LIT_NUM_PES_PER_GPC:
109 ret = proj_scal_litter_num_pes_per_gpc_v();
110 break;
111 case GPU_LIT_NUM_ZCULL_BANKS:
112 ret = proj_scal_litter_num_zcull_banks_v();
113 break;
114 case GPU_LIT_NUM_TPC_PER_GPC:
115 ret = proj_scal_litter_num_tpc_per_gpc_v();
116 break;
117 case GPU_LIT_NUM_SM_PER_TPC:
118 ret = proj_scal_litter_num_sm_per_tpc_v();
119 break;
120 case GPU_LIT_NUM_FBPS:
121 ret = proj_scal_litter_num_fbps_v();
122 break;
123 case GPU_LIT_GPC_BASE:
124 ret = proj_gpc_base_v();
125 break;
126 case GPU_LIT_GPC_STRIDE:
127 ret = proj_gpc_stride_v();
128 break;
129 case GPU_LIT_GPC_SHARED_BASE:
130 ret = proj_gpc_shared_base_v();
131 break;
132 case GPU_LIT_TPC_IN_GPC_BASE:
133 ret = proj_tpc_in_gpc_base_v();
134 break;
135 case GPU_LIT_TPC_IN_GPC_STRIDE:
136 ret = proj_tpc_in_gpc_stride_v();
137 break;
138 case GPU_LIT_TPC_IN_GPC_SHARED_BASE:
139 ret = proj_tpc_in_gpc_shared_base_v();
140 break;
141 case GPU_LIT_PPC_IN_GPC_BASE:
142 ret = proj_ppc_in_gpc_base_v();
143 break;
144 case GPU_LIT_PPC_IN_GPC_SHARED_BASE:
145 ret = proj_ppc_in_gpc_shared_base_v();
146 break;
147 case GPU_LIT_PPC_IN_GPC_STRIDE:
148 ret = proj_ppc_in_gpc_stride_v();
149 break;
150 case GPU_LIT_ROP_BASE:
151 ret = proj_rop_base_v();
152 break;
153 case GPU_LIT_ROP_STRIDE:
154 ret = proj_rop_stride_v();
155 break;
156 case GPU_LIT_ROP_SHARED_BASE:
157 ret = proj_rop_shared_base_v();
158 break;
159 case GPU_LIT_HOST_NUM_ENGINES:
160 ret = proj_host_num_engines_v();
161 break;
162 case GPU_LIT_HOST_NUM_PBDMA:
163 ret = proj_host_num_pbdma_v();
164 break;
165 case GPU_LIT_LTC_STRIDE:
166 ret = proj_ltc_stride_v();
167 break;
168 case GPU_LIT_LTS_STRIDE:
169 ret = proj_lts_stride_v();
170 break;
171 case GPU_LIT_SM_PRI_STRIDE:
172 ret = proj_sm_stride_v();
173 break;
174 case GPU_LIT_SMPC_PRI_BASE:
175 ret = proj_smpc_base_v();
176 break;
177 case GPU_LIT_SMPC_PRI_SHARED_BASE:
178 ret = proj_smpc_shared_base_v();
179 break;
180 case GPU_LIT_SMPC_PRI_UNIQUE_BASE:
181 ret = proj_smpc_unique_base_v();
182 break;
183 case GPU_LIT_SMPC_PRI_STRIDE:
184 ret = proj_smpc_stride_v();
185 break;
186 /* Even though GV11B doesn't have an FBPA unit, the HW reports one,
187 * and the microcode as a result leaves space in the context buffer
188 * for one, so make sure SW accounts for this also.
189 */
190 case GPU_LIT_NUM_FBPAS:
191 ret = proj_scal_litter_num_fbpas_v();
192 break;
193 /* Hardcode FBPA values other than NUM_FBPAS to 0. */
194 case GPU_LIT_FBPA_STRIDE:
195 case GPU_LIT_FBPA_BASE:
196 case GPU_LIT_FBPA_SHARED_BASE:
197 ret = 0;
198 break;
199 case GPU_LIT_TWOD_CLASS:
200 ret = FERMI_TWOD_A;
201 break;
202 case GPU_LIT_THREED_CLASS:
203 ret = VOLTA_A;
204 break;
205 case GPU_LIT_COMPUTE_CLASS:
206 ret = VOLTA_COMPUTE_A;
207 break;
208 case GPU_LIT_GPFIFO_CLASS:
209 ret = VOLTA_CHANNEL_GPFIFO_A;
210 break;
211 case GPU_LIT_I2M_CLASS:
212 ret = KEPLER_INLINE_TO_MEMORY_B;
213 break;
214 case GPU_LIT_DMA_COPY_CLASS:
215 ret = VOLTA_DMA_COPY_A;
216 break;
217
218 default:
219 nvgpu_err(g, "Missing definition %d", value);
220 BUG();
221 break;
222 }
223
224 return ret;
225}
226
/*
 * Static HAL descriptor for GV11B.  gv11b_init_hal() below copies each
 * sub-struct into g->ops.  Entries mix inherited gk20a/gm20b/gp10b/gv100
 * implementations with GV11B-specific ones; a NULL entry is left
 * unimplemented for this chip.
 */
static const struct gpu_ops gv11b_ops = {
	/* L2 cache (LTC) ops */
	.ltc = {
		.determine_L2_size_bytes = gp10b_determine_L2_size_bytes,
		.set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
		.set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
		.set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
		.init_cbc = NULL,
		.init_fs_state = gv11b_ltc_init_fs_state,
		.init_comptags = gp10b_ltc_init_comptags,
		.cbc_ctrl = gm20b_ltc_cbc_ctrl,
		.isr = gv11b_ltc_isr,
		.cbc_fix_config = gv11b_ltc_cbc_fix_config,
		.flush = gm20b_flush_ltc,
		.set_enabled = gp10b_ltc_set_enabled,
	},
	/* copy engine ops */
	.ce2 = {
		.isr_stall = gv11b_ce_isr,
		.isr_nonstall = gp10b_ce_nonstall_isr,
		.get_num_pce = gv11b_ce_get_num_pce,
	},
	/* graphics engine (GR) ops */
	.gr = {
		.get_patch_slots = gr_gv100_get_patch_slots,
		.init_gpc_mmu = gr_gv11b_init_gpc_mmu,
		.bundle_cb_defaults = gr_gv11b_bundle_cb_defaults,
		.cb_size_default = gr_gv11b_cb_size_default,
		.calc_global_ctx_buffer_size =
			gr_gv11b_calc_global_ctx_buffer_size,
		.commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
		.commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
		.commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
		.commit_global_pagepool = gr_gp10b_commit_global_pagepool,
		.handle_sw_method = gr_gv11b_handle_sw_method,
		.set_alpha_circular_buffer_size =
			gr_gv11b_set_alpha_circular_buffer_size,
		.set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
		.enable_hww_exceptions = gr_gv11b_enable_hww_exceptions,
		.is_valid_class = gr_gv11b_is_valid_class,
		.is_valid_gfx_class = gr_gv11b_is_valid_gfx_class,
		.is_valid_compute_class = gr_gv11b_is_valid_compute_class,
		.get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
		.get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
		.init_fs_state = gr_gv11b_init_fs_state,
		.set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
		.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
		/* overridden in gv11b_init_hal() when priv security is on */
		.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
		.set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask,
		.get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask,
		.free_channel_ctx = gk20a_free_channel_ctx,
		.alloc_obj_ctx = gk20a_alloc_obj_ctx,
		.bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull,
		.get_zcull_info = gr_gk20a_get_zcull_info,
		.is_tpc_addr = gr_gm20b_is_tpc_addr,
		.get_tpc_num = gr_gm20b_get_tpc_num,
		.detect_sm_arch = gr_gv11b_detect_sm_arch,
		.add_zbc_color = gr_gp10b_add_zbc_color,
		.add_zbc_depth = gr_gp10b_add_zbc_depth,
		.zbc_set_table = gk20a_gr_zbc_set_table,
		.zbc_query_table = gr_gk20a_query_zbc,
		.pmu_save_zbc = gk20a_pmu_save_zbc,
		.add_zbc = gr_gk20a_add_zbc,
		.pagepool_default_size = gr_gv11b_pagepool_default_size,
		.init_ctx_state = gr_gp10b_init_ctx_state,
		.alloc_gr_ctx = gr_gp10b_alloc_gr_ctx,
		.free_gr_ctx = gr_gp10b_free_gr_ctx,
		.update_ctxsw_preemption_mode =
			gr_gp10b_update_ctxsw_preemption_mode,
		.dump_gr_regs = gr_gv11b_dump_gr_status_regs,
		.update_pc_sampling = gr_gm20b_update_pc_sampling,
		.get_fbp_en_mask = gr_gm20b_get_fbp_en_mask,
		.get_max_ltc_per_fbp = gr_gm20b_get_max_ltc_per_fbp,
		.get_max_lts_per_ltc = gr_gm20b_get_max_lts_per_ltc,
		.get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask,
		.get_max_fbps_count = gr_gm20b_get_max_fbps_count,
		.init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info,
		.wait_empty = gr_gv11b_wait_empty,
		.init_cyclestats = gr_gm20b_init_cyclestats,
		.set_sm_debug_mode = gv11b_gr_set_sm_debug_mode,
		.enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
		.bpt_reg_info = gv11b_gr_bpt_reg_info,
		.get_access_map = gr_gv11b_get_access_map,
		.handle_fecs_error = gr_gv11b_handle_fecs_error,
		.handle_sm_exception = gr_gk20a_handle_sm_exception,
		.handle_tex_exception = gr_gv11b_handle_tex_exception,
		.enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions,
		.enable_exceptions = gr_gv11b_enable_exceptions,
		.get_lrf_tex_ltc_dram_override = get_ecc_override_val,
		.update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode,
		.update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode,
		.record_sm_error_state = gv11b_gr_record_sm_error_state,
		.update_sm_error_state = gv11b_gr_update_sm_error_state,
		.clear_sm_error_state = gm20b_gr_clear_sm_error_state,
		.suspend_contexts = gr_gp10b_suspend_contexts,
		.resume_contexts = gr_gk20a_resume_contexts,
		.get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
		.init_sm_id_table = gr_gv100_init_sm_id_table,
		.load_smid_config = gr_gv11b_load_smid_config,
		.program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
		.is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
		.is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
		.split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
		.split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
		.setup_rop_mapping = gr_gv11b_setup_rop_mapping,
		.program_zcull_mapping = gr_gv11b_program_zcull_mapping,
		.commit_global_timeslice = gr_gv11b_commit_global_timeslice,
		.commit_inst = gr_gv11b_commit_inst,
		.write_zcull_ptr = gr_gv11b_write_zcull_ptr,
		.write_pm_ptr = gr_gv11b_write_pm_ptr,
		.init_elcg_mode = gr_gv11b_init_elcg_mode,
		.load_tpc_mask = gr_gv11b_load_tpc_mask,
		.inval_icache = gr_gk20a_inval_icache,
		.trigger_suspend = gv11b_gr_sm_trigger_suspend,
		.wait_for_pause = gr_gk20a_wait_for_pause,
		.resume_from_pause = gv11b_gr_resume_from_pause,
		.clear_sm_errors = gr_gk20a_clear_sm_errors,
		.tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
		.get_esr_sm_sel = gv11b_gr_get_esr_sm_sel,
		.sm_debugger_attached = gv11b_gr_sm_debugger_attached,
		.suspend_single_sm = gv11b_gr_suspend_single_sm,
		.suspend_all_sms = gv11b_gr_suspend_all_sms,
		.resume_single_sm = gv11b_gr_resume_single_sm,
		.resume_all_sms = gv11b_gr_resume_all_sms,
		.get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr,
		.get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr,
		.get_sm_no_lock_down_hww_global_esr_mask =
			gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask,
		.lock_down_sm = gv11b_gr_lock_down_sm,
		.wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down,
		.clear_sm_hww = gv11b_gr_clear_sm_hww,
		.init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf,
		.get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs,
		.disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
		.set_boosted_ctx = gr_gp10b_set_boosted_ctx,
		.set_preemption_mode = gr_gp10b_set_preemption_mode,
		.set_czf_bypass = NULL,
		.pre_process_sm_exception = gr_gv11b_pre_process_sm_exception,
		.set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va,
		.init_preemption_state = NULL,
		.update_boosted_ctx = gr_gp10b_update_boosted_ctx,
		.set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
		.create_gr_sysfs = gr_gv11b_create_sysfs,
		.set_ctxsw_preemption_mode = gr_gp10b_set_ctxsw_preemption_mode,
		.is_etpc_addr = gv11b_gr_pri_is_etpc_addr,
		.egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
		.handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception,
		.zbc_s_query_table = gr_gv11b_zbc_s_query_table,
		.load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl,
		.handle_gpc_gpcmmu_exception =
			gr_gv11b_handle_gpc_gpcmmu_exception,
		.add_zbc_type_s = gr_gv11b_add_zbc_type_s,
		.get_egpc_base = gv11b_gr_get_egpc_base,
		.get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
		.handle_gpc_gpccs_exception =
			gr_gv11b_handle_gpc_gpccs_exception,
		.load_zbc_s_tbl = gr_gv11b_load_stencil_tbl,
		.access_smpc_reg = gv11b_gr_access_smpc_reg,
		.is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
		.add_zbc_s = gr_gv11b_add_zbc_stencil,
		.handle_gcc_exception = gr_gv11b_handle_gcc_exception,
		.init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle,
		.handle_tpc_sm_ecc_exception =
			gr_gv11b_handle_tpc_sm_ecc_exception,
		.decode_egpc_addr = gv11b_gr_decode_egpc_addr,
		.init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
	},
	/* framebuffer / HUB interface ops */
	.fb = {
		.reset = gv11b_fb_reset,
		.init_hw = gk20a_fb_init_hw,
		.init_fs_state = gv11b_fb_init_fs_state,
		.init_cbc = gv11b_fb_init_cbc,
		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
		.set_use_full_comp_tag_line =
			gm20b_fb_set_use_full_comp_tag_line,
		.compression_page_size = gp10b_fb_compression_page_size,
		.compressible_page_size = gp10b_fb_compressible_page_size,
		.vpr_info_fetch = gm20b_fb_vpr_info_fetch,
		.dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
		.read_wpr_info = gm20b_fb_read_wpr_info,
		.is_debug_mode_enabled = gm20b_fb_debug_mode_enabled,
		.set_debug_mode = gm20b_fb_set_debug_mode,
		.tlb_invalidate = gk20a_fb_tlb_invalidate,
		.hub_isr = gv11b_fb_hub_isr,
		.mem_unlock = NULL,
	},
	/* SLCG/BLCG/PG gating register-list loaders (gv11b_gating_reglist.c) */
	.clock_gating = {
		.slcg_bus_load_gating_prod =
			gv11b_slcg_bus_load_gating_prod,
		.slcg_ce2_load_gating_prod =
			gv11b_slcg_ce2_load_gating_prod,
		.slcg_chiplet_load_gating_prod =
			gv11b_slcg_chiplet_load_gating_prod,
		.slcg_ctxsw_firmware_load_gating_prod =
			gv11b_slcg_ctxsw_firmware_load_gating_prod,
		.slcg_fb_load_gating_prod =
			gv11b_slcg_fb_load_gating_prod,
		.slcg_fifo_load_gating_prod =
			gv11b_slcg_fifo_load_gating_prod,
		.slcg_gr_load_gating_prod =
			gr_gv11b_slcg_gr_load_gating_prod,
		.slcg_ltc_load_gating_prod =
			ltc_gv11b_slcg_ltc_load_gating_prod,
		.slcg_perf_load_gating_prod =
			gv11b_slcg_perf_load_gating_prod,
		.slcg_priring_load_gating_prod =
			gv11b_slcg_priring_load_gating_prod,
		.slcg_pmu_load_gating_prod =
			gv11b_slcg_pmu_load_gating_prod,
		.slcg_therm_load_gating_prod =
			gv11b_slcg_therm_load_gating_prod,
		.slcg_xbar_load_gating_prod =
			gv11b_slcg_xbar_load_gating_prod,
		.blcg_bus_load_gating_prod =
			gv11b_blcg_bus_load_gating_prod,
		.blcg_ce_load_gating_prod =
			gv11b_blcg_ce_load_gating_prod,
		.blcg_ctxsw_firmware_load_gating_prod =
			gv11b_blcg_ctxsw_firmware_load_gating_prod,
		.blcg_fb_load_gating_prod =
			gv11b_blcg_fb_load_gating_prod,
		.blcg_fifo_load_gating_prod =
			gv11b_blcg_fifo_load_gating_prod,
		.blcg_gr_load_gating_prod =
			gv11b_blcg_gr_load_gating_prod,
		.blcg_ltc_load_gating_prod =
			gv11b_blcg_ltc_load_gating_prod,
		.blcg_pwr_csb_load_gating_prod =
			gv11b_blcg_pwr_csb_load_gating_prod,
		.blcg_pmu_load_gating_prod =
			gv11b_blcg_pmu_load_gating_prod,
		.blcg_xbar_load_gating_prod =
			gv11b_blcg_xbar_load_gating_prod,
		.pg_gr_load_gating_prod =
			gr_gv11b_pg_gr_load_gating_prod,
	},
	/* host FIFO / channel / TSG / runlist ops */
	.fifo = {
		.get_preempt_timeout = gv11b_fifo_get_preempt_timeout,
		.init_fifo_setup_hw = gv11b_init_fifo_setup_hw,
		.bind_channel = channel_gm20b_bind,
		.unbind_channel = channel_gv11b_unbind,
		.disable_channel = gk20a_fifo_disable_channel,
		.enable_channel = gk20a_fifo_enable_channel,
		.alloc_inst = gk20a_fifo_alloc_inst,
		.free_inst = gk20a_fifo_free_inst,
		.setup_ramfc = channel_gv11b_setup_ramfc,
		.channel_set_timeslice = gk20a_fifo_set_timeslice,
		.default_timeslice_us = gk20a_fifo_default_timeslice_us,
		.setup_userd = gk20a_fifo_setup_userd,
		.userd_gp_get = gv11b_userd_gp_get,
		.userd_gp_put = gv11b_userd_gp_put,
		.userd_pb_get = gv11b_userd_pb_get,
		.pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
		.preempt_channel = gv11b_fifo_preempt_channel,
		.preempt_tsg = gv11b_fifo_preempt_tsg,
		.enable_tsg = gv11b_fifo_enable_tsg,
		.disable_tsg = gk20a_disable_tsg,
		.tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status,
		.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload,
		.tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted,
		.update_runlist = gk20a_fifo_update_runlist,
		/* GV11B reports faults via the HUB, not these legacy hooks */
		.trigger_mmu_fault = NULL,
		.get_mmu_fault_info = NULL,
		.wait_engine_idle = gk20a_fifo_wait_engine_idle,
		.get_num_fifos = gv11b_fifo_get_num_fifos,
		.get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
		.set_runlist_interleave = gk20a_fifo_set_runlist_interleave,
		.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
		.force_reset_ch = gk20a_fifo_force_reset_ch,
		.engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
		.device_info_data_parse = gp10b_device_info_data_parse,
		.eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
		.init_engine_info = gk20a_fifo_init_engine_info,
		.runlist_entry_size = ram_rl_entry_size_v,
		.get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
		.get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
		.is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
		.dump_pbdma_status = gk20a_dump_pbdma_status,
		.dump_eng_status = gv11b_dump_eng_status,
		.dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
		.intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
		.is_preempt_pending = gv11b_fifo_is_preempt_pending,
		.init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs,
		.reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
		.teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
		.handle_sched_error = gv11b_fifo_handle_sched_error,
		.handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0,
		.handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1,
		.init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers,
		.deinit_eng_method_buffers =
			gv11b_fifo_deinit_eng_method_buffers,
		.tsg_bind_channel = gk20a_tsg_bind_channel,
		.tsg_unbind_channel = gk20a_tsg_unbind_channel,
#ifdef CONFIG_TEGRA_GK20A_NVHOST
		/* syncpoint helpers only exist with nvhost support */
		.alloc_syncpt_buf = gv11b_fifo_alloc_syncpt_buf,
		.free_syncpt_buf = gv11b_fifo_free_syncpt_buf,
		.add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd,
		.get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size,
		.add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd,
		.get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size,
#endif
		.resetup_ramfc = NULL,
		.device_info_fault_id = top_device_info_data_fault_id_enum_v,
		.free_channel_ctx_header = gv11b_free_subctx_header,
		.preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg,
		.handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
	},
	/* GR context image / netlist firmware selection */
	.gr_ctx = {
		.get_netlist_name = gr_gv11b_get_netlist_name,
		.is_fw_defined = gr_gv11b_is_firmware_defined,
	},
#ifdef CONFIG_GK20A_CTXSW_TRACE
	/* FECS context-switch tracing: not wired up on GV11B yet */
	.fecs_trace = {
		.alloc_user_buffer = NULL,
		.free_user_buffer = NULL,
		.mmap_user_buffer = NULL,
		.init = NULL,
		.deinit = NULL,
		.enable = NULL,
		.disable = NULL,
		.is_enabled = NULL,
		.reset = NULL,
		.flush = NULL,
		.poll = NULL,
		.bind_channel = NULL,
		.unbind_channel = NULL,
		.max_entries = NULL,
	},
#endif /* CONFIG_GK20A_CTXSW_TRACE */
	/* GMMU / memory-management ops */
	.mm = {
		.support_sparse = gm20b_mm_support_sparse,
		.gmmu_map = gk20a_locked_gmmu_map,
		.gmmu_unmap = gk20a_locked_gmmu_unmap,
		.vm_bind_channel = gk20a_vm_bind_channel,
		.fb_flush = gk20a_mm_fb_flush,
		.l2_invalidate = gk20a_mm_l2_invalidate,
		.l2_flush = gv11b_mm_l2_flush,
		.cbc_clean = gk20a_mm_cbc_clean,
		.set_big_page_size = gm20b_mm_set_big_page_size,
		.get_big_page_sizes = gm20b_mm_get_big_page_sizes,
		.get_default_big_page_size = gp10b_mm_get_default_big_page_size,
		.gpu_phys_addr = gv11b_gpu_phys_addr,
		.get_iommu_bit = gp10b_mm_get_iommu_bit,
		.get_mmu_levels = gp10b_mm_get_mmu_levels,
		.init_pdb = gp10b_mm_init_pdb,
		.init_mm_setup_hw = gv11b_init_mm_setup_hw,
		.is_bar1_supported = gv11b_mm_is_bar1_supported,
		.alloc_inst_block = gk20a_alloc_inst_block,
		.init_inst_block = gv11b_init_inst_block,
		.mmu_fault_pending = gv11b_mm_mmu_fault_pending,
		.get_kind_invalid = gm20b_get_kind_invalid,
		.get_kind_pitch = gm20b_get_kind_pitch,
		.init_bar2_vm = gb10b_init_bar2_vm,
		.init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup,
		.remove_bar2_vm = gv11b_mm_remove_bar2_vm,
		.fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
	},
	/* thermal / idle-filter setup */
	.therm = {
		.init_therm_setup_hw = gp10b_init_therm_setup_hw,
		.elcg_init_idle_filters = gv11b_elcg_init_idle_filters,
	},
	/*
	 * PMU ops.  Several boot-related entries (prepare_ucode,
	 * pmu_setup_hw_and_bootstrap, ...) are filled in by
	 * gv11b_init_hal() depending on priv security.
	 */
	.pmu = {
		.pmu_setup_elpg = gp10b_pmu_setup_elpg,
		.pmu_get_queue_head = pwr_pmu_queue_head_r,
		.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
		.pmu_get_queue_tail = pwr_pmu_queue_tail_r,
		.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
		.pmu_queue_head = gk20a_pmu_queue_head,
		.pmu_queue_tail = gk20a_pmu_queue_tail,
		.pmu_msgq_tail = gk20a_pmu_msgq_tail,
		.pmu_mutex_size = pwr_pmu_mutex__size_1_v,
		.pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
		.pmu_mutex_release = gk20a_pmu_mutex_release,
		.write_dmatrfbase = gp10b_write_dmatrfbase,
		.pmu_elpg_statistics = gp106_pmu_elpg_statistics,
		.pmu_pg_init_param = gv11b_pg_gr_init,
		.pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
		.pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
		.dump_secure_fuses = pmu_dump_security_fuses_gp10b,
		.reset_engine = gp106_pmu_engine_reset,
		.is_engine_in_reset = gp106_pmu_is_engine_in_reset,
		.pmu_nsbootstrap = gv11b_pmu_bootstrap,
		.pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask,
		.is_pmu_supported = gv11b_is_pmu_supported,
	},
	/* register-access whitelists for the regops debug interface */
	.regops = {
		.get_global_whitelist_ranges =
			gv11b_get_global_whitelist_ranges,
		.get_global_whitelist_ranges_count =
			gv11b_get_global_whitelist_ranges_count,
		.get_context_whitelist_ranges =
			gv11b_get_context_whitelist_ranges,
		.get_context_whitelist_ranges_count =
			gv11b_get_context_whitelist_ranges_count,
		.get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
		.get_runcontrol_whitelist_count =
			gv11b_get_runcontrol_whitelist_count,
		.get_runcontrol_whitelist_ranges =
			gv11b_get_runcontrol_whitelist_ranges,
		.get_runcontrol_whitelist_ranges_count =
			gv11b_get_runcontrol_whitelist_ranges_count,
		.get_qctl_whitelist = gv11b_get_qctl_whitelist,
		.get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count,
		.get_qctl_whitelist_ranges = gv11b_get_qctl_whitelist_ranges,
		.get_qctl_whitelist_ranges_count =
			gv11b_get_qctl_whitelist_ranges_count,
		.apply_smpc_war = gv11b_apply_smpc_war,
	},
	/* master controller (interrupt routing, engine enable/reset) */
	.mc = {
		.intr_enable = mc_gv11b_intr_enable,
		.intr_unit_config = mc_gp10b_intr_unit_config,
		.isr_stall = mc_gp10b_isr_stall,
		.intr_stall = mc_gp10b_intr_stall,
		.intr_stall_pause = mc_gp10b_intr_stall_pause,
		.intr_stall_resume = mc_gp10b_intr_stall_resume,
		.intr_nonstall = mc_gp10b_intr_nonstall,
		.intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
		.intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
		.enable = gk20a_mc_enable,
		.disable = gk20a_mc_disable,
		.reset = gk20a_mc_reset,
		.boot_0 = gk20a_mc_boot_0,
		.is_intr1_pending = mc_gp10b_is_intr1_pending,
		.is_intr_hub_pending = gv11b_mc_is_intr_hub_pending,
	},
	.debug = {
		.show_dump = gk20a_debug_show_dump,
	},
	/* debugger/profiler session ops */
	.dbg_session_ops = {
		.exec_reg_ops = exec_regops_gk20a,
		.dbg_set_powergate = dbg_set_powergate,
		.check_and_set_global_reservation =
			nvgpu_check_and_set_global_reservation,
		.check_and_set_context_reservation =
			nvgpu_check_and_set_context_reservation,
		.release_profiler_reservation =
			nvgpu_release_profiler_reservation,
		.perfbuffer_enable = gv11b_perfbuf_enable_locked,
		.perfbuffer_disable = gv11b_perfbuf_disable_locked,
	},
	/* host bus interface (ptimer etc.) */
	.bus = {
		.init_hw = gk20a_bus_init_hw,
		.isr = gk20a_bus_isr,
		.read_ptimer = gk20a_read_ptimer,
		.get_timestamps_zipper = nvgpu_get_timestamps_zipper,
		.bar1_bind = NULL,
	},
#if defined(CONFIG_GK20A_CYCLE_STATS)
	/* cycle stats snapshot ops */
	.css = {
		.enable_snapshot = gv11b_css_hw_enable_snapshot,
		.disable_snapshot = gv11b_css_hw_disable_snapshot,
		.check_data_available = gv11b_css_hw_check_data_available,
		.set_handled_snapshots = css_hw_set_handled_snapshots,
		.allocate_perfmon_ids = css_gr_allocate_perfmon_ids,
		.release_perfmon_ids = css_gr_release_perfmon_ids,
	},
#endif
	.falcon = {
		.falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
	},
	.priv_ring = {
		.isr = gp10b_priv_ring_isr,
	},
	/* chip-level lone functions */
	.chip_init_gpu_characteristics = gv11b_init_gpu_characteristics,
	.get_litter_value = gv11b_get_litter_value,
};
690
691int gv11b_init_hal(struct gk20a *g)
692{
693 struct gpu_ops *gops = &g->ops;
694 u32 val;
695 bool priv_security;
696
697 gops->ltc = gv11b_ops.ltc;
698 gops->ce2 = gv11b_ops.ce2;
699 gops->gr = gv11b_ops.gr;
700 gops->fb = gv11b_ops.fb;
701 gops->clock_gating = gv11b_ops.clock_gating;
702 gops->fifo = gv11b_ops.fifo;
703 gops->gr_ctx = gv11b_ops.gr_ctx;
704 gops->mm = gv11b_ops.mm;
705#ifdef CONFIG_GK20A_CTXSW_TRACE
706 gops->fecs_trace = gv11b_ops.fecs_trace;
707#endif
708 gops->therm = gv11b_ops.therm;
709 gops->pmu = gv11b_ops.pmu;
710 gops->regops = gv11b_ops.regops;
711 gops->mc = gv11b_ops.mc;
712 gops->debug = gv11b_ops.debug;
713 gops->dbg_session_ops = gv11b_ops.dbg_session_ops;
714 gops->bus = gv11b_ops.bus;
715#if defined(CONFIG_GK20A_CYCLE_STATS)
716 gops->css = gv11b_ops.css;
717#endif
718 gops->falcon = gv11b_ops.falcon;
719 gops->priv_ring = gv11b_ops.priv_ring;
720
721 /* Lone functions */
722 gops->chip_init_gpu_characteristics =
723 gv11b_ops.chip_init_gpu_characteristics;
724 gops->get_litter_value = gv11b_ops.get_litter_value;
725
726 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
727 if (val) {
728 priv_security = true;
729 pr_err("priv security is enabled\n");
730 } else {
731 priv_security = false;
732 pr_err("priv security is disabled\n");
733 }
734 __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
735 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security);
736 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security);
737
738 /* priv security dependent ops */
739 if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
740 /* Add in ops from gm20b acr */
741 gops->pmu.prepare_ucode = gp106_prepare_ucode_blob,
742 gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn,
743 gops->pmu.get_wpr = gm20b_wpr_info,
744 gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
745 gops->pmu.pmu_populate_loader_cfg =
746 gp106_pmu_populate_loader_cfg,
747 gops->pmu.flcn_populate_bl_dmem_desc =
748 gp106_flcn_populate_bl_dmem_desc,
749 gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
750 gops->pmu.falcon_clear_halt_interrupt_status =
751 clear_halt_interrupt_status,
752 gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1,
753
754 gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
755 gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
756 gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap,
757 gops->pmu.is_priv_load = gv11b_is_priv_load,
758
759 gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
760 } else {
761 /* Inherit from gk20a */
762 gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
763 gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
764
765 gops->pmu.load_lsfalcon_ucode = NULL;
766 gops->pmu.init_wpr_region = NULL;
767 gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
768
769 gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
770 }
771
772 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
773 g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
774
775 g->name = "gv11b";
776
777 return 0;
778}
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.h b/drivers/gpu/nvgpu/gv11b/hal_gv11b.h
new file mode 100644
index 00000000..668353dc
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.h
@@ -0,0 +1,31 @@
1/*
2 * GV11B Tegra HAL interface
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef _NVGPU_HAL_GV11B_H
#define _NVGPU_HAL_GV11B_H

struct gk20a;

/*
 * Install the GV11B HAL into g->ops and set chip enables/name.
 * (Parameter renamed from the misleading "gops": it is the gk20a
 * device pointer, not a struct gpu_ops pointer, matching the
 * definition in hal_gv11b.c.)
 */
int gv11b_init_hal(struct gk20a *g);

/* Map a GPU_LIT_* literal to its GV11B value. */
int gv11b_get_litter_value(struct gk20a *g, int value);

#endif /* _NVGPU_HAL_GV11B_H */
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
new file mode 100644
index 00000000..a199e024
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
@@ -0,0 +1,205 @@
1/*
2 * GV11B LTC
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26#include "gp10b/ltc_gp10b.h"
27
28#include "ltc_gv11b.h"
29
30#include <nvgpu/hw/gv11b/hw_ltc_gv11b.h>
31#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
32#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
33#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
34#include <nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h>
35
36/*
37 * Sets the ZBC stencil for the passed index.
38 */
 39void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
 40					struct zbc_entry *stencil_val,
 41					u32 index)
 42{
 43	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
 44
	/* Select the ZBC table slot to update (broadcast to all LTC/LTS). */
 45	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
 46		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
 47
	/* NOTE(review): the stencil clear value is carried in the zbc_entry
	 * "depth" field — confirm against the zbc_entry layout. */
 48	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(),
 49			stencil_val->depth);
 50
	/* Read back the index register so the writes are posted before
	 * returning. */
 51	gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
 52}
53
/*
 * One-time floorsweep/state init for the GV11B L2: reset the units, record
 * the LTC counts, disable compression and noisy interrupts, enable ECC
 * interrupts.
 */
 54void gv11b_ltc_init_fs_state(struct gk20a *g)
 55{
 56	u32 ltc_intr;
 57	u32 reg;
 58
 59	gk20a_dbg_info("initialize gv11b l2");
 60
	/* Pull PFB and L2 out of reset before touching LTC registers. */
 61	g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
 62			mc_enable_l2_enabled_f());
 63
	/* Allow engine-level power gating (ELPG) for L2. */
 64	reg = gk20a_readl(g, mc_elpg_enable_r());
 65	reg |= mc_elpg_enable_l2_enabled_f();
 66	gk20a_writel(g, mc_elpg_enable_r(), reg);
 67
	/* Cache physical vs. enabled LTC counts; gv11b_ltc_isr() iterates
	 * over ltc_count. */
 68	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
 69	g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
 70	gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count);
 71
	/* Read the unicast LTC0/LTS0 config and write it back via broadcast
	 * with VDC 4:2 compression disabled — assumes all slices mirror
	 * LTC0/LTS0; TODO confirm. */
 72	gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(),
 73		gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) |
 74		ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m());
 75
 76	/* Disable LTC interrupts */
 77	reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
 78	reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
 79	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
 80	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);
 81
 82	/* Enable ECC interrupts */
 83	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
 84	ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
 85		ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
 86	gk20a_writel(g, ltc_ltcs_ltss_intr_r(),
 87			ltc_intr);
 88}
89
/*
 * LTC interrupt service routine: walks every enabled LTC whose bit is set
 * in mc_intr_ltc, and for each slice handles L2 cache ECC
 * corrected/uncorrected errors — reading the per-slice counters, clearing
 * them, accumulating deltas into the per-LTC ECC statistics, and logging
 * which sub-unit (rstg/tstg/dstg) reported the error. Any remaining
 * (non-ECC) interrupts are delegated to the gp10b handler at the end.
 */
 90void gv11b_ltc_isr(struct gk20a *g)
 91{
 92	u32 mc_intr, ltc_intr3;
 93	unsigned int ltc, slice;
 94	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
 95	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
 96	u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
 97	u32 corrected_delta, uncorrected_delta;
 98	u32 corrected_overflow, uncorrected_overflow;
 99	u32 ltc_corrected, ltc_uncorrected;
100
101	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
102	for (ltc = 0; ltc < g->ltc_count; ltc++) {
		/* One top-level pending bit per LTC. */
103		if ((mc_intr & 1 << ltc) == 0)
104			continue;
105		ltc_corrected = ltc_uncorrected = 0;
106
107		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
			/* Unicast register offset for this LTC/LTS pair. */
108			u32 offset = ltc_stride * ltc + lts_stride * slice;
109			ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() +
110						offset);
111
112			/* Detect and handle ECC PARITY errors */
113
114			if (ltc_intr3 &
115				(ltc_ltcs_ltss_intr3_ecc_uncorrected_m() |
116				 ltc_ltcs_ltss_intr3_ecc_corrected_m())) {
117
118				ecc_status = gk20a_readl(g,
119					ltc_ltc0_lts0_l2_cache_ecc_status_r() +
120					offset);
121				ecc_addr = gk20a_readl(g,
122					ltc_ltc0_lts0_l2_cache_ecc_address_r() +
123					offset);
124				corrected_cnt = gk20a_readl(g,
125					ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset);
126				uncorrected_cnt = gk20a_readl(g,
127					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset);
128
129				corrected_delta =
130					ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt);
131				uncorrected_delta =
132					ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
133				corrected_overflow = ecc_status &
134					ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m();
135
136				uncorrected_overflow = ecc_status &
137					ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
138
139				/* clear the interrupt */
140				if ((corrected_delta > 0) || corrected_overflow) {
141					gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
142				}
143				if ((uncorrected_delta > 0) || uncorrected_overflow) {
144					gk20a_writel(g,
145						ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
146				}
147
				/* reset_task clears the ECC status so the
				 * next error can be latched. */
148				gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
149					ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
150
151				/* update counters per slice */
				/* On counter overflow, add back one full
				 * counter span so the accumulated total
				 * stays monotonic. */
152				if (corrected_overflow)
153					corrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
154				if (uncorrected_overflow)
155					uncorrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
156
157				ltc_corrected += corrected_delta;
158				ltc_uncorrected += uncorrected_delta;
159				nvgpu_log(g, gpu_dbg_intr,
160					"ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
161
				/* Log which L2 sub-unit reported the error. */
162				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
163					nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
164				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
165					nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
166				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
167					nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
168				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
169					nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
170				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
171					nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
172				if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
173					nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
174
175				if (corrected_overflow || uncorrected_overflow)
176					nvgpu_info(g, "ecc counter overflow!");
177
178				nvgpu_log(g, gpu_dbg_intr,
179					"ecc error address: 0x%x", ecc_addr);
180
181			}
182
183		}
		/* Fold the per-slice deltas into the per-LTC ECC stats. */
184		g->ecc.ltc.t19x.l2_cache_corrected_err_count.counters[ltc] +=
185			ltc_corrected;
186		g->ecc.ltc.t19x.l2_cache_uncorrected_err_count.counters[ltc] +=
187			ltc_uncorrected;
188
189	}
190
191	/* fallback to other interrupts */
192	gp10b_ltc_isr(g);
193}
194
/*
 * Scale a CBC base value by the number of active LTCs reported by HW.
 * Only 1 or 2 active LTCs are expected; any other count is logged as an
 * error and the base is returned unscaled.
 */
195u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base)
196{
197	u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
198
199	if (ltc_ltcs_ltss_cbc_num_active_ltcs__v(val) == 2)
200		return base * 2;
201	else if (ltc_ltcs_ltss_cbc_num_active_ltcs__v(val) != 1) {
202		nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
203	}
204	return base;
205}
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h
new file mode 100644
index 00000000..9b46e74c
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef LTC_GV11B_H
24#define LTC_GV11B_H
25struct gk20a;
26
27void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
28 struct zbc_entry *stencil_val,
29 u32 index);
30void gv11b_ltc_init_fs_state(struct gk20a *g);
31void gv11b_ltc_isr(struct gk20a *g);
32u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base);
33
34#endif
diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
new file mode 100644
index 00000000..74c5c4d6
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
@@ -0,0 +1,92 @@
1/*
2 * GV11B master
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/types.h>
26
27#include "gk20a/gk20a.h"
28
29#include "gp10b/mc_gp10b.h"
30
31#include "mc_gv11b.h"
32#include "fb_gv11b.h"
33
34#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
35
/*
 * Enable the GV11B top-level (MC) interrupts: clear everything, then
 * program the stalling and non-stalling enable sets and record them in
 * mc_intr_mask_restore so the ISR can re-enable after masking. HUB
 * interrupts are routed through the FB unit's own enable registers.
 */
 36void mc_gv11b_intr_enable(struct gk20a *g)
 37{
 38	u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);
 39
	/* Start from a clean slate: disable all MC and HUB interrupts. */
 40	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
 41				0xffffffff);
 42	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
 43				0xffffffff);
 44	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 45
 46	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 47				 mc_intr_pfifo_pending_f() |
 48				 mc_intr_hub_pending_f() |
 49				 mc_intr_priv_ring_pending_f() |
 50				 mc_intr_pbus_pending_f() |
 51				 mc_intr_ltc_pending_f() |
 52				 eng_intr_mask;
 53
 54	g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
 55				 mc_intr_pfifo_pending_f()
 56			 | eng_intr_mask;
 57
 58	/* TODO: Enable PRI faults for HUB ECC err intr */
 59	gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);
 60
 61	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
 62			g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
 63
 64	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
 65			g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
 66
 67}
68
/* True when the HUB (MMU fault) interrupt bit is set in mc_intr_0. */
 69bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
 70{
 71	return ((mc_intr_0 & mc_intr_hub_pending_f()) ? true : false);
 72}
73
/*
 * Report whether either the given active engine's interrupt or any of the
 * known stalling interrupt sources (PFIFO/HUB/priv ring/PBUS/LTC) is
 * currently pending in mc_intr_0.
 */
 74bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id)
 75{
 76	u32 mc_intr_0 = gk20a_readl(g, mc_intr_r(0));
 77	u32 stall_intr, eng_intr_mask;
 78
	/* Engine-specific interrupt pending? */
 79	eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id);
 80	if (mc_intr_0 & eng_intr_mask)
 81		return true;
 82
	/* Any of the generic stalling sources pending? */
 83	stall_intr = mc_intr_pfifo_pending_f() |
 84			mc_intr_hub_pending_f() |
 85			mc_intr_priv_ring_pending_f() |
 86			mc_intr_pbus_pending_f() |
 87			mc_intr_ltc_pending_f();
 88	if (mc_intr_0 & stall_intr)
 89		return true;
 90
 91	return false;
 92}
diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.h b/drivers/gpu/nvgpu/gv11b/mc_gv11b.h
new file mode 100644
index 00000000..eb9d0e4e
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef MC_GV11B_H
24#define MC_GV11B_H
25struct gk20a;
26
27void mc_gv11b_intr_enable(struct gk20a *g);
28bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0);
29bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id);
30#endif
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
new file mode 100644
index 00000000..fdc506ac
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -0,0 +1,330 @@
1/*
2 * GV11B MMU
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/pm_runtime.h>
26
27#include <nvgpu/kmem.h>
28#include <nvgpu/dma.h>
29#include <nvgpu/log.h>
30#include <nvgpu/mm.h>
31
32#include "gk20a/gk20a.h"
33#include "gk20a/mm_gk20a.h"
34
35#include "gp10b/mm_gp10b.h"
36#include "gp10b/mc_gp10b.h"
37
38#include "mm_gv11b.h"
39#include "fb_gv11b.h"
40
41#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
42#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
43#include <nvgpu/hw/gv11b/hw_bus_gv11b.h>
44
45#define NVGPU_L3_ALLOC_BIT BIT(36)
46
/* GV11B has no BAR1 aperture; all user mappings go through BAR2/other. */
 47bool gv11b_mm_is_bar1_supported(struct gk20a *g)
 48{
 49	return false;
 50}
51
/*
 * Initialize an instance block for the given VM: program its PDB and,
 * when a big page size is requested and the HAL supports it, the big
 * page size field.
 */
 52void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 53		struct vm_gk20a *vm, u32 big_page_size)
 54{
 55	struct gk20a *g = gk20a_from_vm(vm);
 56
 57	gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p",
 58		nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va);
 59
 60	g->ops.mm.init_pdb(g, inst_block, vm);
 61
	/* big_page_size == 0 means "use default"; setter may be absent. */
 62	if (big_page_size && g->ops.mm.set_big_page_size)
 63		g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
 64}
65
/* MMU faults are reported via the FB/HUB unit on GV11B; delegate. */
 66bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 67{
 68	return gv11b_fb_mmu_fault_pending(g);
 69}
70
/*
 * Tear down the shadow MMU fault info buffers: disable the related HUB
 * interrupts, free the single allocation backing both fault_info slots
 * (allocated in gv11b_mm_mmu_fault_info_buf_init()), and destroy the
 * hub ISR mutex.
 */
 71void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 72{
 73	nvgpu_log_fn(g, " ");
 74
 75	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 76
 77	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
 78			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
 79
	/* Both slots point into one kzalloc'd array; freeing slot
	 * OTHER_AND_NONREPLAY releases the whole allocation. */
 80	nvgpu_kfree(g, g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]);
 81
 82	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = NULL;
 83	g->mm.fault_info[FAULT_TYPE_REPLAY] = NULL;
 84
 85	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 86	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
 87}
88
/*
 * Allocate one shadow array of mmu_fault_info (FAULT_TYPE_NUM entries)
 * and point both fault_info slots into it; on success enables the
 * HUB "other" interrupt type in *hub_intr_types.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
 89static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
 90		u32 *hub_intr_types)
 91{
 92	struct mmu_fault_info *fault_info_mem;
 93
 94	fault_info_mem = nvgpu_kzalloc(g, sizeof(struct mmu_fault_info) *
 95			FAULT_TYPE_NUM);
 96	if (!fault_info_mem) {
 97		nvgpu_log_info(g, "failed to alloc shadow fault info");
 98		return -ENOMEM;
 99	}
100	/* shadow buffer for copying mmu fault info */
101	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] =
102		&fault_info_mem[FAULT_TYPE_OTHER_AND_NONREPLAY];
103
104	g->mm.fault_info[FAULT_TYPE_REPLAY] =
105		&fault_info_mem[FAULT_TYPE_REPLAY];
106
107	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
108	return 0;
109}
110
/*
 * Allocate the HW MMU fault buffers (non-replayable and replayable) in
 * the BAR2 VM. Allocation failures are tolerated: faults are then only
 * snapped in priv registers, not in a buffer, and the corresponding
 * HUB interrupt type is simply not enabled in *hub_intr_types.
 */
111static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
112		u32 *hub_intr_types)
113{
114	struct vm_gk20a *vm = g->mm.bar2.vm;
115	int err = 0;
116	size_t fb_size;
117
118	/* Max entries take care of 1 entry used for full detection */
119	fb_size = (g->ops.fifo.get_num_fifos(g) + 1) *
120				gmmu_fault_buf_size_v();
121
122	err = nvgpu_dma_alloc_map_sys(vm, fb_size,
123			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
124	if (err) {
125		nvgpu_err(g,
126		"Error in hw mmu fault buf [0] alloc in bar2 vm ");
127		/* Fault will be snapped in pri reg but not in buffer */
128		return;
129	}
130
131	g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
132			HW_FAULT_BUF_STATUS_ALLOC_TRUE;
133	*hub_intr_types |= HUB_INTR_TYPE_NONREPLAY;
134
135	err = nvgpu_dma_alloc_map_sys(vm, fb_size,
136			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
137	if (err) {
138		nvgpu_err(g,
139		"Error in hw mmu fault buf [1] alloc in bar2 vm ");
140		/* Fault will be snapped in pri reg but not in buffer */
		/* Non-replay buffer stays allocated; deinit handles the
		 * asymmetric state via hw_fault_buf_status. */
141		return;
142	}
143	g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
144			HW_FAULT_BUF_STATUS_ALLOC_TRUE;
145	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
146}
147
/*
 * Undo gv11b_mm_mmu_hw_fault_buf_init(): mask the replay/non-replay HUB
 * interrupts, disable any enabled HW fault buffers, then free whichever
 * buffers were actually allocated (tracked in hw_fault_buf_status).
 */
148static void gv11b_mm_mmu_hw_fault_buf_deinit(struct gk20a *g)
149{
150	struct vm_gk20a *vm = g->mm.bar2.vm;
151
152	nvgpu_log_fn(g, " ");
153
154	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_NONREPLAY |
155			 HUB_INTR_TYPE_REPLAY);
156
157	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
158				 HUB_INTR_TYPE_REPLAY));
159
	/* Disable the HW buffers before freeing their backing memory. */
160	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
161		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
162				FAULT_BUF_DISABLED);
163	}
164
165	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
166		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
167				FAULT_BUF_DISABLED);
168	}
169
170	if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] ==
171			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
172		nvgpu_dma_unmap_free(vm,
173			 &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
174		g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
175			HW_FAULT_BUF_STATUS_ALLOC_FALSE;
176	}
177
178	if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] ==
179			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
180		nvgpu_dma_unmap_free(vm,
181			 &g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
182		g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
183			HW_FAULT_BUF_STATUS_ALLOC_FALSE;
184	}
185}
186
/*
 * Remove the BAR2 VM: free the HW fault buffers first (they live in the
 * BAR2 VM), then release the instance block and the VM itself.
 */
187void gv11b_mm_remove_bar2_vm(struct gk20a *g)
188{
189	struct mm_gk20a *mm = &g->mm;
190
191	nvgpu_log_fn(g, " ");
192
193	gv11b_mm_mmu_hw_fault_buf_deinit(g);
194
195	nvgpu_free_inst_block(g, &mm->bar2.inst_block);
196	nvgpu_vm_put(mm->bar2.vm);
}
197}
198
/* Program the HW fault buffer registers for each buffer that the SW
 * setup phase managed to allocate. */
199static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
200{
201	if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] ==
202			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
203		gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
204	}
205	if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] ==
206			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
207		gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
208	}
209}
210
/*
 * SW-side MMU fault setup: init the hub ISR mutex, mark both HW fault
 * buffers unallocated, start from the ECC-uncorrected interrupt type,
 * then allocate the shadow info buffers and (on success) the HW buffers.
 * Returns the shadow-buffer allocation result; HW buffer allocation
 * failures are non-fatal (see gv11b_mm_mmu_hw_fault_buf_init()).
 */
211static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
212{
213	int err;
214
215	nvgpu_log_fn(g, " ");
216
217	nvgpu_mutex_init(&g->mm.hub_isr_mutex);
218
219	g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
220		HW_FAULT_BUF_STATUS_ALLOC_FALSE;
221	g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
222		HW_FAULT_BUF_STATUS_ALLOC_FALSE;
223
224	g->mm.hub_intr_types = HUB_INTR_TYPE_ECC_UNCORRECTED;
225
226	err = gv11b_mm_mmu_fault_info_buf_init(g, &g->mm.hub_intr_types);
227
228	if (!err)
229		gv11b_mm_mmu_hw_fault_buf_init(g, &g->mm.hub_intr_types);
230
231	return err;
232}
233
/*
 * Top-level MM HW init for GV11B: configure FB, bind BAR2, flush FB,
 * then set up the MMU fault buffers (SW allocation first, HW
 * programming only if allocation succeeded).
 * Returns 0 on success, negative errno on failure.
 */
234int gv11b_init_mm_setup_hw(struct gk20a *g)
235{
236	int err = 0;
237
238	nvgpu_log_fn(g, " ");
239
240	g->ops.fb.set_mmu_page_size(g);
241	g->ops.fb.init_hw(g);
242
243	err = g->ops.mm.init_bar2_mm_hw_setup(g);
244	if (err)
245		return err;
246
	/* FB flush is issued twice here; NOTE(review): with ||, the second
	 * flush runs only when the first succeeds — presumably a required
	 * double-flush rather than a retry. Confirm intent. */
247	if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
248		return -EBUSY;
249
250	err = gv11b_mm_mmu_fault_setup_sw(g);
251	if (!err)
252		gv11b_mm_mmu_fault_setup_hw(g);
253
254	nvgpu_log_fn(g, "end");
255
256	return err;
257}
258
/* Flush L2 (optionally invalidating), bracketed by FB flushes so data
 * is visible to memory before and after the L2 operation. */
259void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
260{
261	nvgpu_log(g, gpu_dbg_fn, "gv11b_mm_l2_flush");
262
263	g->ops.mm.fb_flush(g);
264	gk20a_mm_l2_flush(g, invalidate);
265	g->ops.mm.fb_flush(g);
266}
267
268/*
269 * On Volta the GPU determines whether to do L3 allocation for a mapping by
270 * checking bit 36 of the physical address. So if a mapping should allocate
271 * lines in the L3 this bit must be set.
272 */
273u64 gv11b_gpu_phys_addr(struct gk20a *g,
274			struct nvgpu_gmmu_attrs *attrs, u64 phys)
275{
	/* attrs may be NULL; only tag the address when L3 alloc requested. */
276	if (attrs && attrs->t19x_attrs.l3_alloc)
277		return phys | NVGPU_L3_ALLOC_BIT;
278
279	return phys;
280}
281
/*
 * Bind BAR2 to its instance block and poll until the bind completes.
 * Returns 0 on success, -EBUSY if the bind does not finish before the
 * GR idle timeout.
 */
282int gv11b_init_bar2_mm_hw_setup(struct gk20a *g)
283{
284	struct mm_gk20a *mm = &g->mm;
285	struct nvgpu_mem *inst_block = &mm->bar2.inst_block;
286	u64 inst_pa = nvgpu_inst_block_addr(g, inst_block);
287	u32 reg_val;
288	struct nvgpu_timeout timeout;
289	u32 delay = GR_IDLE_CHECK_DEFAULT;
290
291	nvgpu_log_fn(g, " ");
292
293	g->ops.fb.set_mmu_page_size(g);
294
	/* NOTE(review): shifted pointer is truncated to u32 — assumes the
	 * inst block address fits the bar2_block_ptr field; confirm. */
295	inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v());
296	nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x",  (u32)inst_pa);
297
298	gk20a_writel(g, bus_bar2_block_r(),
299		     nvgpu_aperture_mask(g, inst_block,
300					 bus_bar2_block_target_sys_mem_ncoh_f(),
301					 bus_bar2_block_target_vid_mem_f()) |
302		     bus_bar2_block_mode_virtual_f() |
303		     bus_bar2_block_ptr_f(inst_pa));
304
305	/* This is needed as BAR1 support is removed and there is no way
306	 * to know if gpu successfully accessed memory.
307	 * To avoid deadlocks and non-deterministic virtual address translation
308	 * behavior, after writing BAR2_BLOCK to bind BAR2 to a virtual address
309	 * space, SW must ensure that the bind has completed prior to issuing
310	 * any further BAR2 requests by polling for both
311	 * BUS_BIND_STATUS_BAR2_PENDING to return to EMPTY and
312	 * BUS_BIND_STATUS_BAR2_OUTSTANDING to return to FALSE
313	 */
314	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
315			   NVGPU_TIMER_CPU_TIMER);
316	nvgpu_log_info(g, "check bar2 bind status");
317	do {
318		reg_val = gk20a_readl(g, bus_bind_status_r());
319
320		if (!((reg_val & bus_bind_status_bar2_pending_busy_f()) ||
321		      (reg_val & bus_bind_status_bar2_outstanding_true_f())))
322			return 0;
323
		/* Exponential backoff, capped at GR_IDLE_CHECK_MAX. */
324		nvgpu_usleep_range(delay, delay * 2);
325		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
326	} while (!nvgpu_timeout_expired_msg(&timeout, "bar2 bind timedout"));
327
328	nvgpu_err(g, "bar2 bind failed. gpu unable to access memory");
329	return -EBUSY;
330}
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.h b/drivers/gpu/nvgpu/gv11b/mm_gv11b.h
new file mode 100644
index 00000000..d830b7cc
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.h
@@ -0,0 +1,46 @@
1/*
2 * GV11B MM
3 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef MM_GV11B_H
25#define MM_GV11B_H
26
27#define HW_FAULT_BUF_STATUS_ALLOC_TRUE 1
28#define HW_FAULT_BUF_STATUS_ALLOC_FALSE 0
29
30struct gk20a;
31struct nvgpu_mem;
32struct vm_gk20a;
33
34bool gv11b_mm_is_bar1_supported(struct gk20a *g);
35void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
36 struct vm_gk20a *vm, u32 big_page_size);
37bool gv11b_mm_mmu_fault_pending(struct gk20a *g);
38void gv11b_mm_remove_bar2_vm(struct gk20a *g);
39int gv11b_init_mm_setup_hw(struct gk20a *g);
40int gv11b_init_bar2_mm_hw_setup(struct gk20a *g);
41void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate);
42u64 gv11b_gpu_phys_addr(struct gk20a *g,
43 struct nvgpu_gmmu_attrs *attrs, u64 phys);
44void gv11b_mm_fault_info_mem_destroy(struct gk20a *g);
45
46#endif
diff --git a/drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c b/drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c
new file mode 100644
index 00000000..95d82254
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c
@@ -0,0 +1,549 @@
1/*
2 * GV11B Tegra Platform Interface
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/of_platform.h>
26#include <linux/debugfs.h>
27#include <linux/dma-buf.h>
28#include <linux/nvmap.h>
29#include <linux/reset.h>
30#include <linux/hashtable.h>
31#include <linux/clk.h>
32#include <nvgpu/nvhost.h>
33#include <nvgpu/nvhost_t19x.h>
34
35#include <uapi/linux/nvgpu.h>
36
37#include <soc/tegra/tegra_bpmp.h>
38#include <soc/tegra/tegra_powergate.h>
39
40#include "gk20a/gk20a.h"
41#include "common/linux/platform_gk20a.h"
42#include "common/linux/clk.h"
43
44#include "gp10b/platform_gp10b.h"
45#include "common/linux/platform_gp10b_tegra.h"
46
47#include "common/linux/os_linux.h"
48#include "common/linux/platform_gk20a_tegra.h"
49#include "gr_gv11b.h"
50#include "nvgpu_gpuid_t19x.h"
51
52static void gr_gv11b_remove_sysfs(struct device *dev);
53
54static int gv11b_tegra_probe(struct device *dev)
55{
56 struct gk20a_platform *platform = dev_get_drvdata(dev);
57#ifdef CONFIG_TEGRA_GK20A_NVHOST
58 struct gk20a *g = platform->g;
59 int err = 0;
60
61 err = nvgpu_get_nvhost_dev(g);
62 if (err) {
63 dev_err(dev, "host1x device not available");
64 return err;
65 }
66
67 err = nvgpu_nvhost_syncpt_unit_interface_get_aperture(
68 g->nvhost_dev,
69 &g->syncpt_unit_base,
70 &g->syncpt_unit_size);
71 if (err) {
72 dev_err(dev, "Failed to get syncpt interface");
73 return -ENOSYS;
74 }
75 g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1);
76 gk20a_dbg_info("syncpt_unit_base %llx syncpt_unit_size %zx size %x\n",
77 g->syncpt_unit_base, g->syncpt_unit_size,
78 g->syncpt_size);
79#endif
80
81 platform->bypass_smmu = !device_is_iommuable(dev);
82 platform->disable_bigpage = platform->bypass_smmu;
83
84 platform->g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close
85 = false;
86 platform->g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close
87 = false;
88
89 platform->g->gr.t18x.ctx_vars.force_preemption_gfxp = false;
90 platform->g->gr.t18x.ctx_vars.force_preemption_cilp = false;
91
92 gp10b_tegra_get_clocks(dev);
93 nvgpu_linux_init_clk_support(platform->g);
94
95 return 0;
96}
97
/*
 * Platform remove hook: run the common gp10b teardown first, then drop the
 * GV11B-specific ECC sysfs nodes created by gr_gv11b_create_sysfs().
 */
static int gv11b_tegra_remove(struct device *dev)
{
	int ret = 0;

	gp10b_tegra_remove(dev);
	gr_gv11b_remove_sysfs(dev);

	return ret;
}
106
107static bool gv11b_tegra_is_railgated(struct device *dev)
108{
109 bool ret = false;
110#ifdef TEGRA194_POWER_DOMAIN_GPU
111 struct gk20a *g = get_gk20a(dev);
112
113 if (tegra_bpmp_running()) {
114 nvgpu_log(g, gpu_dbg_info, "bpmp running");
115 ret = !tegra_powergate_is_powered(TEGRA194_POWER_DOMAIN_GPU);
116
117 nvgpu_log(g, gpu_dbg_info, "railgated? %s", ret ? "yes" : "no");
118 } else {
119 nvgpu_log(g, gpu_dbg_info, "bpmp not running");
120 }
121#endif
122 return ret;
123}
124
/*
 * Railgate the GPU: disable all platform clocks, then power down the GPU
 * partition. No-ops when BPMP is not yet running or the domain is already
 * off. Always returns 0.
 */
static int gv11b_tegra_railgate(struct device *dev)
{
#ifdef TEGRA194_POWER_DOMAIN_GPU
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = get_gk20a(dev);
	int i;

	if (!tegra_bpmp_running()) {
		nvgpu_log(g, gpu_dbg_info, "bpmp not running");
		return 0;
	}

	nvgpu_log(g, gpu_dbg_info, "bpmp running");
	if (!tegra_powergate_is_powered(TEGRA194_POWER_DOMAIN_GPU)) {
		nvgpu_log(g, gpu_dbg_info, "powergate is not powered");
		return 0;
	}

	/* Clocks must be stopped before the partition is powergated. */
	nvgpu_log(g, gpu_dbg_info, "clk_disable_unprepare");
	for (i = 0; i < platform->num_clks; i++) {
		if (platform->clk[i])
			clk_disable_unprepare(platform->clk[i]);
	}

	nvgpu_log(g, gpu_dbg_info, "powergate_partition");
	tegra_powergate_partition(TEGRA194_POWER_DOMAIN_GPU);
#endif
	return 0;
}
151
/*
 * Unrailgate the GPU: power up the GPU partition, then re-enable all
 * platform clocks. Returns 0 on success (or when BPMP is not running),
 * or the unpowergate error code.
 */
static int gv11b_tegra_unrailgate(struct device *dev)
{
	int ret = 0;
#ifdef TEGRA194_POWER_DOMAIN_GPU
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = get_gk20a(dev);
	int i;

	if (!tegra_bpmp_running()) {
		nvgpu_log(g, gpu_dbg_info, "bpmp not running");
		return ret;
	}

	nvgpu_log(g, gpu_dbg_info, "bpmp running");
	ret = tegra_unpowergate_partition(TEGRA194_POWER_DOMAIN_GPU);
	if (ret) {
		nvgpu_log(g, gpu_dbg_info,
				"unpowergate partition failed");
		return ret;
	}

	/* Partition is powered; clocks can be started again. */
	nvgpu_log(g, gpu_dbg_info, "clk_prepare_enable");
	for (i = 0; i < platform->num_clks; i++) {
		if (platform->clk[i])
			clk_prepare_enable(platform->clk[i]);
	}
#endif
	return ret;
}
179
/* System-suspend hook: nothing GV11B-specific to do; report success. */
static int gv11b_tegra_suspend(struct device *dev)
{
	return 0;
}
184
185struct gk20a_platform t19x_gpu_tegra_platform = {
186 .has_syncpoints = true,
187
188 /* power management configuration */
189
190 /* ptimer src frequency in hz*/
191 .ptimer_src_freq = 31250000,
192
193 .probe = gv11b_tegra_probe,
194 .remove = gv11b_tegra_remove,
195
196 .enable_slcg = false,
197 .enable_blcg = false,
198 .enable_elcg = false,
199 .can_slcg = false,
200 .can_blcg = false,
201 .can_elcg = false,
202
203 /* power management callbacks */
204 .suspend = gv11b_tegra_suspend,
205 .railgate = gv11b_tegra_railgate,
206 .unrailgate = gv11b_tegra_unrailgate,
207 .is_railgated = gv11b_tegra_is_railgated,
208
209 .busy = gk20a_tegra_busy,
210 .idle = gk20a_tegra_idle,
211
212 .dump_platform_dependencies = gk20a_tegra_debug_dump,
213
214 .soc_name = "tegra19x",
215
216 .honors_aperture = true,
217 .unified_memory = true,
218
219 .reset_assert = gp10b_tegra_reset_assert,
220 .reset_deassert = gp10b_tegra_reset_deassert,
221};
222
/*
 * sysfs attribute arrays backing the per-unit ECC error counters.
 * They are filled in by the *_ecc_stat_create() helpers in
 * gr_gv11b_create_sysfs() and torn down in gr_gv11b_remove_sysfs().
 */

/* SM / GCC / GPC-MMU counters (per-TPC or per-GPC) */
static struct device_attribute *dev_attr_sm_l1_tag_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_sm_cbu_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_cbu_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_sm_l1_data_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_l1_data_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_sm_icache_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_sm_icache_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_gcc_l15_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_gcc_l15_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l1tlb_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array;

/* FECS / GPCCS falcon counters */
static struct device_attribute *dev_attr_fecs_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_fecs_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_gpccs_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_gpccs_ecc_uncorrected_err_count_array;

/* LTC (L2 cache) counters */
static struct device_attribute *dev_attr_l2_cache_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_l2_cache_ecc_uncorrected_err_count_array;

/* HUB-MMU counters */
static struct device_attribute *dev_attr_mmu_l2tlb_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_hubtlb_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array;
static struct device_attribute *dev_attr_mmu_fillunit_ecc_corrected_err_count_array;
static struct device_attribute *dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array;
250
251void gr_gv11b_create_sysfs(struct gk20a *g)
252{
253 struct device *dev = dev_from_gk20a(g);
254 int error = 0;
255 /* This stat creation function is called on GR init. GR can get
256 initialized multiple times but we only need to create the ECC
257 stats once. Therefore, add the following check to avoid
258 creating duplicate stat sysfs nodes. */
259 if (g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters != NULL)
260 return;
261
262 gr_gp10b_create_sysfs(g);
263
264 error |= gr_gp10b_ecc_stat_create(dev,
265 0,
266 "sm_l1_tag_ecc_corrected_err_count",
267 &g->ecc.gr.t19x.sm_l1_tag_corrected_err_count,
268 &dev_attr_sm_l1_tag_ecc_corrected_err_count_array);
269
270 error |= gr_gp10b_ecc_stat_create(dev,
271 0,
272 "sm_l1_tag_ecc_uncorrected_err_count",
273 &g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count,
274 &dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array);
275
276 error |= gr_gp10b_ecc_stat_create(dev,
277 0,
278 "sm_cbu_ecc_corrected_err_count",
279 &g->ecc.gr.t19x.sm_cbu_corrected_err_count,
280 &dev_attr_sm_cbu_ecc_corrected_err_count_array);
281
282 error |= gr_gp10b_ecc_stat_create(dev,
283 0,
284 "sm_cbu_ecc_uncorrected_err_count",
285 &g->ecc.gr.t19x.sm_cbu_uncorrected_err_count,
286 &dev_attr_sm_cbu_ecc_uncorrected_err_count_array);
287
288 error |= gr_gp10b_ecc_stat_create(dev,
289 0,
290 "sm_l1_data_ecc_corrected_err_count",
291 &g->ecc.gr.t19x.sm_l1_data_corrected_err_count,
292 &dev_attr_sm_l1_data_ecc_corrected_err_count_array);
293
294 error |= gr_gp10b_ecc_stat_create(dev,
295 0,
296 "sm_l1_data_ecc_uncorrected_err_count",
297 &g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count,
298 &dev_attr_sm_l1_data_ecc_uncorrected_err_count_array);
299
300 error |= gr_gp10b_ecc_stat_create(dev,
301 0,
302 "sm_icache_ecc_corrected_err_count",
303 &g->ecc.gr.t19x.sm_icache_corrected_err_count,
304 &dev_attr_sm_icache_ecc_corrected_err_count_array);
305
306 error |= gr_gp10b_ecc_stat_create(dev,
307 0,
308 "sm_icache_ecc_uncorrected_err_count",
309 &g->ecc.gr.t19x.sm_icache_uncorrected_err_count,
310 &dev_attr_sm_icache_ecc_uncorrected_err_count_array);
311
312 error |= gr_gp10b_ecc_stat_create(dev,
313 0,
314 "gcc_l15_ecc_corrected_err_count",
315 &g->ecc.gr.t19x.gcc_l15_corrected_err_count,
316 &dev_attr_gcc_l15_ecc_corrected_err_count_array);
317
318 error |= gr_gp10b_ecc_stat_create(dev,
319 0,
320 "gcc_l15_ecc_uncorrected_err_count",
321 &g->ecc.gr.t19x.gcc_l15_uncorrected_err_count,
322 &dev_attr_gcc_l15_ecc_uncorrected_err_count_array);
323
324 error |= gp10b_ecc_stat_create(dev,
325 g->ltc_count,
326 "ltc",
327 "l2_cache_uncorrected_err_count",
328 &g->ecc.ltc.t19x.l2_cache_uncorrected_err_count,
329 &dev_attr_l2_cache_ecc_uncorrected_err_count_array);
330
331 error |= gp10b_ecc_stat_create(dev,
332 g->ltc_count,
333 "ltc",
334 "l2_cache_corrected_err_count",
335 &g->ecc.ltc.t19x.l2_cache_corrected_err_count,
336 &dev_attr_l2_cache_ecc_corrected_err_count_array);
337
338 error |= gp10b_ecc_stat_create(dev,
339 1,
340 "gpc",
341 "fecs_ecc_uncorrected_err_count",
342 &g->ecc.gr.t19x.fecs_uncorrected_err_count,
343 &dev_attr_fecs_ecc_uncorrected_err_count_array);
344
345 error |= gp10b_ecc_stat_create(dev,
346 1,
347 "gpc",
348 "fecs_ecc_corrected_err_count",
349 &g->ecc.gr.t19x.fecs_corrected_err_count,
350 &dev_attr_fecs_ecc_corrected_err_count_array);
351
352 error |= gp10b_ecc_stat_create(dev,
353 g->gr.gpc_count,
354 "gpc",
355 "gpccs_ecc_uncorrected_err_count",
356 &g->ecc.gr.t19x.gpccs_uncorrected_err_count,
357 &dev_attr_gpccs_ecc_uncorrected_err_count_array);
358
359 error |= gp10b_ecc_stat_create(dev,
360 g->gr.gpc_count,
361 "gpc",
362 "gpccs_ecc_corrected_err_count",
363 &g->ecc.gr.t19x.gpccs_corrected_err_count,
364 &dev_attr_gpccs_ecc_corrected_err_count_array);
365
366 error |= gp10b_ecc_stat_create(dev,
367 g->gr.gpc_count,
368 "gpc",
369 "mmu_l1tlb_ecc_uncorrected_err_count",
370 &g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count,
371 &dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array);
372
373 error |= gp10b_ecc_stat_create(dev,
374 g->gr.gpc_count,
375 "gpc",
376 "mmu_l1tlb_ecc_corrected_err_count",
377 &g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count,
378 &dev_attr_mmu_l1tlb_ecc_corrected_err_count_array);
379
380 error |= gp10b_ecc_stat_create(dev,
381 1,
382 "eng",
383 "mmu_l2tlb_ecc_uncorrected_err_count",
384 &g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count,
385 &dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array);
386
387 error |= gp10b_ecc_stat_create(dev,
388 1,
389 "eng",
390 "mmu_l2tlb_ecc_corrected_err_count",
391 &g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count,
392 &dev_attr_mmu_l2tlb_ecc_corrected_err_count_array);
393
394 error |= gp10b_ecc_stat_create(dev,
395 1,
396 "eng",
397 "mmu_hubtlb_ecc_uncorrected_err_count",
398 &g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count,
399 &dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array);
400
401 error |= gp10b_ecc_stat_create(dev,
402 1,
403 "eng",
404 "mmu_hubtlb_ecc_corrected_err_count",
405 &g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count,
406 &dev_attr_mmu_hubtlb_ecc_corrected_err_count_array);
407
408 error |= gp10b_ecc_stat_create(dev,
409 1,
410 "eng",
411 "mmu_fillunit_ecc_uncorrected_err_count",
412 &g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count,
413 &dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array);
414
415 error |= gp10b_ecc_stat_create(dev,
416 1,
417 "eng",
418 "mmu_fillunit_ecc_corrected_err_count",
419 &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
420 &dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
421
422 if (error)
423 dev_err(dev, "Failed to create gv11b sysfs attributes!\n");
424}
425
426static void gr_gv11b_remove_sysfs(struct device *dev)
427{
428 struct gk20a *g = get_gk20a(dev);
429
430 gr_gp10b_ecc_stat_remove(dev,
431 0,
432 &g->ecc.gr.t19x.sm_l1_tag_corrected_err_count,
433 dev_attr_sm_l1_tag_ecc_corrected_err_count_array);
434
435 gr_gp10b_ecc_stat_remove(dev,
436 0,
437 &g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count,
438 dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array);
439
440 gr_gp10b_ecc_stat_remove(dev,
441 0,
442 &g->ecc.gr.t19x.sm_cbu_corrected_err_count,
443 dev_attr_sm_cbu_ecc_corrected_err_count_array);
444
445 gr_gp10b_ecc_stat_remove(dev,
446 0,
447 &g->ecc.gr.t19x.sm_cbu_uncorrected_err_count,
448 dev_attr_sm_cbu_ecc_uncorrected_err_count_array);
449
450 gr_gp10b_ecc_stat_remove(dev,
451 0,
452 &g->ecc.gr.t19x.sm_l1_data_corrected_err_count,
453 dev_attr_sm_l1_data_ecc_corrected_err_count_array);
454
455 gr_gp10b_ecc_stat_remove(dev,
456 0,
457 &g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count,
458 dev_attr_sm_l1_data_ecc_uncorrected_err_count_array);
459
460 gr_gp10b_ecc_stat_remove(dev,
461 0,
462 &g->ecc.gr.t19x.sm_icache_corrected_err_count,
463 dev_attr_sm_icache_ecc_corrected_err_count_array);
464
465 gr_gp10b_ecc_stat_remove(dev,
466 0,
467 &g->ecc.gr.t19x.sm_icache_uncorrected_err_count,
468 dev_attr_sm_icache_ecc_uncorrected_err_count_array);
469
470 gr_gp10b_ecc_stat_remove(dev,
471 0,
472 &g->ecc.gr.t19x.gcc_l15_corrected_err_count,
473 dev_attr_gcc_l15_ecc_corrected_err_count_array);
474
475 gr_gp10b_ecc_stat_remove(dev,
476 0,
477 &g->ecc.gr.t19x.gcc_l15_uncorrected_err_count,
478 dev_attr_gcc_l15_ecc_uncorrected_err_count_array);
479
480 gp10b_ecc_stat_remove(dev,
481 g->ltc_count,
482 &g->ecc.ltc.t19x.l2_cache_uncorrected_err_count,
483 dev_attr_l2_cache_ecc_uncorrected_err_count_array);
484
485 gp10b_ecc_stat_remove(dev,
486 g->ltc_count,
487 &g->ecc.ltc.t19x.l2_cache_corrected_err_count,
488 dev_attr_l2_cache_ecc_corrected_err_count_array);
489
490 gp10b_ecc_stat_remove(dev,
491 1,
492 &g->ecc.gr.t19x.fecs_uncorrected_err_count,
493 dev_attr_fecs_ecc_uncorrected_err_count_array);
494
495 gp10b_ecc_stat_remove(dev,
496 1,
497 &g->ecc.gr.t19x.fecs_corrected_err_count,
498 dev_attr_fecs_ecc_corrected_err_count_array);
499
500 gp10b_ecc_stat_remove(dev,
501 g->gr.gpc_count,
502 &g->ecc.gr.t19x.gpccs_uncorrected_err_count,
503 dev_attr_gpccs_ecc_uncorrected_err_count_array);
504
505 gp10b_ecc_stat_remove(dev,
506 g->gr.gpc_count,
507 &g->ecc.gr.t19x.gpccs_corrected_err_count,
508 dev_attr_gpccs_ecc_corrected_err_count_array);
509
510 gp10b_ecc_stat_remove(dev,
511 g->gr.gpc_count,
512 &g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count,
513 dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array);
514
515 gp10b_ecc_stat_remove(dev,
516 g->gr.gpc_count,
517 &g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count,
518 dev_attr_mmu_l1tlb_ecc_corrected_err_count_array);
519
520 gp10b_ecc_stat_remove(dev,
521 1,
522 &g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count,
523 dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array);
524
525 gp10b_ecc_stat_remove(dev,
526 1,
527 &g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count,
528 dev_attr_mmu_l2tlb_ecc_corrected_err_count_array);
529
530 gp10b_ecc_stat_remove(dev,
531 1,
532 &g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count,
533 dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array);
534
535 gp10b_ecc_stat_remove(dev,
536 1,
537 &g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count,
538 dev_attr_mmu_hubtlb_ecc_corrected_err_count_array);
539
540 gp10b_ecc_stat_remove(dev,
541 1,
542 &g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count,
543 dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array);
544
545 gp10b_ecc_stat_remove(dev,
546 1,
547 &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
548 dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
549}
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
new file mode 100644
index 00000000..2c7b6457
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -0,0 +1,283 @@
1/*
2 * GV11B PMU
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/delay.h> /* for udelay */
26#include <linux/clk.h>
27
28#include <soc/tegra/fuse.h>
29
30#include <nvgpu/pmu.h>
31#include <nvgpu/falcon.h>
32#include <nvgpu/enabled.h>
33#include <nvgpu/mm.h>
34
35#include "gk20a/gk20a.h"
36
37#include "gp10b/pmu_gp10b.h"
38#include "gp106/pmu_gp106.h"
39
40#include "pmu_gv11b.h"
41#include "acr_gv11b.h"
42
43#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
44
/* PMU-specific debug print, routed through the gpu_dbg_pmu log mask. */
#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

/* Shift amount for 4 KiB alignment (log2(4096)); used for instblk ptr. */
#define ALIGN_4KB 12
49
/* GV11B always has a PMU; the chip pointer is unused. */
bool gv11b_is_pmu_supported(struct gk20a *g)
{
	return true;
}
54
55bool gv11b_is_lazy_bootstrap(u32 falcon_id)
56{
57 bool enable_status = false;
58
59 switch (falcon_id) {
60 case LSF_FALCON_ID_FECS:
61 enable_status = true;
62 break;
63 case LSF_FALCON_ID_GPCCS:
64 enable_status = true;
65 break;
66 default:
67 break;
68 }
69
70 return enable_status;
71}
72
73bool gv11b_is_priv_load(u32 falcon_id)
74{
75 bool enable_status = false;
76
77 switch (falcon_id) {
78 case LSF_FALCON_ID_FECS:
79 enable_status = true;
80 break;
81 case LSF_FALCON_ID_GPCCS:
82 enable_status = true;
83 break;
84 default:
85 break;
86 }
87
88 return enable_status;
89}
90
91int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
92{
93 struct gk20a *g = gk20a_from_pmu(pmu);
94 struct mm_gk20a *mm = &g->mm;
95 struct pmu_ucode_desc *desc = pmu->desc;
96 u64 addr_code_lo, addr_data_lo, addr_load_lo;
97 u64 addr_code_hi, addr_data_hi, addr_load_hi;
98 u32 i, blocks, addr_args;
99
100 gk20a_dbg_fn("");
101
102 gk20a_writel(g, pwr_falcon_itfen_r(),
103 gk20a_readl(g, pwr_falcon_itfen_r()) |
104 pwr_falcon_itfen_ctxen_enable_f());
105
106 gk20a_writel(g, pwr_pmu_new_instblk_r(),
107 pwr_pmu_new_instblk_ptr_f(
108 nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB)
109 | pwr_pmu_new_instblk_valid_f(1)
110 | pwr_pmu_new_instblk_target_sys_ncoh_f());
111
112 /* TBD: load all other surfaces */
113 g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
114 pmu, GK20A_PMU_TRACE_BUFSIZE);
115 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
116 g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
117 pmu, GK20A_PMU_DMAIDX_VIRT);
118
119 g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
120 g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));
121
122 addr_args = (pwr_falcon_hwcfg_dmem_size_v(
123 gk20a_readl(g, pwr_falcon_hwcfg_r()))
124 << GK20A_PMU_DMEM_BLKSIZE2) -
125 g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);
126
127 nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
128 (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
129 g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);
130
131 gk20a_writel(g, pwr_falcon_dmemc_r(0),
132 pwr_falcon_dmemc_offs_f(0) |
133 pwr_falcon_dmemc_blk_f(0) |
134 pwr_falcon_dmemc_aincw_f(1));
135
136 addr_code_lo = u64_lo32((pmu->ucode.gpu_va +
137 desc->app_start_offset +
138 desc->app_resident_code_offset) >> 8);
139
140 addr_code_hi = u64_hi32((pmu->ucode.gpu_va +
141 desc->app_start_offset +
142 desc->app_resident_code_offset) >> 8);
143 addr_data_lo = u64_lo32((pmu->ucode.gpu_va +
144 desc->app_start_offset +
145 desc->app_resident_data_offset) >> 8);
146 addr_data_hi = u64_hi32((pmu->ucode.gpu_va +
147 desc->app_start_offset +
148 desc->app_resident_data_offset) >> 8);
149 addr_load_lo = u64_lo32((pmu->ucode.gpu_va +
150 desc->bootloader_start_offset) >> 8);
151 addr_load_hi = u64_hi32((pmu->ucode.gpu_va +
152 desc->bootloader_start_offset) >> 8);
153
154 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
155 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
156 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
157 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
158 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
159 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
160 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
161 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
162 gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE);
163 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_lo << 8);
164 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_hi);
165 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_offset);
166 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size);
167 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
168 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
169 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry);
170 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_lo << 8);
171 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_hi);
172 gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size);
173 gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1);
174 gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);
175
176 g->ops.pmu.write_dmatrfbase(g,
177 addr_load_lo - (desc->bootloader_imem_offset >> 8));
178
179 blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;
180
181 for (i = 0; i < blocks; i++) {
182 gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(),
183 desc->bootloader_imem_offset + (i << 8));
184 gk20a_writel(g, pwr_falcon_dmatrffboffs_r(),
185 desc->bootloader_imem_offset + (i << 8));
186 gk20a_writel(g, pwr_falcon_dmatrfcmd_r(),
187 pwr_falcon_dmatrfcmd_imem_f(1) |
188 pwr_falcon_dmatrfcmd_write_f(0) |
189 pwr_falcon_dmatrfcmd_size_f(6) |
190 pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE));
191 }
192
193 nvgpu_flcn_bootstrap(pmu->flcn, desc->bootloader_entry_point);
194
195 gk20a_writel(g, pwr_falcon_os_r(), desc->app_version);
196
197 return 0;
198}
199
200static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
201 void *param, u32 handle, u32 status)
202{
203 gk20a_dbg_fn("");
204
205 if (status != 0) {
206 nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
207 return;
208 }
209
210 gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n",
211 msg->msg.pg.msg_type);
212}
213
214static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
215 void *param, u32 handle, u32 status)
216{
217 gk20a_dbg_fn("");
218
219 if (status != 0) {
220 nvgpu_err(g, "GR PARAM cmd aborted\n");
221 return;
222 }
223
224 gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n",
225 msg->msg.pg.msg_type);
226}
227
228int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
229{
230 struct nvgpu_pmu *pmu = &g->pmu;
231 struct pmu_cmd cmd;
232 u32 seq;
233
234 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
235 memset(&cmd, 0, sizeof(struct pmu_cmd));
236 cmd.hdr.unit_id = PMU_UNIT_PG;
237 cmd.hdr.size = PMU_CMD_HDR_SIZE +
238 sizeof(struct pmu_pg_cmd_gr_init_param_v1);
239 cmd.cmd.pg.gr_init_param_v1.cmd_type =
240 PMU_PG_CMD_ID_PG_PARAM;
241 cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
242 PMU_PG_PARAM_CMD_GR_INIT_PARAM;
243 cmd.cmd.pg.gr_init_param_v1.featuremask =
244 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;
245
246 gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
247 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
248 pmu_handle_pg_param_msg, pmu, &seq, ~0);
249
250 } else
251 return -EINVAL;
252
253 return 0;
254}
255
256int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
257{
258 struct nvgpu_pmu *pmu = &g->pmu;
259 struct pmu_cmd cmd;
260 u32 seq;
261
262 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
263 memset(&cmd, 0, sizeof(struct pmu_cmd));
264 cmd.hdr.unit_id = PMU_UNIT_PG;
265 cmd.hdr.size = PMU_CMD_HDR_SIZE +
266 sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
267 cmd.cmd.pg.sf_mask_update.cmd_type =
268 PMU_PG_CMD_ID_PG_PARAM;
269 cmd.cmd.pg.sf_mask_update.sub_cmd_id =
270 PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE;
271 cmd.cmd.pg.sf_mask_update.ctrl_id =
272 PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
273 cmd.cmd.pg.sf_mask_update.enabled_mask =
274 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;
275
276 gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
277 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
278 pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);
279 } else
280 return -EINVAL;
281
282 return 0;
283}
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h
new file mode 100644
index 00000000..809970ff
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h
@@ -0,0 +1,37 @@
1/*
2 * GV11B PMU
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __PMU_GV11B_H_
26#define __PMU_GV11B_H_
27
28struct gk20a;
29
30bool gv11b_is_pmu_supported(struct gk20a *g);
31int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu);
32int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id);
33int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id);
34bool gv11b_is_lazy_bootstrap(u32 falcon_id);
35bool gv11b_is_priv_load(u32 falcon_id);
36
37#endif /*__PMU_GV11B_H_*/
diff --git a/drivers/gpu/nvgpu/gv11b/regops_gv11b.c b/drivers/gpu/nvgpu/gv11b/regops_gv11b.c
new file mode 100644
index 00000000..c356785e
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/regops_gv11b.c
@@ -0,0 +1,1548 @@
1/*
2 * Tegra GV11b GPU Driver Register Ops
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/slab.h>
26#include <linux/err.h>
27#include <linux/bsearch.h>
28#include <uapi/linux/nvgpu.h>
29
30#include "gk20a/gk20a.h"
31#include "gk20a/dbg_gpu_gk20a.h"
32#include "gk20a/regops_gk20a.h"
33#include "regops_gv11b.h"
34
35static const struct regop_offset_range gv11b_global_whitelist_ranges[] = {
36 { 0x000004f0, 1},
37 { 0x00001a00, 1},
38 { 0x00009400, 1},
39 { 0x00009410, 1},
40 { 0x00009480, 1},
41 { 0x00020200, 32},
42 { 0x00021c04, 2},
43 { 0x00021c14, 3},
44 { 0x00021c24, 71},
45 { 0x00021d44, 1},
46 { 0x00021d4c, 1},
47 { 0x00021d54, 1},
48 { 0x00021d5c, 1},
49 { 0x00021d68, 19},
50 { 0x00021dbc, 16},
51 { 0x00022430, 7},
52 { 0x00022450, 1},
53 { 0x0002245c, 2},
54 { 0x00070000, 5},
55 { 0x000840a8, 1},
56 { 0x00084b5c, 1},
57 { 0x000870a8, 1},
58 { 0x000884e0, 1},
59 { 0x00100c18, 3},
60 { 0x00100c84, 1},
61 { 0x0010a0a8, 1},
62 { 0x0010a4f0, 1},
63 { 0x0013c808, 2},
64 { 0x0013cc14, 1},
65 { 0x0013ec18, 1},
66 { 0x00140028, 1},
67 { 0x00140280, 1},
68 { 0x001402a0, 1},
69 { 0x00140350, 1},
70 { 0x00140480, 1},
71 { 0x001404a0, 1},
72 { 0x00140550, 1},
73 { 0x00140680, 1},
74 { 0x001406a0, 1},
75 { 0x00140750, 1},
76 { 0x00142028, 1},
77 { 0x00142280, 1},
78 { 0x001422a0, 1},
79 { 0x00142350, 1},
80 { 0x00142480, 1},
81 { 0x001424a0, 1},
82 { 0x00142550, 1},
83 { 0x00142680, 1},
84 { 0x001426a0, 1},
85 { 0x00142750, 1},
86 { 0x0017e028, 1},
87 { 0x0017e280, 1},
88 { 0x0017e294, 1},
89 { 0x0017e29c, 2},
90 { 0x0017e2ac, 1},
91 { 0x0017e350, 1},
92 { 0x0017e39c, 1},
93 { 0x0017e480, 1},
94 { 0x0017e4a0, 1},
95 { 0x0017e550, 1},
96 { 0x0017e680, 1},
97 { 0x0017e6a0, 1},
98 { 0x0017e750, 1},
99 { 0x00180040, 41},
100 { 0x001800ec, 1},
101 { 0x001800f8, 7},
102 { 0x00180120, 2},
103 { 0x00180240, 41},
104 { 0x001802ec, 1},
105 { 0x001802f8, 7},
106 { 0x00180320, 2},
107 { 0x00180440, 41},
108 { 0x001804ec, 1},
109 { 0x001804f8, 7},
110 { 0x00180520, 2},
111 { 0x00180640, 41},
112 { 0x001806ec, 1},
113 { 0x001806f8, 7},
114 { 0x00180720, 2},
115 { 0x00180840, 41},
116 { 0x001808ec, 1},
117 { 0x001808f8, 7},
118 { 0x00180920, 2},
119 { 0x00180a40, 41},
120 { 0x00180aec, 1},
121 { 0x00180af8, 7},
122 { 0x00180b20, 2},
123 { 0x00180c40, 41},
124 { 0x00180cec, 1},
125 { 0x00180cf8, 2},
126 { 0x00180d04, 4},
127 { 0x00180d20, 2},
128 { 0x00180e40, 41},
129 { 0x00180eec, 1},
130 { 0x00180ef8, 2},
131 { 0x00180f04, 4},
132 { 0x00180f20, 2},
133 { 0x00181040, 41},
134 { 0x001810ec, 1},
135 { 0x001810f8, 2},
136 { 0x00181104, 4},
137 { 0x00181120, 2},
138 { 0x00181240, 41},
139 { 0x001812ec, 1},
140 { 0x001812f8, 2},
141 { 0x00181304, 4},
142 { 0x00181320, 2},
143 { 0x00181440, 41},
144 { 0x001814ec, 1},
145 { 0x001814f8, 2},
146 { 0x00181504, 4},
147 { 0x00181520, 2},
148 { 0x00181640, 41},
149 { 0x001816ec, 1},
150 { 0x001816f8, 2},
151 { 0x00181704, 4},
152 { 0x00181720, 2},
153 { 0x00181840, 41},
154 { 0x001818ec, 1},
155 { 0x001818f8, 2},
156 { 0x00181904, 4},
157 { 0x00181920, 2},
158 { 0x00181a40, 41},
159 { 0x00181aec, 1},
160 { 0x00181af8, 2},
161 { 0x00181b04, 4},
162 { 0x00181b20, 2},
163 { 0x00181c40, 41},
164 { 0x00181cec, 1},
165 { 0x00181cf8, 2},
166 { 0x00181d04, 4},
167 { 0x00181d20, 2},
168 { 0x00181e40, 41},
169 { 0x00181eec, 1},
170 { 0x00181ef8, 2},
171 { 0x00181f04, 4},
172 { 0x00181f20, 2},
173 { 0x00182040, 41},
174 { 0x001820ec, 1},
175 { 0x001820f8, 2},
176 { 0x00182104, 4},
177 { 0x00182120, 2},
178 { 0x00182240, 41},
179 { 0x001822ec, 1},
180 { 0x001822f8, 2},
181 { 0x00182304, 4},
182 { 0x00182320, 2},
183 { 0x00182440, 41},
184 { 0x001824ec, 1},
185 { 0x001824f8, 2},
186 { 0x00182504, 4},
187 { 0x00182520, 2},
188 { 0x00182640, 41},
189 { 0x001826ec, 1},
190 { 0x001826f8, 2},
191 { 0x00182704, 4},
192 { 0x00182720, 2},
193 { 0x00182840, 41},
194 { 0x001828ec, 1},
195 { 0x001828f8, 2},
196 { 0x00182904, 4},
197 { 0x00182920, 2},
198 { 0x00182a40, 41},
199 { 0x00182aec, 1},
200 { 0x00182af8, 2},
201 { 0x00182b04, 4},
202 { 0x00182b20, 2},
203 { 0x00182c40, 41},
204 { 0x00182cec, 1},
205 { 0x00182cf8, 2},
206 { 0x00182d04, 4},
207 { 0x00182d20, 2},
208 { 0x00182e40, 41},
209 { 0x00182eec, 1},
210 { 0x00182ef8, 2},
211 { 0x00182f04, 4},
212 { 0x00182f20, 2},
213 { 0x00183040, 41},
214 { 0x001830ec, 1},
215 { 0x001830f8, 2},
216 { 0x00183104, 4},
217 { 0x00183120, 2},
218 { 0x00183240, 41},
219 { 0x001832ec, 1},
220 { 0x001832f8, 2},
221 { 0x00183304, 4},
222 { 0x00183320, 2},
223 { 0x00183440, 41},
224 { 0x001834ec, 1},
225 { 0x001834f8, 2},
226 { 0x00183504, 4},
227 { 0x00183520, 2},
228 { 0x00183640, 41},
229 { 0x001836ec, 1},
230 { 0x001836f8, 2},
231 { 0x00183704, 4},
232 { 0x00183720, 2},
233 { 0x00183840, 41},
234 { 0x001838ec, 1},
235 { 0x001838f8, 2},
236 { 0x00183904, 4},
237 { 0x00183920, 2},
238 { 0x00183a40, 41},
239 { 0x00183aec, 1},
240 { 0x00183af8, 2},
241 { 0x00183b04, 4},
242 { 0x00183b20, 2},
243 { 0x00183c40, 41},
244 { 0x00183cec, 1},
245 { 0x00183cf8, 2},
246 { 0x00183d04, 4},
247 { 0x00183d20, 2},
248 { 0x00183e40, 41},
249 { 0x00183eec, 1},
250 { 0x00183ef8, 2},
251 { 0x00183f04, 4},
252 { 0x00183f20, 2},
253 { 0x001c80a8, 1},
254 { 0x001c9100, 1},
255 { 0x001cc0a8, 1},
256 { 0x001cd100, 1},
257 { 0x001d00a8, 1},
258 { 0x001d1100, 1},
259 { 0x00200040, 41},
260 { 0x002000ec, 1},
261 { 0x002000f8, 7},
262 { 0x00200120, 2},
263 { 0x00200240, 41},
264 { 0x002002ec, 1},
265 { 0x002002f8, 7},
266 { 0x00200320, 2},
267 { 0x00200440, 41},
268 { 0x002004ec, 1},
269 { 0x002004f8, 7},
270 { 0x00200520, 2},
271 { 0x00200640, 41},
272 { 0x002006ec, 1},
273 { 0x002006f8, 7},
274 { 0x00200720, 2},
275 { 0x00200840, 41},
276 { 0x002008ec, 1},
277 { 0x002008f8, 2},
278 { 0x00200904, 4},
279 { 0x00200920, 2},
280 { 0x00200a40, 41},
281 { 0x00200aec, 1},
282 { 0x00200af8, 2},
283 { 0x00200b04, 4},
284 { 0x00200b20, 2},
285 { 0x00200c40, 41},
286 { 0x00200cec, 1},
287 { 0x00200cf8, 2},
288 { 0x00200d04, 4},
289 { 0x00200d20, 2},
290 { 0x00200e40, 41},
291 { 0x00200eec, 1},
292 { 0x00200ef8, 2},
293 { 0x00200f04, 4},
294 { 0x00200f20, 2},
295 { 0x00201040, 41},
296 { 0x002010ec, 1},
297 { 0x002010f8, 2},
298 { 0x00201104, 4},
299 { 0x00201120, 2},
300 { 0x00201240, 41},
301 { 0x002012ec, 1},
302 { 0x002012f8, 2},
303 { 0x00201304, 4},
304 { 0x00201320, 2},
305 { 0x00201440, 41},
306 { 0x002014ec, 1},
307 { 0x002014f8, 2},
308 { 0x00201504, 4},
309 { 0x00201520, 2},
310 { 0x00201640, 41},
311 { 0x002016ec, 1},
312 { 0x002016f8, 2},
313 { 0x00201704, 4},
314 { 0x00201720, 2},
315 { 0x00201840, 41},
316 { 0x002018ec, 1},
317 { 0x002018f8, 2},
318 { 0x00201904, 4},
319 { 0x00201920, 2},
320 { 0x00201a40, 41},
321 { 0x00201aec, 1},
322 { 0x00201af8, 2},
323 { 0x00201b04, 4},
324 { 0x00201b20, 2},
325 { 0x00201c40, 41},
326 { 0x00201cec, 1},
327 { 0x00201cf8, 2},
328 { 0x00201d04, 4},
329 { 0x00201d20, 2},
330 { 0x00201e40, 41},
331 { 0x00201eec, 1},
332 { 0x00201ef8, 2},
333 { 0x00201f04, 4},
334 { 0x00201f20, 2},
335 { 0x00202040, 41},
336 { 0x002020ec, 1},
337 { 0x002020f8, 2},
338 { 0x00202104, 4},
339 { 0x00202120, 2},
340 { 0x00202240, 41},
341 { 0x002022ec, 1},
342 { 0x002022f8, 2},
343 { 0x00202304, 4},
344 { 0x00202320, 2},
345 { 0x00202440, 41},
346 { 0x002024ec, 1},
347 { 0x002024f8, 2},
348 { 0x00202504, 4},
349 { 0x00202520, 2},
350 { 0x00202640, 41},
351 { 0x002026ec, 1},
352 { 0x002026f8, 2},
353 { 0x00202704, 4},
354 { 0x00202720, 2},
355 { 0x00202840, 41},
356 { 0x002028ec, 1},
357 { 0x002028f8, 2},
358 { 0x00202904, 4},
359 { 0x00202920, 2},
360 { 0x00202a40, 41},
361 { 0x00202aec, 1},
362 { 0x00202af8, 2},
363 { 0x00202b04, 4},
364 { 0x00202b20, 2},
365 { 0x00202c40, 41},
366 { 0x00202cec, 1},
367 { 0x00202cf8, 2},
368 { 0x00202d04, 4},
369 { 0x00202d20, 2},
370 { 0x00202e40, 41},
371 { 0x00202eec, 1},
372 { 0x00202ef8, 2},
373 { 0x00202f04, 4},
374 { 0x00202f20, 2},
375 { 0x00203040, 41},
376 { 0x002030ec, 1},
377 { 0x002030f8, 2},
378 { 0x00203104, 4},
379 { 0x00203120, 2},
380 { 0x00203240, 41},
381 { 0x002032ec, 1},
382 { 0x002032f8, 2},
383 { 0x00203304, 4},
384 { 0x00203320, 2},
385 { 0x00203440, 41},
386 { 0x002034ec, 1},
387 { 0x002034f8, 2},
388 { 0x00203504, 4},
389 { 0x00203520, 2},
390 { 0x00203640, 41},
391 { 0x002036ec, 1},
392 { 0x002036f8, 2},
393 { 0x00203704, 4},
394 { 0x00203720, 2},
395 { 0x00203840, 41},
396 { 0x002038ec, 1},
397 { 0x002038f8, 2},
398 { 0x00203904, 4},
399 { 0x00203920, 2},
400 { 0x00203a40, 41},
401 { 0x00203aec, 1},
402 { 0x00203af8, 2},
403 { 0x00203b04, 4},
404 { 0x00203b20, 2},
405 { 0x00203c40, 41},
406 { 0x00203cec, 1},
407 { 0x00203cf8, 2},
408 { 0x00203d04, 4},
409 { 0x00203d20, 2},
410 { 0x00203e40, 41},
411 { 0x00203eec, 1},
412 { 0x00203ef8, 2},
413 { 0x00203f04, 4},
414 { 0x00203f20, 2},
415 { 0x00240040, 41},
416 { 0x002400ec, 1},
417 { 0x002400f8, 7},
418 { 0x00240120, 2},
419 { 0x00240240, 41},
420 { 0x002402ec, 1},
421 { 0x002402f8, 7},
422 { 0x00240320, 2},
423 { 0x00240440, 41},
424 { 0x002404ec, 1},
425 { 0x002404f8, 7},
426 { 0x00240520, 2},
427 { 0x00240640, 41},
428 { 0x002406ec, 1},
429 { 0x002406f8, 7},
430 { 0x00240720, 2},
431 { 0x00240840, 41},
432 { 0x002408ec, 1},
433 { 0x002408f8, 7},
434 { 0x00240920, 2},
435 { 0x00240a40, 41},
436 { 0x00240aec, 1},
437 { 0x00240af8, 7},
438 { 0x00240b20, 2},
439 { 0x00240c40, 41},
440 { 0x00240cec, 1},
441 { 0x00240cf8, 2},
442 { 0x00240d04, 4},
443 { 0x00240d20, 2},
444 { 0x00240e40, 41},
445 { 0x00240eec, 1},
446 { 0x00240ef8, 2},
447 { 0x00240f04, 4},
448 { 0x00240f20, 2},
449 { 0x00241040, 41},
450 { 0x002410ec, 1},
451 { 0x002410f8, 2},
452 { 0x00241104, 4},
453 { 0x00241120, 2},
454 { 0x00241240, 41},
455 { 0x002412ec, 1},
456 { 0x002412f8, 2},
457 { 0x00241304, 4},
458 { 0x00241320, 2},
459 { 0x00241440, 41},
460 { 0x002414ec, 1},
461 { 0x002414f8, 2},
462 { 0x00241504, 4},
463 { 0x00241520, 2},
464 { 0x00241640, 41},
465 { 0x002416ec, 1},
466 { 0x002416f8, 2},
467 { 0x00241704, 4},
468 { 0x00241720, 2},
469 { 0x00241840, 41},
470 { 0x002418ec, 1},
471 { 0x002418f8, 2},
472 { 0x00241904, 4},
473 { 0x00241920, 2},
474 { 0x00241a40, 41},
475 { 0x00241aec, 1},
476 { 0x00241af8, 2},
477 { 0x00241b04, 4},
478 { 0x00241b20, 2},
479 { 0x00241c40, 41},
480 { 0x00241cec, 1},
481 { 0x00241cf8, 2},
482 { 0x00241d04, 4},
483 { 0x00241d20, 2},
484 { 0x00241e40, 41},
485 { 0x00241eec, 1},
486 { 0x00241ef8, 2},
487 { 0x00241f04, 4},
488 { 0x00241f20, 2},
489 { 0x00242040, 41},
490 { 0x002420ec, 1},
491 { 0x002420f8, 2},
492 { 0x00242104, 4},
493 { 0x00242120, 2},
494 { 0x00242240, 41},
495 { 0x002422ec, 1},
496 { 0x002422f8, 2},
497 { 0x00242304, 4},
498 { 0x00242320, 2},
499 { 0x00242440, 41},
500 { 0x002424ec, 1},
501 { 0x002424f8, 2},
502 { 0x00242504, 4},
503 { 0x00242520, 2},
504 { 0x00242640, 41},
505 { 0x002426ec, 1},
506 { 0x002426f8, 2},
507 { 0x00242704, 4},
508 { 0x00242720, 2},
509 { 0x00242840, 41},
510 { 0x002428ec, 1},
511 { 0x002428f8, 2},
512 { 0x00242904, 4},
513 { 0x00242920, 2},
514 { 0x00242a40, 41},
515 { 0x00242aec, 1},
516 { 0x00242af8, 2},
517 { 0x00242b04, 4},
518 { 0x00242b20, 2},
519 { 0x00242c40, 41},
520 { 0x00242cec, 1},
521 { 0x00242cf8, 2},
522 { 0x00242d04, 4},
523 { 0x00242d20, 2},
524 { 0x00242e40, 41},
525 { 0x00242eec, 1},
526 { 0x00242ef8, 2},
527 { 0x00242f04, 4},
528 { 0x00242f20, 2},
529 { 0x00243040, 41},
530 { 0x002430ec, 1},
531 { 0x002430f8, 2},
532 { 0x00243104, 4},
533 { 0x00243120, 2},
534 { 0x00243240, 41},
535 { 0x002432ec, 1},
536 { 0x002432f8, 2},
537 { 0x00243304, 4},
538 { 0x00243320, 2},
539 { 0x00243440, 41},
540 { 0x002434ec, 1},
541 { 0x002434f8, 2},
542 { 0x00243504, 4},
543 { 0x00243520, 2},
544 { 0x00243640, 41},
545 { 0x002436ec, 1},
546 { 0x002436f8, 2},
547 { 0x00243704, 4},
548 { 0x00243720, 2},
549 { 0x00243840, 41},
550 { 0x002438ec, 1},
551 { 0x002438f8, 2},
552 { 0x00243904, 4},
553 { 0x00243920, 2},
554 { 0x00243a40, 41},
555 { 0x00243aec, 1},
556 { 0x00243af8, 2},
557 { 0x00243b04, 4},
558 { 0x00243b20, 2},
559 { 0x00243c40, 41},
560 { 0x00243cec, 1},
561 { 0x00243cf8, 2},
562 { 0x00243d04, 4},
563 { 0x00243d20, 2},
564 { 0x00243e40, 41},
565 { 0x00243eec, 1},
566 { 0x00243ef8, 2},
567 { 0x00243f04, 4},
568 { 0x00243f20, 2},
569 { 0x00244000, 1},
570 { 0x00244008, 1},
571 { 0x00244010, 2},
572 { 0x00246000, 1},
573 { 0x00246008, 1},
574 { 0x00246010, 2},
575 { 0x00248000, 1},
576 { 0x00248008, 1},
577 { 0x00248010, 2},
578 { 0x0024a000, 1},
579 { 0x0024a008, 1},
580 { 0x0024a010, 11},
581 { 0x0024a040, 3},
582 { 0x0024a050, 3},
583 { 0x0024a060, 4},
584 { 0x0024a074, 7},
585 { 0x0024a094, 3},
586 { 0x0024a0a4, 1},
587 { 0x0024a100, 6},
588 { 0x00250040, 25},
589 { 0x002500c8, 7},
590 { 0x002500ec, 1},
591 { 0x002500f8, 2},
592 { 0x00250104, 4},
593 { 0x00250120, 2},
594 { 0x00250240, 25},
595 { 0x002502c8, 7},
596 { 0x002502ec, 1},
597 { 0x002502f8, 2},
598 { 0x00250304, 4},
599 { 0x00250320, 2},
600 { 0x00250840, 25},
601 { 0x002508c8, 7},
602 { 0x002508ec, 1},
603 { 0x002508f8, 2},
604 { 0x00250904, 4},
605 { 0x00250920, 2},
606 { 0x00250a40, 25},
607 { 0x00250ac8, 7},
608 { 0x00250aec, 1},
609 { 0x00250af8, 2},
610 { 0x00250b04, 4},
611 { 0x00250b20, 2},
612 { 0x00251800, 3},
613 { 0x00251810, 2},
614 { 0x00251a00, 3},
615 { 0x00251a10, 2},
616 { 0x00278040, 25},
617 { 0x002780c8, 7},
618 { 0x002780ec, 1},
619 { 0x002780f8, 2},
620 { 0x00278104, 4},
621 { 0x00278120, 2},
622 { 0x00278240, 25},
623 { 0x002782c8, 7},
624 { 0x002782ec, 1},
625 { 0x002782f8, 2},
626 { 0x00278304, 4},
627 { 0x00278320, 2},
628 { 0x00278440, 25},
629 { 0x002784c8, 7},
630 { 0x002784ec, 1},
631 { 0x002784f8, 2},
632 { 0x00278504, 4},
633 { 0x00278520, 2},
634 { 0x00278640, 25},
635 { 0x002786c8, 7},
636 { 0x002786ec, 1},
637 { 0x002786f8, 2},
638 { 0x00278704, 4},
639 { 0x00278720, 2},
640 { 0x00278840, 25},
641 { 0x002788c8, 7},
642 { 0x002788ec, 1},
643 { 0x002788f8, 2},
644 { 0x00278904, 4},
645 { 0x00278920, 2},
646 { 0x00278a40, 25},
647 { 0x00278ac8, 7},
648 { 0x00278aec, 1},
649 { 0x00278af8, 2},
650 { 0x00278b04, 4},
651 { 0x00278b20, 2},
652 { 0x00278c40, 25},
653 { 0x00278cc8, 7},
654 { 0x00278cec, 1},
655 { 0x00278cf8, 2},
656 { 0x00278d04, 4},
657 { 0x00278d20, 2},
658 { 0x00278e40, 25},
659 { 0x00278ec8, 7},
660 { 0x00278eec, 1},
661 { 0x00278ef8, 2},
662 { 0x00278f04, 4},
663 { 0x00278f20, 2},
664 { 0x00279040, 25},
665 { 0x002790c8, 7},
666 { 0x002790ec, 1},
667 { 0x002790f8, 2},
668 { 0x00279104, 4},
669 { 0x00279120, 2},
670 { 0x00279240, 25},
671 { 0x002792c8, 7},
672 { 0x002792ec, 1},
673 { 0x002792f8, 2},
674 { 0x00279304, 4},
675 { 0x00279320, 2},
676 { 0x00279440, 25},
677 { 0x002794c8, 7},
678 { 0x002794ec, 1},
679 { 0x002794f8, 2},
680 { 0x00279504, 4},
681 { 0x00279520, 2},
682 { 0x00279640, 25},
683 { 0x002796c8, 7},
684 { 0x002796ec, 1},
685 { 0x002796f8, 2},
686 { 0x00279704, 4},
687 { 0x00279720, 2},
688 { 0x00279840, 25},
689 { 0x002798c8, 7},
690 { 0x002798ec, 1},
691 { 0x002798f8, 2},
692 { 0x00279904, 4},
693 { 0x00279920, 2},
694 { 0x00279a40, 25},
695 { 0x00279ac8, 7},
696 { 0x00279aec, 1},
697 { 0x00279af8, 2},
698 { 0x00279b04, 4},
699 { 0x00279b20, 2},
700 { 0x00279c40, 25},
701 { 0x00279cc8, 7},
702 { 0x00279cec, 1},
703 { 0x00279cf8, 2},
704 { 0x00279d04, 4},
705 { 0x00279d20, 2},
706 { 0x00279e40, 25},
707 { 0x00279ec8, 7},
708 { 0x00279eec, 1},
709 { 0x00279ef8, 2},
710 { 0x00279f04, 4},
711 { 0x00279f20, 2},
712 { 0x0027a040, 25},
713 { 0x0027a0c8, 7},
714 { 0x0027a0ec, 1},
715 { 0x0027a0f8, 2},
716 { 0x0027a104, 4},
717 { 0x0027a120, 2},
718 { 0x0027a240, 25},
719 { 0x0027a2c8, 7},
720 { 0x0027a2ec, 1},
721 { 0x0027a2f8, 2},
722 { 0x0027a304, 4},
723 { 0x0027a320, 2},
724 { 0x0027a440, 25},
725 { 0x0027a4c8, 7},
726 { 0x0027a4ec, 1},
727 { 0x0027a4f8, 2},
728 { 0x0027a504, 4},
729 { 0x0027a520, 2},
730 { 0x0027a640, 25},
731 { 0x0027a6c8, 7},
732 { 0x0027a6ec, 1},
733 { 0x0027a6f8, 2},
734 { 0x0027a704, 4},
735 { 0x0027a720, 2},
736 { 0x0027a840, 25},
737 { 0x0027a8c8, 7},
738 { 0x0027a8ec, 1},
739 { 0x0027a8f8, 2},
740 { 0x0027a904, 4},
741 { 0x0027a920, 2},
742 { 0x0027aa40, 25},
743 { 0x0027aac8, 7},
744 { 0x0027aaec, 1},
745 { 0x0027aaf8, 2},
746 { 0x0027ab04, 4},
747 { 0x0027ab20, 2},
748 { 0x0027ac40, 25},
749 { 0x0027acc8, 7},
750 { 0x0027acec, 1},
751 { 0x0027acf8, 2},
752 { 0x0027ad04, 4},
753 { 0x0027ad20, 2},
754 { 0x0027ae40, 25},
755 { 0x0027aec8, 7},
756 { 0x0027aeec, 1},
757 { 0x0027aef8, 2},
758 { 0x0027af04, 4},
759 { 0x0027af20, 2},
760 { 0x0027b040, 25},
761 { 0x0027b0c8, 7},
762 { 0x0027b0ec, 1},
763 { 0x0027b0f8, 2},
764 { 0x0027b104, 4},
765 { 0x0027b120, 2},
766 { 0x0027b240, 25},
767 { 0x0027b2c8, 7},
768 { 0x0027b2ec, 1},
769 { 0x0027b2f8, 2},
770 { 0x0027b304, 4},
771 { 0x0027b320, 2},
772 { 0x0027b440, 25},
773 { 0x0027b4c8, 7},
774 { 0x0027b4ec, 1},
775 { 0x0027b4f8, 2},
776 { 0x0027b504, 4},
777 { 0x0027b520, 2},
778 { 0x0027b640, 25},
779 { 0x0027b6c8, 7},
780 { 0x0027b6ec, 1},
781 { 0x0027b6f8, 2},
782 { 0x0027b704, 4},
783 { 0x0027b720, 2},
784 { 0x0027b840, 25},
785 { 0x0027b8c8, 7},
786 { 0x0027b8ec, 1},
787 { 0x0027b8f8, 2},
788 { 0x0027b904, 4},
789 { 0x0027b920, 2},
790 { 0x0027ba40, 25},
791 { 0x0027bac8, 7},
792 { 0x0027baec, 1},
793 { 0x0027baf8, 2},
794 { 0x0027bb04, 4},
795 { 0x0027bb20, 2},
796 { 0x0027bc40, 25},
797 { 0x0027bcc8, 7},
798 { 0x0027bcec, 1},
799 { 0x0027bcf8, 2},
800 { 0x0027bd04, 4},
801 { 0x0027bd20, 2},
802 { 0x0027be40, 25},
803 { 0x0027bec8, 7},
804 { 0x0027beec, 1},
805 { 0x0027bef8, 2},
806 { 0x0027bf04, 4},
807 { 0x0027bf20, 2},
808 { 0x0027c040, 25},
809 { 0x0027c0c8, 7},
810 { 0x0027c0ec, 1},
811 { 0x0027c0f8, 2},
812 { 0x0027c104, 4},
813 { 0x0027c120, 2},
814 { 0x0027c240, 25},
815 { 0x0027c2c8, 7},
816 { 0x0027c2ec, 1},
817 { 0x0027c2f8, 2},
818 { 0x0027c304, 4},
819 { 0x0027c320, 2},
820 { 0x0027c440, 25},
821 { 0x0027c4c8, 7},
822 { 0x0027c4ec, 1},
823 { 0x0027c4f8, 2},
824 { 0x0027c504, 4},
825 { 0x0027c520, 2},
826 { 0x0027c640, 25},
827 { 0x0027c6c8, 7},
828 { 0x0027c6ec, 1},
829 { 0x0027c6f8, 2},
830 { 0x0027c704, 4},
831 { 0x0027c720, 2},
832 { 0x0027c840, 25},
833 { 0x0027c8c8, 7},
834 { 0x0027c8ec, 1},
835 { 0x0027c8f8, 2},
836 { 0x0027c904, 4},
837 { 0x0027c920, 2},
838 { 0x0027ca40, 25},
839 { 0x0027cac8, 7},
840 { 0x0027caec, 1},
841 { 0x0027caf8, 2},
842 { 0x0027cb04, 4},
843 { 0x0027cb20, 2},
844 { 0x0027cc40, 25},
845 { 0x0027ccc8, 7},
846 { 0x0027ccec, 1},
847 { 0x0027ccf8, 2},
848 { 0x0027cd04, 4},
849 { 0x0027cd20, 2},
850 { 0x0027ce40, 25},
851 { 0x0027cec8, 7},
852 { 0x0027ceec, 1},
853 { 0x0027cef8, 2},
854 { 0x0027cf04, 4},
855 { 0x0027cf20, 2},
856 { 0x0027d040, 25},
857 { 0x0027d0c8, 7},
858 { 0x0027d0ec, 1},
859 { 0x0027d0f8, 2},
860 { 0x0027d104, 4},
861 { 0x0027d120, 2},
862 { 0x0027d240, 25},
863 { 0x0027d2c8, 7},
864 { 0x0027d2ec, 1},
865 { 0x0027d2f8, 2},
866 { 0x0027d304, 4},
867 { 0x0027d320, 2},
868 { 0x0027d440, 25},
869 { 0x0027d4c8, 7},
870 { 0x0027d4ec, 1},
871 { 0x0027d4f8, 2},
872 { 0x0027d504, 4},
873 { 0x0027d520, 2},
874 { 0x0027d640, 25},
875 { 0x0027d6c8, 7},
876 { 0x0027d6ec, 1},
877 { 0x0027d6f8, 2},
878 { 0x0027d704, 4},
879 { 0x0027d720, 2},
880 { 0x0027d840, 25},
881 { 0x0027d8c8, 7},
882 { 0x0027d8ec, 1},
883 { 0x0027d8f8, 2},
884 { 0x0027d904, 4},
885 { 0x0027d920, 2},
886 { 0x0027da40, 25},
887 { 0x0027dac8, 7},
888 { 0x0027daec, 1},
889 { 0x0027daf8, 2},
890 { 0x0027db04, 4},
891 { 0x0027db20, 2},
892 { 0x0027dc40, 25},
893 { 0x0027dcc8, 7},
894 { 0x0027dcec, 1},
895 { 0x0027dcf8, 2},
896 { 0x0027dd04, 4},
897 { 0x0027dd20, 2},
898 { 0x0027de40, 25},
899 { 0x0027dec8, 7},
900 { 0x0027deec, 1},
901 { 0x0027def8, 2},
902 { 0x0027df04, 4},
903 { 0x0027df20, 2},
904 { 0x0027e040, 25},
905 { 0x0027e0c8, 7},
906 { 0x0027e0ec, 1},
907 { 0x0027e0f8, 2},
908 { 0x0027e104, 4},
909 { 0x0027e120, 2},
910 { 0x0027e240, 25},
911 { 0x0027e2c8, 7},
912 { 0x0027e2ec, 1},
913 { 0x0027e2f8, 2},
914 { 0x0027e304, 4},
915 { 0x0027e320, 2},
916 { 0x0027e440, 25},
917 { 0x0027e4c8, 7},
918 { 0x0027e4ec, 1},
919 { 0x0027e4f8, 2},
920 { 0x0027e504, 4},
921 { 0x0027e520, 2},
922 { 0x0027e640, 25},
923 { 0x0027e6c8, 7},
924 { 0x0027e6ec, 1},
925 { 0x0027e6f8, 2},
926 { 0x0027e704, 4},
927 { 0x0027e720, 2},
928 { 0x0027e840, 25},
929 { 0x0027e8c8, 7},
930 { 0x0027e8ec, 1},
931 { 0x0027e8f8, 2},
932 { 0x0027e904, 4},
933 { 0x0027e920, 2},
934 { 0x0027ea40, 25},
935 { 0x0027eac8, 7},
936 { 0x0027eaec, 1},
937 { 0x0027eaf8, 2},
938 { 0x0027eb04, 4},
939 { 0x0027eb20, 2},
940 { 0x0027ec40, 25},
941 { 0x0027ecc8, 7},
942 { 0x0027ecec, 1},
943 { 0x0027ecf8, 2},
944 { 0x0027ed04, 4},
945 { 0x0027ed20, 2},
946 { 0x0027ee40, 25},
947 { 0x0027eec8, 7},
948 { 0x0027eeec, 1},
949 { 0x0027eef8, 2},
950 { 0x0027ef04, 4},
951 { 0x0027ef20, 2},
952 { 0x0027f040, 25},
953 { 0x0027f0c8, 7},
954 { 0x0027f0ec, 1},
955 { 0x0027f0f8, 2},
956 { 0x0027f104, 4},
957 { 0x0027f120, 2},
958 { 0x0027f240, 25},
959 { 0x0027f2c8, 7},
960 { 0x0027f2ec, 1},
961 { 0x0027f2f8, 2},
962 { 0x0027f304, 4},
963 { 0x0027f320, 2},
964 { 0x0027f440, 25},
965 { 0x0027f4c8, 7},
966 { 0x0027f4ec, 1},
967 { 0x0027f4f8, 2},
968 { 0x0027f504, 4},
969 { 0x0027f520, 2},
970 { 0x0027f640, 25},
971 { 0x0027f6c8, 7},
972 { 0x0027f6ec, 1},
973 { 0x0027f6f8, 2},
974 { 0x0027f704, 4},
975 { 0x0027f720, 2},
976 { 0x0027f840, 25},
977 { 0x0027f8c8, 7},
978 { 0x0027f8ec, 1},
979 { 0x0027f8f8, 2},
980 { 0x0027f904, 4},
981 { 0x0027f920, 2},
982 { 0x0027fa40, 25},
983 { 0x0027fac8, 7},
984 { 0x0027faec, 1},
985 { 0x0027faf8, 2},
986 { 0x0027fb04, 4},
987 { 0x0027fb20, 2},
988 { 0x0027fc40, 25},
989 { 0x0027fcc8, 7},
990 { 0x0027fcec, 1},
991 { 0x0027fcf8, 2},
992 { 0x0027fd04, 4},
993 { 0x0027fd20, 2},
994 { 0x0027fe40, 25},
995 { 0x0027fec8, 7},
996 { 0x0027feec, 1},
997 { 0x0027fef8, 2},
998 { 0x0027ff04, 4},
999 { 0x0027ff20, 2},
1000 { 0x00400500, 1},
1001 { 0x0040415c, 1},
1002 { 0x00404468, 1},
1003 { 0x00404498, 1},
1004 { 0x00405800, 1},
1005 { 0x00405840, 2},
1006 { 0x00405850, 1},
1007 { 0x00405908, 1},
1008 { 0x00405a00, 1},
1009 { 0x00405b50, 1},
1010 { 0x00406024, 5},
1011 { 0x00407010, 1},
1012 { 0x00407808, 1},
1013 { 0x0040803c, 1},
1014 { 0x00408804, 1},
1015 { 0x0040880c, 1},
1016 { 0x00408900, 2},
1017 { 0x00408910, 1},
1018 { 0x00408944, 1},
1019 { 0x00408984, 1},
1020 { 0x004090a8, 1},
1021 { 0x004098a0, 1},
1022 { 0x00409b00, 1},
1023 { 0x0041000c, 1},
1024 { 0x00410110, 1},
1025 { 0x00410184, 1},
1026 { 0x0041040c, 1},
1027 { 0x00410510, 1},
1028 { 0x00410584, 1},
1029 { 0x00418000, 1},
1030 { 0x00418008, 1},
1031 { 0x00418380, 2},
1032 { 0x00418400, 2},
1033 { 0x004184a0, 1},
1034 { 0x00418604, 1},
1035 { 0x00418680, 1},
1036 { 0x00418704, 1},
1037 { 0x00418714, 1},
1038 { 0x00418800, 1},
1039 { 0x0041881c, 1},
1040 { 0x00418830, 1},
1041 { 0x00418884, 1},
1042 { 0x004188b0, 1},
1043 { 0x004188c8, 3},
1044 { 0x004188fc, 1},
1045 { 0x00418b04, 1},
1046 { 0x00418c04, 1},
1047 { 0x00418c10, 8},
1048 { 0x00418c88, 1},
1049 { 0x00418d00, 1},
1050 { 0x00418e00, 1},
1051 { 0x00418e08, 1},
1052 { 0x00418e34, 1},
1053 { 0x00418e40, 4},
1054 { 0x00418e58, 16},
1055 { 0x00418f08, 1},
1056 { 0x00419000, 1},
1057 { 0x0041900c, 1},
1058 { 0x00419018, 1},
1059 { 0x00419854, 1},
1060 { 0x00419864, 1},
1061 { 0x00419a04, 2},
1062 { 0x00419ab0, 1},
1063 { 0x00419b04, 1},
1064 { 0x00419b3c, 1},
1065 { 0x00419b48, 1},
1066 { 0x00419b50, 1},
1067 { 0x00419ba0, 2},
1068 { 0x00419bb0, 1},
1069 { 0x00419bdc, 1},
1070 { 0x00419c0c, 1},
1071 { 0x00419d00, 1},
1072 { 0x00419d08, 2},
1073 { 0x00419e08, 1},
1074 { 0x00419e80, 8},
1075 { 0x00419ea8, 5},
1076 { 0x00419f00, 8},
1077 { 0x00419f28, 5},
1078 { 0x00419f80, 8},
1079 { 0x00419fa8, 5},
1080 { 0x0041a02c, 2},
1081 { 0x0041a0a8, 1},
1082 { 0x0041a8a0, 3},
1083 { 0x0041b014, 1},
1084 { 0x0041b0cc, 1},
1085 { 0x0041b1dc, 1},
1086 { 0x0041b214, 1},
1087 { 0x0041b2cc, 1},
1088 { 0x0041b3dc, 1},
1089 { 0x0041be0c, 3},
1090 { 0x0041becc, 1},
1091 { 0x0041bfdc, 1},
1092 { 0x0041c054, 1},
1093 { 0x0041c2b0, 1},
1094 { 0x0041c304, 1},
1095 { 0x0041c33c, 1},
1096 { 0x0041c348, 1},
1097 { 0x0041c350, 1},
1098 { 0x0041c3a0, 2},
1099 { 0x0041c3b0, 1},
1100 { 0x0041c3dc, 1},
1101 { 0x0041c40c, 1},
1102 { 0x0041c500, 1},
1103 { 0x0041c508, 2},
1104 { 0x0041c608, 1},
1105 { 0x0041c680, 8},
1106 { 0x0041c6a8, 5},
1107 { 0x0041c700, 8},
1108 { 0x0041c728, 5},
1109 { 0x0041c780, 8},
1110 { 0x0041c7a8, 5},
1111 { 0x0041c854, 1},
1112 { 0x0041cab0, 1},
1113 { 0x0041cb04, 1},
1114 { 0x0041cb3c, 1},
1115 { 0x0041cb48, 1},
1116 { 0x0041cb50, 1},
1117 { 0x0041cba0, 2},
1118 { 0x0041cbb0, 1},
1119 { 0x0041cbdc, 1},
1120 { 0x0041cc0c, 1},
1121 { 0x0041cd00, 1},
1122 { 0x0041cd08, 2},
1123 { 0x0041ce08, 1},
1124 { 0x0041ce80, 8},
1125 { 0x0041cea8, 5},
1126 { 0x0041cf00, 8},
1127 { 0x0041cf28, 5},
1128 { 0x0041cf80, 8},
1129 { 0x0041cfa8, 5},
1130 { 0x0041d054, 1},
1131 { 0x0041d2b0, 1},
1132 { 0x0041d304, 1},
1133 { 0x0041d33c, 1},
1134 { 0x0041d348, 1},
1135 { 0x0041d350, 1},
1136 { 0x0041d3a0, 2},
1137 { 0x0041d3b0, 1},
1138 { 0x0041d3dc, 1},
1139 { 0x0041d40c, 1},
1140 { 0x0041d500, 1},
1141 { 0x0041d508, 2},
1142 { 0x0041d608, 1},
1143 { 0x0041d680, 8},
1144 { 0x0041d6a8, 5},
1145 { 0x0041d700, 8},
1146 { 0x0041d728, 5},
1147 { 0x0041d780, 8},
1148 { 0x0041d7a8, 5},
1149 { 0x0041d854, 1},
1150 { 0x0041dab0, 1},
1151 { 0x0041db04, 1},
1152 { 0x0041db3c, 1},
1153 { 0x0041db48, 1},
1154 { 0x0041db50, 1},
1155 { 0x0041dba0, 2},
1156 { 0x0041dbb0, 1},
1157 { 0x0041dbdc, 1},
1158 { 0x0041dc0c, 1},
1159 { 0x0041dd00, 1},
1160 { 0x0041dd08, 2},
1161 { 0x0041de08, 1},
1162 { 0x0041de80, 8},
1163 { 0x0041dea8, 5},
1164 { 0x0041df00, 8},
1165 { 0x0041df28, 5},
1166 { 0x0041df80, 8},
1167 { 0x0041dfa8, 5},
1168 { 0x00481a00, 19},
1169 { 0x00481b00, 50},
1170 { 0x00481e00, 50},
1171 { 0x00481f00, 50},
1172 { 0x00484200, 19},
1173 { 0x00484300, 50},
1174 { 0x00484600, 50},
1175 { 0x00484700, 50},
1176 { 0x00484a00, 19},
1177 { 0x00484b00, 50},
1178 { 0x00484e00, 50},
1179 { 0x00484f00, 50},
1180 { 0x00485200, 19},
1181 { 0x00485300, 50},
1182 { 0x00485600, 50},
1183 { 0x00485700, 50},
1184 { 0x00485a00, 19},
1185 { 0x00485b00, 50},
1186 { 0x00485e00, 50},
1187 { 0x00485f00, 50},
1188 { 0x00500384, 1},
1189 { 0x005004a0, 1},
1190 { 0x00500604, 1},
1191 { 0x00500680, 1},
1192 { 0x00500714, 1},
1193 { 0x0050081c, 1},
1194 { 0x00500884, 1},
1195 { 0x005008b0, 1},
1196 { 0x005008c8, 3},
1197 { 0x005008fc, 1},
1198 { 0x00500b04, 1},
1199 { 0x00500c04, 1},
1200 { 0x00500c10, 8},
1201 { 0x00500c88, 1},
1202 { 0x00500d00, 1},
1203 { 0x00500e08, 1},
1204 { 0x00500f08, 1},
1205 { 0x00501000, 1},
1206 { 0x0050100c, 1},
1207 { 0x00501018, 1},
1208 { 0x00501854, 1},
1209 { 0x00501ab0, 1},
1210 { 0x00501b04, 1},
1211 { 0x00501b3c, 1},
1212 { 0x00501b48, 1},
1213 { 0x00501b50, 1},
1214 { 0x00501ba0, 2},
1215 { 0x00501bb0, 1},
1216 { 0x00501bdc, 1},
1217 { 0x00501c0c, 1},
1218 { 0x00501d00, 1},
1219 { 0x00501d08, 2},
1220 { 0x00501e08, 1},
1221 { 0x00501e80, 8},
1222 { 0x00501ea8, 5},
1223 { 0x00501f00, 8},
1224 { 0x00501f28, 5},
1225 { 0x00501f80, 8},
1226 { 0x00501fa8, 5},
1227 { 0x0050202c, 2},
1228 { 0x005020a8, 1},
1229 { 0x005028a0, 3},
1230 { 0x00503014, 1},
1231 { 0x005030cc, 1},
1232 { 0x005031dc, 1},
1233 { 0x00503214, 1},
1234 { 0x005032cc, 1},
1235 { 0x005033dc, 1},
1236 { 0x00503e14, 1},
1237 { 0x00503ecc, 1},
1238 { 0x00503fdc, 1},
1239 { 0x00504054, 1},
1240 { 0x005042b0, 1},
1241 { 0x00504304, 1},
1242 { 0x0050433c, 1},
1243 { 0x00504348, 1},
1244 { 0x00504350, 1},
1245 { 0x005043a0, 2},
1246 { 0x005043b0, 1},
1247 { 0x005043dc, 1},
1248 { 0x0050440c, 1},
1249 { 0x00504500, 1},
1250 { 0x00504508, 2},
1251 { 0x00504608, 1},
1252 { 0x00504680, 8},
1253 { 0x005046a8, 5},
1254 { 0x00504700, 8},
1255 { 0x00504728, 5},
1256 { 0x00504780, 8},
1257 { 0x005047a8, 5},
1258 { 0x00504854, 1},
1259 { 0x00504ab0, 1},
1260 { 0x00504b04, 1},
1261 { 0x00504b3c, 1},
1262 { 0x00504b48, 1},
1263 { 0x00504b50, 1},
1264 { 0x00504ba0, 2},
1265 { 0x00504bb0, 1},
1266 { 0x00504bdc, 1},
1267 { 0x00504c0c, 1},
1268 { 0x00504d00, 1},
1269 { 0x00504d08, 2},
1270 { 0x00504e08, 1},
1271 { 0x00504e80, 8},
1272 { 0x00504ea8, 5},
1273 { 0x00504f00, 8},
1274 { 0x00504f28, 5},
1275 { 0x00504f80, 8},
1276 { 0x00504fa8, 5},
1277 { 0x00505054, 1},
1278 { 0x005052b0, 1},
1279 { 0x00505304, 1},
1280 { 0x0050533c, 1},
1281 { 0x00505348, 1},
1282 { 0x00505350, 1},
1283 { 0x005053a0, 2},
1284 { 0x005053b0, 1},
1285 { 0x005053dc, 1},
1286 { 0x0050540c, 1},
1287 { 0x00505500, 1},
1288 { 0x00505508, 2},
1289 { 0x00505608, 1},
1290 { 0x00505680, 8},
1291 { 0x005056a8, 5},
1292 { 0x00505700, 8},
1293 { 0x00505728, 5},
1294 { 0x00505780, 8},
1295 { 0x005057a8, 5},
1296 { 0x00505854, 1},
1297 { 0x00505ab0, 1},
1298 { 0x00505b04, 1},
1299 { 0x00505b3c, 1},
1300 { 0x00505b48, 1},
1301 { 0x00505b50, 1},
1302 { 0x00505ba0, 2},
1303 { 0x00505bb0, 1},
1304 { 0x00505bdc, 1},
1305 { 0x00505c0c, 1},
1306 { 0x00505d00, 1},
1307 { 0x00505d08, 2},
1308 { 0x00505e08, 1},
1309 { 0x00505e80, 8},
1310 { 0x00505ea8, 5},
1311 { 0x00505f00, 8},
1312 { 0x00505f28, 5},
1313 { 0x00505f80, 8},
1314 { 0x00505fa8, 5},
1315 { 0x00581a00, 19},
1316 { 0x00581b00, 50},
1317 { 0x00581e00, 50},
1318 { 0x00581f00, 50},
1319 { 0x00584200, 19},
1320 { 0x00584300, 50},
1321 { 0x00584600, 50},
1322 { 0x00584700, 50},
1323 { 0x00584a00, 19},
1324 { 0x00584b00, 50},
1325 { 0x00584e00, 50},
1326 { 0x00584f00, 50},
1327 { 0x00585200, 19},
1328 { 0x00585300, 50},
1329 { 0x00585600, 50},
1330 { 0x00585700, 50},
1331 { 0x00585a00, 19},
1332 { 0x00585b00, 50},
1333 { 0x00585e00, 50},
1334 { 0x00585f00, 50},
1335 { 0x00900100, 1},
1336 { 0x009a0100, 1},
1337 { 0x00a00160, 2},
1338 { 0x00a007d0, 1},
1339 { 0x00a04200, 1},
1340 { 0x00a04470, 2},
1341 { 0x00a08190, 1},
1342 { 0x00a08198, 4},
1343 { 0x00a0c820, 2},
1344 { 0x00a0cc20, 2},
1345 { 0x00a0e470, 2},
1346 { 0x00a0e490, 9},
1347 { 0x00a0e6a8, 7},
1348 { 0x00a0e6c8, 2},
1349 { 0x00a0e6d4, 7},
1350 { 0x00a0e6f4, 2},
1351 { 0x00a0ec70, 2},
1352 { 0x00a0ec90, 9},
1353 { 0x00a0eea8, 7},
1354 { 0x00a0eec8, 2},
1355 { 0x00a0eed4, 7},
1356 { 0x00a0eef4, 2},
1357 { 0x00a10190, 1},
1358 { 0x00a10198, 4},
1359 { 0x00a14820, 2},
1360 { 0x00a14c20, 2},
1361 { 0x00a16470, 2},
1362 { 0x00a16490, 9},
1363 { 0x00a166a8, 7},
1364 { 0x00a166c8, 2},
1365 { 0x00a166d4, 7},
1366 { 0x00a166f4, 2},
1367 { 0x00a16c70, 2},
1368 { 0x00a16c90, 9},
1369 { 0x00a16ea8, 7},
1370 { 0x00a16ec8, 2},
1371 { 0x00a16ed4, 7},
1372 { 0x00a16ef4, 2},
1373 { 0x00a18190, 1},
1374 { 0x00a18198, 4},
1375 { 0x00a1c820, 2},
1376 { 0x00a1cc20, 2},
1377 { 0x00a1e470, 2},
1378 { 0x00a1e490, 9},
1379 { 0x00a1e6a8, 7},
1380 { 0x00a1e6c8, 2},
1381 { 0x00a1e6d4, 7},
1382 { 0x00a1e6f4, 2},
1383 { 0x00a1ec70, 2},
1384 { 0x00a1ec90, 9},
1385 { 0x00a1eea8, 7},
1386 { 0x00a1eec8, 2},
1387 { 0x00a1eed4, 7},
1388 { 0x00a1eef4, 2},
1389 { 0x00a20190, 1},
1390 { 0x00a20198, 4},
1391 { 0x00a24820, 2},
1392 { 0x00a24c20, 2},
1393 { 0x00a26470, 2},
1394 { 0x00a26490, 9},
1395 { 0x00a266a8, 7},
1396 { 0x00a266c8, 2},
1397 { 0x00a266d4, 7},
1398 { 0x00a266f4, 2},
1399 { 0x00a26c70, 2},
1400 { 0x00a26c90, 9},
1401 { 0x00a26ea8, 7},
1402 { 0x00a26ec8, 2},
1403 { 0x00a26ed4, 7},
1404 { 0x00a26ef4, 2},
1405 { 0x00a28190, 1},
1406 { 0x00a28198, 4},
1407 { 0x00a2c820, 2},
1408 { 0x00a2cc20, 2},
1409 { 0x00a2e470, 2},
1410 { 0x00a2e490, 9},
1411 { 0x00a2e6a8, 7},
1412 { 0x00a2e6c8, 2},
1413 { 0x00a2e6d4, 7},
1414 { 0x00a2e6f4, 2},
1415 { 0x00a2ec70, 2},
1416 { 0x00a2ec90, 9},
1417 { 0x00a2eea8, 7},
1418 { 0x00a2eec8, 2},
1419 { 0x00a2eed4, 7},
1420 { 0x00a2eef4, 2},
1421 { 0x00a30190, 1},
1422 { 0x00a30198, 4},
1423 { 0x00a34820, 2},
1424 { 0x00a34c20, 2},
1425 { 0x00a36470, 2},
1426 { 0x00a36490, 9},
1427 { 0x00a366a8, 7},
1428 { 0x00a366c8, 2},
1429 { 0x00a366d4, 7},
1430 { 0x00a366f4, 2},
1431 { 0x00a36c70, 2},
1432 { 0x00a36c90, 9},
1433 { 0x00a36ea8, 7},
1434 { 0x00a36ec8, 2},
1435 { 0x00a36ed4, 7},
1436 { 0x00a36ef4, 2},
1437 { 0x00a38190, 1},
1438 { 0x00a38198, 4},
1439 { 0x00a3c820, 2},
1440 { 0x00a3cc20, 2},
1441 { 0x00a3e470, 2},
1442 { 0x00a3e490, 9},
1443 { 0x00a3e6a8, 7},
1444 { 0x00a3e6c8, 2},
1445 { 0x00a3e6d4, 7},
1446 { 0x00a3e6f4, 2},
1447 { 0x00a3ec70, 2},
1448 { 0x00a3ec90, 9},
1449 { 0x00a3eea8, 7},
1450 { 0x00a3eec8, 2},
1451 { 0x00a3eed4, 7},
1452 { 0x00a3eef4, 2},
1453};
1454
1455
1456static const u32 gv11b_global_whitelist_ranges_count =
1457 ARRAY_SIZE(gv11b_global_whitelist_ranges);
1458
1459/* context */
1460
1461/* runcontrol */
1462static const u32 gv11b_runcontrol_whitelist[] = {
1463};
1464static const u32 gv11b_runcontrol_whitelist_count =
1465 ARRAY_SIZE(gv11b_runcontrol_whitelist);
1466
1467static const struct regop_offset_range gv11b_runcontrol_whitelist_ranges[] = {
1468};
1469static const u32 gv11b_runcontrol_whitelist_ranges_count =
1470 ARRAY_SIZE(gv11b_runcontrol_whitelist_ranges);
1471
1472
1473/* quad ctl */
1474static const u32 gv11b_qctl_whitelist[] = {
1475};
1476static const u32 gv11b_qctl_whitelist_count =
1477 ARRAY_SIZE(gv11b_qctl_whitelist);
1478
1479static const struct regop_offset_range gv11b_qctl_whitelist_ranges[] = {
1480};
1481static const u32 gv11b_qctl_whitelist_ranges_count =
1482 ARRAY_SIZE(gv11b_qctl_whitelist_ranges);
1483
1484const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void)
1485{
1486 return gv11b_global_whitelist_ranges;
1487}
1488
1489int gv11b_get_global_whitelist_ranges_count(void)
1490{
1491 return gv11b_global_whitelist_ranges_count;
1492}
1493
1494const struct regop_offset_range *gv11b_get_context_whitelist_ranges(void)
1495{
1496 return gv11b_global_whitelist_ranges;
1497}
1498
1499int gv11b_get_context_whitelist_ranges_count(void)
1500{
1501 return gv11b_global_whitelist_ranges_count;
1502}
1503
1504const u32 *gv11b_get_runcontrol_whitelist(void)
1505{
1506 return gv11b_runcontrol_whitelist;
1507}
1508
1509int gv11b_get_runcontrol_whitelist_count(void)
1510{
1511 return gv11b_runcontrol_whitelist_count;
1512}
1513
1514const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void)
1515{
1516 return gv11b_runcontrol_whitelist_ranges;
1517}
1518
1519int gv11b_get_runcontrol_whitelist_ranges_count(void)
1520{
1521 return gv11b_runcontrol_whitelist_ranges_count;
1522}
1523
1524const u32 *gv11b_get_qctl_whitelist(void)
1525{
1526 return gv11b_qctl_whitelist;
1527}
1528
1529int gv11b_get_qctl_whitelist_count(void)
1530{
1531 return gv11b_qctl_whitelist_count;
1532}
1533
1534const struct regop_offset_range *gv11b_get_qctl_whitelist_ranges(void)
1535{
1536 return gv11b_qctl_whitelist_ranges;
1537}
1538
1539int gv11b_get_qctl_whitelist_ranges_count(void)
1540{
1541 return gv11b_qctl_whitelist_ranges_count;
1542}
1543
/*
 * SMPC workaround hook: gv11b hardware does not need the workaround,
 * so this is a deliberate no-op that always reports success.
 */
int gv11b_apply_smpc_war(struct dbg_session_gk20a *dbg_s)
{
	return 0;
}
diff --git a/drivers/gpu/nvgpu/gv11b/regops_gv11b.h b/drivers/gpu/nvgpu/gv11b/regops_gv11b.h
new file mode 100644
index 00000000..0ee2edfe
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/regops_gv11b.h
@@ -0,0 +1,42 @@
1/*
2 *
3 * Tegra GV11B GPU Driver Register Ops
4 *
5 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25#ifndef __REGOPS_GV11B_H_
26#define __REGOPS_GV11B_H_
27
28const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void);
29int gv11b_get_global_whitelist_ranges_count(void);
30const struct regop_offset_range *gv11b_get_context_whitelist_ranges(void);
31int gv11b_get_context_whitelist_ranges_count(void);
32const u32 *gv11b_get_runcontrol_whitelist(void);
33int gv11b_get_runcontrol_whitelist_count(void);
34const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void);
35int gv11b_get_runcontrol_whitelist_ranges_count(void);
36const u32 *gv11b_get_qctl_whitelist(void);
37int gv11b_get_qctl_whitelist_count(void);
38const struct regop_offset_range *gv11b_get_qctl_whitelist_ranges(void);
39int gv11b_get_qctl_whitelist_ranges_count(void);
40int gv11b_apply_smpc_war(struct dbg_session_gk20a *dbg_s);
41
42#endif /* __REGOPS_GV11B_H_ */
diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
new file mode 100644
index 00000000..fe1aa8a5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
@@ -0,0 +1,185 @@
1/*
2 * Volta GPU series Subcontext
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26
27#include "gv11b/subctx_gv11b.h"
28
29#include <nvgpu/dma.h>
30#include <nvgpu/log.h>
31#include <nvgpu/gmmu.h>
32
33#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
34#include <nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h>
35
/* Local helpers that program per-subcontext PDB state into an instance
 * block; defined below. */
static void gv11b_init_subcontext_pdb(struct channel_gk20a *c,
		struct nvgpu_mem *inst_block);
static void gv11b_subctx_commit_valid_mask(struct channel_gk20a *c,
		struct nvgpu_mem *inst_block);
static void gv11b_subctx_commit_pdb(struct channel_gk20a *c,
		struct nvgpu_mem *inst_block);
43
44void gv11b_free_subctx_header(struct channel_gk20a *c)
45{
46 struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
47 struct gk20a *g = c->g;
48
49 nvgpu_log(g, gpu_dbg_fn, "gv11b_free_subctx_header");
50
51 if (ctx->mem.gpu_va) {
52 nvgpu_gmmu_unmap(c->vm, &ctx->mem, ctx->mem.gpu_va);
53
54 nvgpu_dma_free(g, &ctx->mem);
55 }
56}
57
58int gv11b_alloc_subctx_header(struct channel_gk20a *c)
59{
60 struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
61 struct gk20a *g = c->g;
62 int ret = 0;
63
64 nvgpu_log(g, gpu_dbg_fn, "gv11b_alloc_subctx_header");
65
66 if (ctx->mem.gpu_va == 0) {
67 ret = nvgpu_dma_alloc_flags_sys(g,
68 0, /* No Special flags */
69 ctxsw_prog_fecs_header_v(),
70 &ctx->mem);
71 if (ret) {
72 nvgpu_err(g, "failed to allocate sub ctx header");
73 return ret;
74 }
75 ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
76 &ctx->mem,
77 ctx->mem.size,
78 0, /* not GPU-cacheable */
79 gk20a_mem_flag_none, true,
80 ctx->mem.aperture);
81 if (!ctx->mem.gpu_va) {
82 nvgpu_err(g, "failed to map ctx header");
83 nvgpu_dma_free(g, &ctx->mem);
84 return -ENOMEM;
85 }
86 /* Now clear the buffer */
87 if (nvgpu_mem_begin(g, &ctx->mem))
88 return -ENOMEM;
89
90 nvgpu_memset(g, &ctx->mem, 0, 0, ctx->mem.size);
91 nvgpu_mem_end(g, &ctx->mem);
92
93 gv11b_init_subcontext_pdb(c, &c->inst_block);
94 }
95 return ret;
96}
97
98static void gv11b_init_subcontext_pdb(struct channel_gk20a *c,
99 struct nvgpu_mem *inst_block)
100{
101 struct gk20a *g = c->g;
102
103 gv11b_subctx_commit_pdb(c, inst_block);
104 gv11b_subctx_commit_valid_mask(c, inst_block);
105
106 nvgpu_log(g, gpu_dbg_info, " subctx %d instblk set", c->t19x.subctx_id);
107 nvgpu_mem_wr32(g, inst_block, ram_in_engine_wfi_veid_w(),
108 ram_in_engine_wfi_veid_f(c->t19x.subctx_id));
109
110}
111
112int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
113{
114 struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
115 struct nvgpu_mem *gr_mem;
116 struct gk20a *g = c->g;
117 int ret = 0;
118 u32 addr_lo, addr_hi;
119
120 addr_lo = u64_lo32(gpu_va);
121 addr_hi = u64_hi32(gpu_va);
122
123 gr_mem = &ctx->mem;
124 g->ops.mm.l2_flush(g, true);
125 if (nvgpu_mem_begin(g, gr_mem))
126 return -ENOMEM;
127
128 nvgpu_mem_wr(g, gr_mem,
129 ctxsw_prog_main_image_context_buffer_ptr_hi_o(), addr_hi);
130 nvgpu_mem_wr(g, gr_mem,
131 ctxsw_prog_main_image_context_buffer_ptr_o(), addr_lo);
132
133 nvgpu_mem_wr(g, gr_mem,
134 ctxsw_prog_main_image_ctl_o(),
135 ctxsw_prog_main_image_ctl_type_per_veid_header_v());
136 nvgpu_mem_end(g, gr_mem);
137 return ret;
138}
139
140void gv11b_subctx_commit_valid_mask(struct channel_gk20a *c,
141 struct nvgpu_mem *inst_block)
142{
143 struct gk20a *g = c->g;
144
145 /* Make all subctx pdbs valid */
146 nvgpu_mem_wr32(g, inst_block, 166, 0xffffffff);
147 nvgpu_mem_wr32(g, inst_block, 167, 0xffffffff);
148}
149
150void gv11b_subctx_commit_pdb(struct channel_gk20a *c,
151 struct nvgpu_mem *inst_block)
152{
153 struct gk20a *g = c->g;
154 struct fifo_gk20a *f = &g->fifo;
155 struct vm_gk20a *vm = c->vm;
156 u32 lo, hi;
157 u32 subctx_id = 0;
158 u32 format_word;
159 u32 pdb_addr_lo, pdb_addr_hi;
160 u64 pdb_addr;
161 u32 aperture = nvgpu_aperture_mask(g, vm->pdb.mem,
162 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(),
163 ram_in_sc_page_dir_base_target_vid_mem_v());
164
165 pdb_addr = nvgpu_mem_get_addr(g, vm->pdb.mem);
166 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
167 pdb_addr_hi = u64_hi32(pdb_addr);
168 format_word = ram_in_sc_page_dir_base_target_f(
169 aperture, 0) |
170 ram_in_sc_page_dir_base_vol_f(
171 ram_in_sc_page_dir_base_vol_true_v(), 0) |
172 ram_in_sc_page_dir_base_fault_replay_tex_f(1, 0) |
173 ram_in_sc_page_dir_base_fault_replay_gcc_f(1, 0) |
174 ram_in_sc_use_ver2_pt_format_f(1, 0) |
175 ram_in_sc_big_page_size_f(1, 0) |
176 ram_in_sc_page_dir_base_lo_0_f(pdb_addr_lo);
177 nvgpu_log(g, gpu_dbg_info, " pdb info lo %x hi %x",
178 format_word, pdb_addr_hi);
179 for (subctx_id = 0; subctx_id < f->t19x.max_subctx_count; subctx_id++) {
180 lo = ram_in_sc_page_dir_base_vol_0_w() + (4 * subctx_id);
181 hi = ram_in_sc_page_dir_base_hi_0_w() + (4 * subctx_id);
182 nvgpu_mem_wr32(g, inst_block, lo, format_word);
183 nvgpu_mem_wr32(g, inst_block, hi, pdb_addr_hi);
184 }
185}
diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.h b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.h
new file mode 100644
index 00000000..10dc0ba5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.h
@@ -0,0 +1,34 @@
1/*
2 *
3 * Volta GPU series Subcontext
4 *
5 * Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25#ifndef __SUBCONTEXT_GV11B_H__
26#define __SUBCONTEXT_GV11B_H__
27
28int gv11b_alloc_subctx_header(struct channel_gk20a *c);
29
30void gv11b_free_subctx_header(struct channel_gk20a *c);
31
32int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va);
33
34#endif /* __SUBCONTEXT_GV11B_H__ */
diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
new file mode 100644
index 00000000..18987119
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c
@@ -0,0 +1,75 @@
1/*
2 * GV11B Therm
3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26
27#include <nvgpu/soc.h>
28
29#include <nvgpu/hw/gv11b/hw_therm_gv11b.h>
30
31int gv11b_elcg_init_idle_filters(struct gk20a *g)
32{
33 u32 gate_ctrl, idle_filter;
34 u32 engine_id;
35 u32 active_engine_id = 0;
36 struct fifo_gk20a *f = &g->fifo;
37
38 if (nvgpu_platform_is_simulation(g))
39 return 0;
40
41 gk20a_dbg_info("init clock/power gate reg");
42
43 for (engine_id = 0; engine_id < f->num_engines; engine_id++) {
44 active_engine_id = f->active_engines_list[engine_id];
45
46 gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id));
47 gate_ctrl = set_field(gate_ctrl,
48 therm_gate_ctrl_eng_idle_filt_exp_m(),
49 therm_gate_ctrl_eng_idle_filt_exp__prod_f());
50 gate_ctrl = set_field(gate_ctrl,
51 therm_gate_ctrl_eng_idle_filt_mant_m(),
52 therm_gate_ctrl_eng_idle_filt_mant__prod_f());
53 gate_ctrl = set_field(gate_ctrl,
54 therm_gate_ctrl_eng_delay_before_m(),
55 therm_gate_ctrl_eng_delay_before__prod_f());
56 gate_ctrl = set_field(gate_ctrl,
57 therm_gate_ctrl_eng_delay_after_m(),
58 therm_gate_ctrl_eng_delay_after__prod_f());
59 gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl);
60 }
61
62 idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r());
63 idle_filter = set_field(idle_filter,
64 therm_fecs_idle_filter_value_m(),
65 therm_fecs_idle_filter_value__prod_f());
66 gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter);
67
68 idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r());
69 idle_filter = set_field(idle_filter,
70 therm_hubmmu_idle_filter_value_m(),
71 therm_hubmmu_idle_filter_value__prod_f());
72 gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter);
73
74 return 0;
75}
diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.h b/drivers/gpu/nvgpu/gv11b/therm_gv11b.h
new file mode 100644
index 00000000..1d89597b
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
#ifndef THERM_GV11B_H
#define THERM_GV11B_H

struct gk20a;

/* Program production ELCG idle filters; no-op on simulation. */
int gv11b_elcg_init_idle_filters(struct gk20a *g);

#endif /* THERM_GV11B_H */
28#endif /* THERM_GV11B_H */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h
new file mode 100644
index 00000000..9ef1dc30
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef __NVGPU_ENABLED_T19X_H__
#define __NVGPU_ENABLED_T19X_H__

/*
 * subcontexts are available
 * (presumably an index into the nvgpu "enabled" flag bitmap used by
 * nvgpu_is_enabled() — confirm against include/nvgpu/enabled.h)
 */
#define NVGPU_SUPPORT_TSG_SUBCONTEXTS 63

#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h
new file mode 100644
index 00000000..eea51fbb
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVGPU_GMMU_T19X_H__
24#define __NVGPU_GMMU_T19X_H__
25
26struct nvgpu_gmmu_attrs;
27
28struct nvgpu_gmmu_attrs_t19x {
29 bool l3_alloc;
30};
31
32void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags);
33
34#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h
new file mode 100644
index 00000000..7771f1ea
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_bus_gv100_h_
57#define _hw_bus_gv100_h_
58
59static inline u32 bus_sw_scratch_r(u32 i)
60{
61 return 0x00001580U + i*4U;
62}
63static inline u32 bus_bar0_window_r(void)
64{
65 return 0x00001700U;
66}
67static inline u32 bus_bar0_window_base_f(u32 v)
68{
69 return (v & 0xffffffU) << 0U;
70}
71static inline u32 bus_bar0_window_target_vid_mem_f(void)
72{
73 return 0x0U;
74}
75static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void)
76{
77 return 0x2000000U;
78}
79static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void)
80{
81 return 0x3000000U;
82}
83static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void)
84{
85 return 0x00000010U;
86}
87static inline u32 bus_bar1_block_r(void)
88{
89 return 0x00001704U;
90}
91static inline u32 bus_bar1_block_ptr_f(u32 v)
92{
93 return (v & 0xfffffffU) << 0U;
94}
95static inline u32 bus_bar1_block_target_vid_mem_f(void)
96{
97 return 0x0U;
98}
99static inline u32 bus_bar1_block_target_sys_mem_coh_f(void)
100{
101 return 0x20000000U;
102}
103static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void)
104{
105 return 0x30000000U;
106}
107static inline u32 bus_bar1_block_mode_virtual_f(void)
108{
109 return 0x80000000U;
110}
111static inline u32 bus_bar2_block_r(void)
112{
113 return 0x00001714U;
114}
115static inline u32 bus_bar2_block_ptr_f(u32 v)
116{
117 return (v & 0xfffffffU) << 0U;
118}
119static inline u32 bus_bar2_block_target_vid_mem_f(void)
120{
121 return 0x0U;
122}
123static inline u32 bus_bar2_block_target_sys_mem_coh_f(void)
124{
125 return 0x20000000U;
126}
127static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void)
128{
129 return 0x30000000U;
130}
131static inline u32 bus_bar2_block_mode_virtual_f(void)
132{
133 return 0x80000000U;
134}
135static inline u32 bus_bar1_block_ptr_shift_v(void)
136{
137 return 0x0000000cU;
138}
139static inline u32 bus_bar2_block_ptr_shift_v(void)
140{
141 return 0x0000000cU;
142}
143static inline u32 bus_bind_status_r(void)
144{
145 return 0x00001710U;
146}
147static inline u32 bus_bind_status_bar1_pending_v(u32 r)
148{
149 return (r >> 0U) & 0x1U;
150}
151static inline u32 bus_bind_status_bar1_pending_empty_f(void)
152{
153 return 0x0U;
154}
155static inline u32 bus_bind_status_bar1_pending_busy_f(void)
156{
157 return 0x1U;
158}
159static inline u32 bus_bind_status_bar1_outstanding_v(u32 r)
160{
161 return (r >> 1U) & 0x1U;
162}
163static inline u32 bus_bind_status_bar1_outstanding_false_f(void)
164{
165 return 0x0U;
166}
167static inline u32 bus_bind_status_bar1_outstanding_true_f(void)
168{
169 return 0x2U;
170}
171static inline u32 bus_bind_status_bar2_pending_v(u32 r)
172{
173 return (r >> 2U) & 0x1U;
174}
175static inline u32 bus_bind_status_bar2_pending_empty_f(void)
176{
177 return 0x0U;
178}
179static inline u32 bus_bind_status_bar2_pending_busy_f(void)
180{
181 return 0x4U;
182}
183static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
184{
185 return (r >> 3U) & 0x1U;
186}
187static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
188{
189 return 0x0U;
190}
191static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
192{
193 return 0x8U;
194}
195static inline u32 bus_intr_0_r(void)
196{
197 return 0x00001100U;
198}
199static inline u32 bus_intr_0_pri_squash_m(void)
200{
201 return 0x1U << 1U;
202}
203static inline u32 bus_intr_0_pri_fecserr_m(void)
204{
205 return 0x1U << 2U;
206}
207static inline u32 bus_intr_0_pri_timeout_m(void)
208{
209 return 0x1U << 3U;
210}
211static inline u32 bus_intr_en_0_r(void)
212{
213 return 0x00001140U;
214}
215static inline u32 bus_intr_en_0_pri_squash_m(void)
216{
217 return 0x1U << 1U;
218}
219static inline u32 bus_intr_en_0_pri_fecserr_m(void)
220{
221 return 0x1U << 2U;
222}
223static inline u32 bus_intr_en_0_pri_timeout_m(void)
224{
225 return 0x1U << 3U;
226}
227#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h
new file mode 100644
index 00000000..b1478037
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ccsr_gv100_h_
57#define _hw_ccsr_gv100_h_
58
59static inline u32 ccsr_channel_inst_r(u32 i)
60{
61 return 0x00800000U + i*8U;
62}
63static inline u32 ccsr_channel_inst__size_1_v(void)
64{
65 return 0x00001000U;
66}
67static inline u32 ccsr_channel_inst_ptr_f(u32 v)
68{
69 return (v & 0xfffffffU) << 0U;
70}
71static inline u32 ccsr_channel_inst_target_vid_mem_f(void)
72{
73 return 0x0U;
74}
75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void)
76{
77 return 0x20000000U;
78}
79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void)
80{
81 return 0x30000000U;
82}
83static inline u32 ccsr_channel_inst_bind_false_f(void)
84{
85 return 0x0U;
86}
87static inline u32 ccsr_channel_inst_bind_true_f(void)
88{
89 return 0x80000000U;
90}
91static inline u32 ccsr_channel_r(u32 i)
92{
93 return 0x00800004U + i*8U;
94}
95static inline u32 ccsr_channel__size_1_v(void)
96{
97 return 0x00001000U;
98}
99static inline u32 ccsr_channel_enable_v(u32 r)
100{
101 return (r >> 0U) & 0x1U;
102}
103static inline u32 ccsr_channel_enable_set_f(u32 v)
104{
105 return (v & 0x1U) << 10U;
106}
107static inline u32 ccsr_channel_enable_set_true_f(void)
108{
109 return 0x400U;
110}
111static inline u32 ccsr_channel_enable_clr_true_f(void)
112{
113 return 0x800U;
114}
115static inline u32 ccsr_channel_status_v(u32 r)
116{
117 return (r >> 24U) & 0xfU;
118}
119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
120{
121 return 0x00000002U;
122}
123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
124{
125 return 0x00000004U;
126}
127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
128{
129 return 0x0000000aU;
130}
131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
132{
133 return 0x0000000bU;
134}
135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
136{
137 return 0x0000000cU;
138}
139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
140{
141 return 0x0000000dU;
142}
143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
144{
145 return 0x0000000eU;
146}
147static inline u32 ccsr_channel_next_v(u32 r)
148{
149 return (r >> 1U) & 0x1U;
150}
151static inline u32 ccsr_channel_next_true_v(void)
152{
153 return 0x00000001U;
154}
155static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
156{
157 return 0x100U;
158}
159static inline u32 ccsr_channel_pbdma_faulted_f(u32 v)
160{
161 return (v & 0x1U) << 22U;
162}
163static inline u32 ccsr_channel_pbdma_faulted_reset_f(void)
164{
165 return 0x400000U;
166}
167static inline u32 ccsr_channel_eng_faulted_f(u32 v)
168{
169 return (v & 0x1U) << 23U;
170}
171static inline u32 ccsr_channel_eng_faulted_v(u32 r)
172{
173 return (r >> 23U) & 0x1U;
174}
175static inline u32 ccsr_channel_eng_faulted_reset_f(void)
176{
177 return 0x800000U;
178}
179static inline u32 ccsr_channel_eng_faulted_true_v(void)
180{
181 return 0x00000001U;
182}
183static inline u32 ccsr_channel_busy_v(u32 r)
184{
185 return (r >> 28U) & 0x1U;
186}
187#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h
new file mode 100644
index 00000000..18b5fc66
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ce_gv100_h_
57#define _hw_ce_gv100_h_
58
59static inline u32 ce_intr_status_r(u32 i)
60{
61 return 0x00104410U + i*128U;
62}
63static inline u32 ce_intr_status_blockpipe_pending_f(void)
64{
65 return 0x1U;
66}
67static inline u32 ce_intr_status_blockpipe_reset_f(void)
68{
69 return 0x1U;
70}
71static inline u32 ce_intr_status_nonblockpipe_pending_f(void)
72{
73 return 0x2U;
74}
75static inline u32 ce_intr_status_nonblockpipe_reset_f(void)
76{
77 return 0x2U;
78}
79static inline u32 ce_intr_status_launcherr_pending_f(void)
80{
81 return 0x4U;
82}
83static inline u32 ce_intr_status_launcherr_reset_f(void)
84{
85 return 0x4U;
86}
87static inline u32 ce_intr_status_invalid_config_pending_f(void)
88{
89 return 0x8U;
90}
91static inline u32 ce_intr_status_invalid_config_reset_f(void)
92{
93 return 0x8U;
94}
95static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void)
96{
97 return 0x10U;
98}
99static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void)
100{
101 return 0x10U;
102}
103static inline u32 ce_pce_map_r(void)
104{
105 return 0x00104028U;
106}
107#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h
new file mode 100644
index 00000000..cd792835
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h
@@ -0,0 +1,455 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ctxsw_prog_gv100_h_
57#define _hw_ctxsw_prog_gv100_h_
58
59static inline u32 ctxsw_prog_fecs_header_v(void)
60{
61 return 0x00000100U;
62}
63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void)
64{
65 return 0x00000008U;
66}
67static inline u32 ctxsw_prog_main_image_ctl_o(void)
68{
69 return 0x0000000cU;
70}
71static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v)
72{
73 return (v & 0x3fU) << 0U;
74}
75static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void)
76{
77 return 0x00000000U;
78}
79static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void)
80{
81 return 0x00000008U;
82}
83static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void)
84{
85 return 0x00000010U;
86}
87static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void)
88{
89 return 0x00000011U;
90}
91static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void)
92{
93 return 0x00000012U;
94}
95static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void)
96{
97 return 0x00000020U;
98}
99static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void)
100{
101 return 0x00000021U;
102}
103static inline u32 ctxsw_prog_main_image_patch_count_o(void)
104{
105 return 0x00000010U;
106}
107static inline u32 ctxsw_prog_main_image_context_id_o(void)
108{
109 return 0x000000f0U;
110}
111static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void)
112{
113 return 0x00000014U;
114}
115static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void)
116{
117 return 0x00000018U;
118}
119static inline u32 ctxsw_prog_main_image_zcull_o(void)
120{
121 return 0x0000001cU;
122}
123static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void)
124{
125 return 0x00000001U;
126}
127static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void)
128{
129 return 0x00000002U;
130}
131static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void)
132{
133 return 0x00000020U;
134}
135static inline u32 ctxsw_prog_main_image_pm_o(void)
136{
137 return 0x00000028U;
138}
139static inline u32 ctxsw_prog_main_image_pm_mode_m(void)
140{
141 return 0x7U << 0U;
142}
143static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void)
144{
145 return 0x0U;
146}
147static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void)
148{
149 return 0x7U << 3U;
150}
151static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void)
152{
153 return 0x8U;
154}
155static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void)
156{
157 return 0x0U;
158}
159static inline u32 ctxsw_prog_main_image_pm_ptr_o(void)
160{
161 return 0x0000002cU;
162}
163static inline u32 ctxsw_prog_main_image_num_save_ops_o(void)
164{
165 return 0x000000f4U;
166}
167static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void)
168{
169 return 0x000000d0U;
170}
171static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void)
172{
173 return 0x000000d4U;
174}
175static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void)
176{
177 return 0x000000d8U;
178}
179static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void)
180{
181 return 0x000000dcU;
182}
183static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void)
184{
185 return 0x000000f8U;
186}
187static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void)
188{
189 return 0x00000060U;
190}
191static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v)
192{
193 return (v & 0x1ffffU) << 0U;
194}
195static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void)
196{
197 return 0x00000094U;
198}
199static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void)
200{
201 return 0x00000064U;
202}
203static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v)
204{
205 return (v & 0x1ffffU) << 0U;
206}
207static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void)
208{
209 return 0x00000068U;
210}
211static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v)
212{
213 return (v & 0xffffffffU) << 0U;
214}
215static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void)
216{
217 return 0x00000070U;
218}
219static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v)
220{
221 return (v & 0x1ffffU) << 0U;
222}
223static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void)
224{
225 return 0x00000074U;
226}
227static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v)
228{
229 return (v & 0xffffffffU) << 0U;
230}
231static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void)
232{
233 return 0x00000078U;
234}
235static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v)
236{
237 return (v & 0x1ffffU) << 0U;
238}
239static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void)
240{
241 return 0x0000007cU;
242}
243static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v)
244{
245 return (v & 0xffffffffU) << 0U;
246}
247static inline u32 ctxsw_prog_main_image_magic_value_o(void)
248{
249 return 0x000000fcU;
250}
251static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void)
252{
253 return 0x600dc0deU;
254}
255static inline u32 ctxsw_prog_local_priv_register_ctl_o(void)
256{
257 return 0x0000000cU;
258}
259static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r)
260{
261 return (r >> 0U) & 0xffffU;
262}
263static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void)
264{
265 return 0x000000b8U;
266}
267static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v)
268{
269 return (v & 0xffffffffU) << 0U;
270}
271static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void)
272{
273 return 0x000000bcU;
274}
275static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v)
276{
277 return (v & 0x1ffffU) << 0U;
278}
279static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void)
280{
281 return 0x000000c0U;
282}
283static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v)
284{
285 return (v & 0xffffffffU) << 0U;
286}
287static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void)
288{
289 return 0x000000c4U;
290}
291static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v)
292{
293 return (v & 0x1ffffU) << 0U;
294}
295static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void)
296{
297 return 0x000000c8U;
298}
299static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v)
300{
301 return (v & 0xffffffffU) << 0U;
302}
303static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void)
304{
305 return 0x000000ccU;
306}
307static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v)
308{
309 return (v & 0x1ffffU) << 0U;
310}
311static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void)
312{
313 return 0x000000e0U;
314}
315static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v)
316{
317 return (v & 0xffffffffU) << 0U;
318}
319static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void)
320{
321 return 0x000000e4U;
322}
323static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v)
324{
325 return (v & 0x1ffffU) << 0U;
326}
327static inline u32 ctxsw_prog_local_image_ppc_info_o(void)
328{
329 return 0x000000f4U;
330}
331static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r)
332{
333 return (r >> 0U) & 0xffffU;
334}
335static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r)
336{
337 return (r >> 16U) & 0xffffU;
338}
339static inline u32 ctxsw_prog_local_image_num_tpcs_o(void)
340{
341 return 0x000000f8U;
342}
343static inline u32 ctxsw_prog_local_magic_value_o(void)
344{
345 return 0x000000fcU;
346}
347static inline u32 ctxsw_prog_local_magic_value_v_value_v(void)
348{
349 return 0xad0becabU;
350}
351static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void)
352{
353 return 0x000000ecU;
354}
355static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r)
356{
357 return (r >> 0U) & 0xffffU;
358}
359static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r)
360{
361 return (r >> 16U) & 0xffU;
362}
363static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void)
364{
365 return 0x00000100U;
366}
367static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void)
368{
369 return 0x00000004U;
370}
371static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void)
372{
373 return 0x00000000U;
374}
375static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void)
376{
377 return 0x00000002U;
378}
379static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void)
380{
381 return 0x000000a0U;
382}
383static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void)
384{
385 return 2U;
386}
387static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v)
388{
389 return (v & 0x3U) << 0U;
390}
391static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void)
392{
393 return 0x3U << 0U;
394}
395static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r)
396{
397 return (r >> 0U) & 0x3U;
398}
399static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void)
400{
401 return 0x0U;
402}
403static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void)
404{
405 return 0x2U;
406}
407static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void)
408{
409 return 0x000000a4U;
410}
411static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void)
412{
413 return 0x000000a8U;
414}
415static inline u32 ctxsw_prog_main_image_misc_options_o(void)
416{
417 return 0x0000003cU;
418}
419static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void)
420{
421 return 0x1U << 3U;
422}
423static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void)
424{
425 return 0x0U;
426}
427static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void)
428{
429 return 0x00000080U;
430}
431static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v)
432{
433 return (v & 0x3U) << 0U;
434}
435static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void)
436{
437 return 0x1U;
438}
439static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void)
440{
441 return 0x00000084U;
442}
443static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v)
444{
445 return (v & 0x3U) << 0U;
446}
447static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void)
448{
449 return 0x1U;
450}
451static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void)
452{
453 return 0x2U;
454}
455#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h
new file mode 100644
index 00000000..122956bb
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h
@@ -0,0 +1,599 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_falcon_gv100_h_
57#define _hw_falcon_gv100_h_
58
59static inline u32 falcon_falcon_irqsset_r(void)
60{
61 return 0x00000000U;
62}
63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void)
64{
65 return 0x40U;
66}
67static inline u32 falcon_falcon_irqsclr_r(void)
68{
69 return 0x00000004U;
70}
71static inline u32 falcon_falcon_irqstat_r(void)
72{
73 return 0x00000008U;
74}
75static inline u32 falcon_falcon_irqstat_halt_true_f(void)
76{
77 return 0x10U;
78}
79static inline u32 falcon_falcon_irqstat_exterr_true_f(void)
80{
81 return 0x20U;
82}
83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void)
84{
85 return 0x40U;
86}
87static inline u32 falcon_falcon_irqmode_r(void)
88{
89 return 0x0000000cU;
90}
91static inline u32 falcon_falcon_irqmset_r(void)
92{
93 return 0x00000010U;
94}
95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v)
96{
97 return (v & 0x1U) << 0U;
98}
99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v)
100{
101 return (v & 0x1U) << 1U;
102}
103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v)
104{
105 return (v & 0x1U) << 2U;
106}
107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v)
108{
109 return (v & 0x1U) << 3U;
110}
111static inline u32 falcon_falcon_irqmset_halt_f(u32 v)
112{
113 return (v & 0x1U) << 4U;
114}
115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v)
116{
117 return (v & 0x1U) << 5U;
118}
119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v)
120{
121 return (v & 0x1U) << 6U;
122}
123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v)
124{
125 return (v & 0x1U) << 7U;
126}
127static inline u32 falcon_falcon_irqmclr_r(void)
128{
129 return 0x00000014U;
130}
131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v)
132{
133 return (v & 0x1U) << 0U;
134}
135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v)
136{
137 return (v & 0x1U) << 1U;
138}
139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v)
140{
141 return (v & 0x1U) << 2U;
142}
143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v)
144{
145 return (v & 0x1U) << 3U;
146}
147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v)
148{
149 return (v & 0x1U) << 4U;
150}
151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v)
152{
153 return (v & 0x1U) << 5U;
154}
155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v)
156{
157 return (v & 0x1U) << 6U;
158}
159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v)
160{
161 return (v & 0x1U) << 7U;
162}
163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v)
164{
165 return (v & 0xffU) << 8U;
166}
167static inline u32 falcon_falcon_irqmask_r(void)
168{
169 return 0x00000018U;
170}
171static inline u32 falcon_falcon_irqdest_r(void)
172{
173 return 0x0000001cU;
174}
175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v)
176{
177 return (v & 0x1U) << 0U;
178}
179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v)
180{
181 return (v & 0x1U) << 1U;
182}
183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v)
184{
185 return (v & 0x1U) << 2U;
186}
187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v)
188{
189 return (v & 0x1U) << 3U;
190}
191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v)
192{
193 return (v & 0x1U) << 4U;
194}
195static inline u32 falcon_falcon_irqdest_host_exterr_f(u32 v)
196{
197 return (v & 0x1U) << 5U;
198}
199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v)
200{
201 return (v & 0x1U) << 6U;
202}
203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v)
204{
205 return (v & 0x1U) << 7U;
206}
207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v)
208{
209 return (v & 0xffU) << 8U;
210}
211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v)
212{
213 return (v & 0x1U) << 16U;
214}
215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v)
216{
217 return (v & 0x1U) << 17U;
218}
219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v)
220{
221 return (v & 0x1U) << 18U;
222}
223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v)
224{
225 return (v & 0x1U) << 19U;
226}
227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v)
228{
229 return (v & 0x1U) << 20U;
230}
231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v)
232{
233 return (v & 0x1U) << 21U;
234}
235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v)
236{
237 return (v & 0x1U) << 22U;
238}
239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v)
240{
241 return (v & 0x1U) << 23U;
242}
243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v)
244{
245 return (v & 0xffU) << 24U;
246}
247static inline u32 falcon_falcon_curctx_r(void)
248{
249 return 0x00000050U;
250}
251static inline u32 falcon_falcon_nxtctx_r(void)
252{
253 return 0x00000054U;
254}
255static inline u32 falcon_falcon_mailbox0_r(void)
256{
257 return 0x00000040U;
258}
259static inline u32 falcon_falcon_mailbox1_r(void)
260{
261 return 0x00000044U;
262}
263static inline u32 falcon_falcon_itfen_r(void)
264{
265 return 0x00000048U;
266}
267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void)
268{
269 return 0x1U;
270}
271static inline u32 falcon_falcon_idlestate_r(void)
272{
273 return 0x0000004cU;
274}
275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r)
276{
277 return (r >> 0U) & 0x1U;
278}
279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r)
280{
281 return (r >> 1U) & 0x7fffU;
282}
283static inline u32 falcon_falcon_os_r(void)
284{
285 return 0x00000080U;
286}
287static inline u32 falcon_falcon_engctl_r(void)
288{
289 return 0x000000a4U;
290}
291static inline u32 falcon_falcon_cpuctl_r(void)
292{
293 return 0x00000100U;
294}
295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v)
296{
297 return (v & 0x1U) << 1U;
298}
299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v)
300{
301 return (v & 0x1U) << 2U;
302}
303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v)
304{
305 return (v & 0x1U) << 3U;
306}
307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v)
308{
309 return (v & 0x1U) << 4U;
310}
311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void)
312{
313 return 0x1U << 4U;
314}
315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r)
316{
317 return (r >> 4U) & 0x1U;
318}
319static inline u32 falcon_falcon_cpuctl_stopped_m(void)
320{
321 return 0x1U << 5U;
322}
323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
324{
325 return (v & 0x1U) << 6U;
326}
327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void)
328{
329 return 0x1U << 6U;
330}
331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
332{
333 return (r >> 6U) & 0x1U;
334}
335static inline u32 falcon_falcon_cpuctl_alias_r(void)
336{
337 return 0x00000130U;
338}
339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v)
340{
341 return (v & 0x1U) << 1U;
342}
343static inline u32 falcon_falcon_imemc_r(u32 i)
344{
345 return 0x00000180U + i*16U;
346}
347static inline u32 falcon_falcon_imemc_offs_f(u32 v)
348{
349 return (v & 0x3fU) << 2U;
350}
351static inline u32 falcon_falcon_imemc_blk_f(u32 v)
352{
353 return (v & 0xffU) << 8U;
354}
355static inline u32 falcon_falcon_imemc_aincw_f(u32 v)
356{
357 return (v & 0x1U) << 24U;
358}
359static inline u32 falcon_falcon_imemd_r(u32 i)
360{
361 return 0x00000184U + i*16U;
362}
363static inline u32 falcon_falcon_imemt_r(u32 i)
364{
365 return 0x00000188U + i*16U;
366}
367static inline u32 falcon_falcon_sctl_r(void)
368{
369 return 0x00000240U;
370}
371static inline u32 falcon_falcon_mmu_phys_sec_r(void)
372{
373 return 0x00100ce4U;
374}
375static inline u32 falcon_falcon_bootvec_r(void)
376{
377 return 0x00000104U;
378}
379static inline u32 falcon_falcon_bootvec_vec_f(u32 v)
380{
381 return (v & 0xffffffffU) << 0U;
382}
383static inline u32 falcon_falcon_dmactl_r(void)
384{
385 return 0x0000010cU;
386}
387static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void)
388{
389 return 0x1U << 1U;
390}
391static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void)
392{
393 return 0x1U << 2U;
394}
395static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v)
396{
397 return (v & 0x1U) << 0U;
398}
399static inline u32 falcon_falcon_hwcfg_r(void)
400{
401 return 0x00000108U;
402}
403static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r)
404{
405 return (r >> 0U) & 0x1ffU;
406}
407static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r)
408{
409 return (r >> 9U) & 0x1ffU;
410}
411static inline u32 falcon_falcon_dmatrfbase_r(void)
412{
413 return 0x00000110U;
414}
415static inline u32 falcon_falcon_dmatrfbase1_r(void)
416{
417 return 0x00000128U;
418}
419static inline u32 falcon_falcon_dmatrfmoffs_r(void)
420{
421 return 0x00000114U;
422}
423static inline u32 falcon_falcon_dmatrfcmd_r(void)
424{
425 return 0x00000118U;
426}
427static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v)
428{
429 return (v & 0x1U) << 4U;
430}
431static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v)
432{
433 return (v & 0x1U) << 5U;
434}
435static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v)
436{
437 return (v & 0x7U) << 8U;
438}
439static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v)
440{
441 return (v & 0x7U) << 12U;
442}
443static inline u32 falcon_falcon_dmatrffboffs_r(void)
444{
445 return 0x0000011cU;
446}
447static inline u32 falcon_falcon_imctl_debug_r(void)
448{
449 return 0x0000015cU;
450}
451static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v)
452{
453 return (v & 0xffffffU) << 0U;
454}
455static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v)
456{
457 return (v & 0x7U) << 24U;
458}
459static inline u32 falcon_falcon_imstat_r(void)
460{
461 return 0x00000144U;
462}
463static inline u32 falcon_falcon_traceidx_r(void)
464{
465 return 0x00000148U;
466}
467static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r)
468{
469 return (r >> 16U) & 0xffU;
470}
471static inline u32 falcon_falcon_traceidx_idx_f(u32 v)
472{
473 return (v & 0xffU) << 0U;
474}
475static inline u32 falcon_falcon_tracepc_r(void)
476{
477 return 0x0000014cU;
478}
479static inline u32 falcon_falcon_tracepc_pc_v(u32 r)
480{
481 return (r >> 0U) & 0xffffffU;
482}
483static inline u32 falcon_falcon_exterraddr_r(void)
484{
485 return 0x00000168U;
486}
487static inline u32 falcon_falcon_exterrstat_r(void)
488{
489 return 0x0000016cU;
490}
491static inline u32 falcon_falcon_exterrstat_valid_m(void)
492{
493 return 0x1U << 31U;
494}
495static inline u32 falcon_falcon_exterrstat_valid_v(u32 r)
496{
497 return (r >> 31U) & 0x1U;
498}
499static inline u32 falcon_falcon_exterrstat_valid_true_v(void)
500{
501 return 0x00000001U;
502}
503static inline u32 falcon_falcon_icd_cmd_r(void)
504{
505 return 0x00000200U;
506}
507static inline u32 falcon_falcon_icd_cmd_opc_s(void)
508{
509 return 4U;
510}
511static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v)
512{
513 return (v & 0xfU) << 0U;
514}
515static inline u32 falcon_falcon_icd_cmd_opc_m(void)
516{
517 return 0xfU << 0U;
518}
519static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r)
520{
521 return (r >> 0U) & 0xfU;
522}
523static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void)
524{
525 return 0x8U;
526}
527static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void)
528{
529 return 0xeU;
530}
531static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v)
532{
533 return (v & 0x1fU) << 8U;
534}
535static inline u32 falcon_falcon_icd_rdata_r(void)
536{
537 return 0x0000020cU;
538}
539static inline u32 falcon_falcon_dmemc_r(u32 i)
540{
541 return 0x000001c0U + i*8U;
542}
543static inline u32 falcon_falcon_dmemc_offs_f(u32 v)
544{
545 return (v & 0x3fU) << 2U;
546}
547static inline u32 falcon_falcon_dmemc_offs_m(void)
548{
549 return 0x3fU << 2U;
550}
551static inline u32 falcon_falcon_dmemc_blk_f(u32 v)
552{
553 return (v & 0xffU) << 8U;
554}
555static inline u32 falcon_falcon_dmemc_blk_m(void)
556{
557 return 0xffU << 8U;
558}
559static inline u32 falcon_falcon_dmemc_aincw_f(u32 v)
560{
561 return (v & 0x1U) << 24U;
562}
563static inline u32 falcon_falcon_dmemc_aincr_f(u32 v)
564{
565 return (v & 0x1U) << 25U;
566}
567static inline u32 falcon_falcon_dmemd_r(u32 i)
568{
569 return 0x000001c4U + i*8U;
570}
571static inline u32 falcon_falcon_debug1_r(void)
572{
573 return 0x00000090U;
574}
575static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void)
576{
577 return 1U;
578}
579static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v)
580{
581 return (v & 0x1U) << 16U;
582}
583static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void)
584{
585 return 0x1U << 16U;
586}
587static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r)
588{
589 return (r >> 16U) & 0x1U;
590}
591static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void)
592{
593 return 0x0U;
594}
595static inline u32 falcon_falcon_debuginfo_r(void)
596{
597 return 0x00000094U;
598}
599#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h
new file mode 100644
index 00000000..a4fcd1e6
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h
@@ -0,0 +1,1511 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_fb_gv100_h_
57#define _hw_fb_gv100_h_
58
59static inline u32 fb_fbhub_num_active_ltcs_r(void)
60{
61 return 0x00100800U;
62}
63static inline u32 fb_mmu_ctrl_r(void)
64{
65 return 0x00100c80U;
66}
67static inline u32 fb_mmu_ctrl_vm_pg_size_f(u32 v)
68{
69 return (v & 0x1U) << 0U;
70}
71static inline u32 fb_mmu_ctrl_vm_pg_size_128kb_f(void)
72{
73 return 0x0U;
74}
75static inline u32 fb_mmu_ctrl_vm_pg_size_64kb_f(void)
76{
77 return 0x1U;
78}
79static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r)
80{
81 return (r >> 15U) & 0x1U;
82}
83static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void)
84{
85 return 0x0U;
86}
87static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r)
88{
89 return (r >> 16U) & 0xffU;
90}
91static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_v(u32 r)
92{
93 return (r >> 11U) & 0x1U;
94}
95static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_true_f(void)
96{
97 return 0x800U;
98}
99static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_false_f(void)
100{
101 return 0x0U;
102}
103static inline u32 fb_priv_mmu_phy_secure_r(void)
104{
105 return 0x00100ce4U;
106}
107static inline u32 fb_mmu_invalidate_pdb_r(void)
108{
109 return 0x00100cb8U;
110}
111static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void)
112{
113 return 0x0U;
114}
115static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void)
116{
117 return 0x2U;
118}
119static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v)
120{
121 return (v & 0xfffffffU) << 4U;
122}
123static inline u32 fb_mmu_invalidate_r(void)
124{
125 return 0x00100cbcU;
126}
127static inline u32 fb_mmu_invalidate_all_va_true_f(void)
128{
129 return 0x1U;
130}
131static inline u32 fb_mmu_invalidate_all_pdb_true_f(void)
132{
133 return 0x2U;
134}
135static inline u32 fb_mmu_invalidate_hubtlb_only_s(void)
136{
137 return 1U;
138}
139static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v)
140{
141 return (v & 0x1U) << 2U;
142}
143static inline u32 fb_mmu_invalidate_hubtlb_only_m(void)
144{
145 return 0x1U << 2U;
146}
147static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r)
148{
149 return (r >> 2U) & 0x1U;
150}
151static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void)
152{
153 return 0x4U;
154}
155static inline u32 fb_mmu_invalidate_replay_s(void)
156{
157 return 3U;
158}
159static inline u32 fb_mmu_invalidate_replay_f(u32 v)
160{
161 return (v & 0x7U) << 3U;
162}
163static inline u32 fb_mmu_invalidate_replay_m(void)
164{
165 return 0x7U << 3U;
166}
167static inline u32 fb_mmu_invalidate_replay_v(u32 r)
168{
169 return (r >> 3U) & 0x7U;
170}
171static inline u32 fb_mmu_invalidate_replay_none_f(void)
172{
173 return 0x0U;
174}
175static inline u32 fb_mmu_invalidate_replay_start_f(void)
176{
177 return 0x8U;
178}
179static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void)
180{
181 return 0x10U;
182}
183static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void)
184{
185 return 0x20U;
186}
187static inline u32 fb_mmu_invalidate_sys_membar_s(void)
188{
189 return 1U;
190}
191static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v)
192{
193 return (v & 0x1U) << 6U;
194}
195static inline u32 fb_mmu_invalidate_sys_membar_m(void)
196{
197 return 0x1U << 6U;
198}
199static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r)
200{
201 return (r >> 6U) & 0x1U;
202}
203static inline u32 fb_mmu_invalidate_sys_membar_true_f(void)
204{
205 return 0x40U;
206}
207static inline u32 fb_mmu_invalidate_ack_s(void)
208{
209 return 2U;
210}
211static inline u32 fb_mmu_invalidate_ack_f(u32 v)
212{
213 return (v & 0x3U) << 7U;
214}
215static inline u32 fb_mmu_invalidate_ack_m(void)
216{
217 return 0x3U << 7U;
218}
219static inline u32 fb_mmu_invalidate_ack_v(u32 r)
220{
221 return (r >> 7U) & 0x3U;
222}
223static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void)
224{
225 return 0x0U;
226}
227static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void)
228{
229 return 0x100U;
230}
231static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void)
232{
233 return 0x80U;
234}
235static inline u32 fb_mmu_invalidate_cancel_client_id_s(void)
236{
237 return 6U;
238}
239static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v)
240{
241 return (v & 0x3fU) << 9U;
242}
243static inline u32 fb_mmu_invalidate_cancel_client_id_m(void)
244{
245 return 0x3fU << 9U;
246}
247static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r)
248{
249 return (r >> 9U) & 0x3fU;
250}
251static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void)
252{
253 return 5U;
254}
255static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v)
256{
257 return (v & 0x1fU) << 15U;
258}
259static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void)
260{
261 return 0x1fU << 15U;
262}
263static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r)
264{
265 return (r >> 15U) & 0x1fU;
266}
267static inline u32 fb_mmu_invalidate_cancel_client_type_s(void)
268{
269 return 1U;
270}
271static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v)
272{
273 return (v & 0x1U) << 20U;
274}
275static inline u32 fb_mmu_invalidate_cancel_client_type_m(void)
276{
277 return 0x1U << 20U;
278}
279static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r)
280{
281 return (r >> 20U) & 0x1U;
282}
283static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void)
284{
285 return 0x0U;
286}
287static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void)
288{
289 return 0x100000U;
290}
291static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void)
292{
293 return 3U;
294}
295static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v)
296{
297 return (v & 0x7U) << 24U;
298}
299static inline u32 fb_mmu_invalidate_cancel_cache_level_m(void)
300{
301 return 0x7U << 24U;
302}
303static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r)
304{
305 return (r >> 24U) & 0x7U;
306}
307static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void)
308{
309 return 0x0U;
310}
311static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void)
312{
313 return 0x1000000U;
314}
315static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void)
316{
317 return 0x2000000U;
318}
319static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void)
320{
321 return 0x3000000U;
322}
323static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void)
324{
325 return 0x4000000U;
326}
327static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void)
328{
329 return 0x5000000U;
330}
331static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void)
332{
333 return 0x6000000U;
334}
335static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void)
336{
337 return 0x7000000U;
338}
339static inline u32 fb_mmu_invalidate_trigger_s(void)
340{
341 return 1U;
342}
343static inline u32 fb_mmu_invalidate_trigger_f(u32 v)
344{
345 return (v & 0x1U) << 31U;
346}
347static inline u32 fb_mmu_invalidate_trigger_m(void)
348{
349 return 0x1U << 31U;
350}
351static inline u32 fb_mmu_invalidate_trigger_v(u32 r)
352{
353 return (r >> 31U) & 0x1U;
354}
355static inline u32 fb_mmu_invalidate_trigger_true_f(void)
356{
357 return 0x80000000U;
358}
359static inline u32 fb_mmu_debug_wr_r(void)
360{
361 return 0x00100cc8U;
362}
363static inline u32 fb_mmu_debug_wr_aperture_s(void)
364{
365 return 2U;
366}
367static inline u32 fb_mmu_debug_wr_aperture_f(u32 v)
368{
369 return (v & 0x3U) << 0U;
370}
371static inline u32 fb_mmu_debug_wr_aperture_m(void)
372{
373 return 0x3U << 0U;
374}
375static inline u32 fb_mmu_debug_wr_aperture_v(u32 r)
376{
377 return (r >> 0U) & 0x3U;
378}
379static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void)
380{
381 return 0x0U;
382}
383static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void)
384{
385 return 0x2U;
386}
387static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void)
388{
389 return 0x3U;
390}
391static inline u32 fb_mmu_debug_wr_vol_false_f(void)
392{
393 return 0x0U;
394}
395static inline u32 fb_mmu_debug_wr_vol_true_v(void)
396{
397 return 0x00000001U;
398}
399static inline u32 fb_mmu_debug_wr_vol_true_f(void)
400{
401 return 0x4U;
402}
403static inline u32 fb_mmu_debug_wr_addr_f(u32 v)
404{
405 return (v & 0xfffffffU) << 4U;
406}
407static inline u32 fb_mmu_debug_wr_addr_alignment_v(void)
408{
409 return 0x0000000cU;
410}
411static inline u32 fb_mmu_debug_rd_r(void)
412{
413 return 0x00100cccU;
414}
415static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void)
416{
417 return 0x0U;
418}
419static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void)
420{
421 return 0x2U;
422}
423static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void)
424{
425 return 0x3U;
426}
427static inline u32 fb_mmu_debug_rd_vol_false_f(void)
428{
429 return 0x0U;
430}
431static inline u32 fb_mmu_debug_rd_addr_f(u32 v)
432{
433 return (v & 0xfffffffU) << 4U;
434}
435static inline u32 fb_mmu_debug_rd_addr_alignment_v(void)
436{
437 return 0x0000000cU;
438}
439static inline u32 fb_mmu_debug_ctrl_r(void)
440{
441 return 0x00100cc4U;
442}
443static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r)
444{
445 return (r >> 16U) & 0x1U;
446}
447static inline u32 fb_mmu_debug_ctrl_debug_m(void)
448{
449 return 0x1U << 16U;
450}
451static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void)
452{
453 return 0x00000001U;
454}
455static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void)
456{
457 return 0x00000000U;
458}
459static inline u32 fb_mmu_vpr_info_r(void)
460{
461 return 0x00100cd0U;
462}
463static inline u32 fb_mmu_vpr_info_index_f(u32 v)
464{
465 return (v & 0x3U) << 0U;
466}
467static inline u32 fb_mmu_vpr_info_index_v(u32 r)
468{
469 return (r >> 0U) & 0x3U;
470}
471static inline u32 fb_mmu_vpr_info_index_m(void)
472{
473 return 0x3U << 0U;
474}
475static inline u32 fb_mmu_vpr_info_index_addr_lo_v(void)
476{
477 return 0x00000000U;
478}
479static inline u32 fb_mmu_vpr_info_index_addr_hi_v(void)
480{
481 return 0x00000001U;
482}
483static inline u32 fb_mmu_vpr_info_index_cya_lo_v(void)
484{
485 return 0x00000002U;
486}
487static inline u32 fb_mmu_vpr_info_index_cya_hi_v(void)
488{
489 return 0x00000003U;
490}
491static inline u32 fb_mmu_vpr_info_cya_lo_in_use_m(void)
492{
493 return 0x1U << 4U;
494}
495static inline u32 fb_mmu_vpr_info_fetch_f(u32 v)
496{
497 return (v & 0x1U) << 2U;
498}
499static inline u32 fb_mmu_vpr_info_fetch_v(u32 r)
500{
501 return (r >> 2U) & 0x1U;
502}
503static inline u32 fb_mmu_vpr_info_fetch_false_v(void)
504{
505 return 0x00000000U;
506}
507static inline u32 fb_mmu_vpr_info_fetch_true_v(void)
508{
509 return 0x00000001U;
510}
511static inline u32 fb_niso_flush_sysmem_addr_r(void)
512{
513 return 0x00100c10U;
514}
515static inline u32 fb_niso_intr_r(void)
516{
517 return 0x00100a20U;
518}
519static inline u32 fb_niso_intr_hub_access_counter_notify_m(void)
520{
521 return 0x1U << 0U;
522}
523static inline u32 fb_niso_intr_hub_access_counter_notify_pending_f(void)
524{
525 return 0x1U;
526}
527static inline u32 fb_niso_intr_hub_access_counter_error_m(void)
528{
529 return 0x1U << 1U;
530}
531static inline u32 fb_niso_intr_hub_access_counter_error_pending_f(void)
532{
533 return 0x2U;
534}
535static inline u32 fb_niso_intr_mmu_replayable_fault_notify_m(void)
536{
537 return 0x1U << 27U;
538}
539static inline u32 fb_niso_intr_mmu_replayable_fault_notify_pending_f(void)
540{
541 return 0x8000000U;
542}
543static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_m(void)
544{
545 return 0x1U << 28U;
546}
547static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_pending_f(void)
548{
549 return 0x10000000U;
550}
551static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_m(void)
552{
553 return 0x1U << 29U;
554}
555static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f(void)
556{
557 return 0x20000000U;
558}
559static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_m(void)
560{
561 return 0x1U << 30U;
562}
563static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f(void)
564{
565 return 0x40000000U;
566}
567static inline u32 fb_niso_intr_mmu_other_fault_notify_m(void)
568{
569 return 0x1U << 31U;
570}
571static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void)
572{
573 return 0x80000000U;
574}
575static inline u32 fb_niso_intr_en_r(u32 i)
576{
577 return 0x00100a24U + i*4U;
578}
579static inline u32 fb_niso_intr_en__size_1_v(void)
580{
581 return 0x00000002U;
582}
583static inline u32 fb_niso_intr_en_hub_access_counter_notify_f(u32 v)
584{
585 return (v & 0x1U) << 0U;
586}
587static inline u32 fb_niso_intr_en_hub_access_counter_notify_enabled_f(void)
588{
589 return 0x1U;
590}
591static inline u32 fb_niso_intr_en_hub_access_counter_error_f(u32 v)
592{
593 return (v & 0x1U) << 1U;
594}
595static inline u32 fb_niso_intr_en_hub_access_counter_error_enabled_f(void)
596{
597 return 0x2U;
598}
599static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_f(u32 v)
600{
601 return (v & 0x1U) << 27U;
602}
603static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_enabled_f(void)
604{
605 return 0x8000000U;
606}
607static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_f(u32 v)
608{
609 return (v & 0x1U) << 28U;
610}
611static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_enabled_f(void)
612{
613 return 0x10000000U;
614}
615static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_f(u32 v)
616{
617 return (v & 0x1U) << 29U;
618}
619static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_enabled_f(void)
620{
621 return 0x20000000U;
622}
623static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_f(u32 v)
624{
625 return (v & 0x1U) << 30U;
626}
627static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_enabled_f(void)
628{
629 return 0x40000000U;
630}
631static inline u32 fb_niso_intr_en_mmu_other_fault_notify_f(u32 v)
632{
633 return (v & 0x1U) << 31U;
634}
635static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void)
636{
637 return 0x80000000U;
638}
639static inline u32 fb_niso_intr_en_set_r(u32 i)
640{
641 return 0x00100a2cU + i*4U;
642}
643static inline u32 fb_niso_intr_en_set__size_1_v(void)
644{
645 return 0x00000002U;
646}
647static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_m(void)
648{
649 return 0x1U << 0U;
650}
651static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_set_f(void)
652{
653 return 0x1U;
654}
655static inline u32 fb_niso_intr_en_set_hub_access_counter_error_m(void)
656{
657 return 0x1U << 1U;
658}
659static inline u32 fb_niso_intr_en_set_hub_access_counter_error_set_f(void)
660{
661 return 0x2U;
662}
663static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_m(void)
664{
665 return 0x1U << 27U;
666}
667static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_set_f(void)
668{
669 return 0x8000000U;
670}
671static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m(void)
672{
673 return 0x1U << 28U;
674}
675static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_set_f(void)
676{
677 return 0x10000000U;
678}
679static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m(void)
680{
681 return 0x1U << 29U;
682}
683static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_set_f(void)
684{
685 return 0x20000000U;
686}
687static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m(void)
688{
689 return 0x1U << 30U;
690}
691static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_set_f(void)
692{
693 return 0x40000000U;
694}
695static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_m(void)
696{
697 return 0x1U << 31U;
698}
699static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void)
700{
701 return 0x80000000U;
702}
703static inline u32 fb_niso_intr_en_clr_r(u32 i)
704{
705 return 0x00100a34U + i*4U;
706}
707static inline u32 fb_niso_intr_en_clr__size_1_v(void)
708{
709 return 0x00000002U;
710}
711static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_m(void)
712{
713 return 0x1U << 0U;
714}
715static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_set_f(void)
716{
717 return 0x1U;
718}
719static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_m(void)
720{
721 return 0x1U << 1U;
722}
723static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_set_f(void)
724{
725 return 0x2U;
726}
727static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m(void)
728{
729 return 0x1U << 27U;
730}
731static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_set_f(void)
732{
733 return 0x8000000U;
734}
735static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m(void)
736{
737 return 0x1U << 28U;
738}
739static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_set_f(void)
740{
741 return 0x10000000U;
742}
743static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m(void)
744{
745 return 0x1U << 29U;
746}
747static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_set_f(void)
748{
749 return 0x20000000U;
750}
751static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m(void)
752{
753 return 0x1U << 30U;
754}
755static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_set_f(void)
756{
757 return 0x40000000U;
758}
759static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_m(void)
760{
761 return 0x1U << 31U;
762}
763static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_set_f(void)
764{
765 return 0x80000000U;
766}
767static inline u32 fb_niso_intr_en_clr_mmu_non_replay_fault_buffer_v(void)
768{
769 return 0x00000000U;
770}
771static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void)
772{
773 return 0x00000001U;
774}
775static inline u32 fb_mmu_fault_buffer_lo_r(u32 i)
776{
777 return 0x00100e24U + i*20U;
778}
779static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void)
780{
781 return 0x00000002U;
782}
783static inline u32 fb_mmu_fault_buffer_lo_addr_mode_f(u32 v)
784{
785 return (v & 0x1U) << 0U;
786}
787static inline u32 fb_mmu_fault_buffer_lo_addr_mode_v(u32 r)
788{
789 return (r >> 0U) & 0x1U;
790}
791static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_v(void)
792{
793 return 0x00000000U;
794}
795static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_f(void)
796{
797 return 0x0U;
798}
799static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_v(void)
800{
801 return 0x00000001U;
802}
803static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_f(void)
804{
805 return 0x1U;
806}
807static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_f(u32 v)
808{
809 return (v & 0x3U) << 1U;
810}
811static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_v(u32 r)
812{
813 return (r >> 1U) & 0x3U;
814}
815static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_v(void)
816{
817 return 0x00000002U;
818}
819static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_f(void)
820{
821 return 0x4U;
822}
823static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_v(void)
824{
825 return 0x00000003U;
826}
827static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_f(void)
828{
829 return 0x6U;
830}
831static inline u32 fb_mmu_fault_buffer_lo_phys_vol_f(u32 v)
832{
833 return (v & 0x1U) << 3U;
834}
835static inline u32 fb_mmu_fault_buffer_lo_phys_vol_v(u32 r)
836{
837 return (r >> 3U) & 0x1U;
838}
839static inline u32 fb_mmu_fault_buffer_lo_addr_f(u32 v)
840{
841 return (v & 0xfffffU) << 12U;
842}
843static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r)
844{
845 return (r >> 12U) & 0xfffffU;
846}
847static inline u32 fb_mmu_fault_buffer_hi_r(u32 i)
848{
849 return 0x00100e28U + i*20U;
850}
851static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void)
852{
853 return 0x00000002U;
854}
855static inline u32 fb_mmu_fault_buffer_hi_addr_f(u32 v)
856{
857 return (v & 0xffffffffU) << 0U;
858}
859static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r)
860{
861 return (r >> 0U) & 0xffffffffU;
862}
863static inline u32 fb_mmu_fault_buffer_get_r(u32 i)
864{
865 return 0x00100e2cU + i*20U;
866}
867static inline u32 fb_mmu_fault_buffer_get__size_1_v(void)
868{
869 return 0x00000002U;
870}
871static inline u32 fb_mmu_fault_buffer_get_ptr_f(u32 v)
872{
873 return (v & 0xfffffU) << 0U;
874}
875static inline u32 fb_mmu_fault_buffer_get_ptr_m(void)
876{
877 return 0xfffffU << 0U;
878}
879static inline u32 fb_mmu_fault_buffer_get_ptr_v(u32 r)
880{
881 return (r >> 0U) & 0xfffffU;
882}
883static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_f(u32 v)
884{
885 return (v & 0x1U) << 30U;
886}
887static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_m(void)
888{
889 return 0x1U << 30U;
890}
891static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_v(void)
892{
893 return 0x00000001U;
894}
895static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f(void)
896{
897 return 0x40000000U;
898}
899static inline u32 fb_mmu_fault_buffer_get_overflow_f(u32 v)
900{
901 return (v & 0x1U) << 31U;
902}
903static inline u32 fb_mmu_fault_buffer_get_overflow_m(void)
904{
905 return 0x1U << 31U;
906}
907static inline u32 fb_mmu_fault_buffer_get_overflow_clear_v(void)
908{
909 return 0x00000001U;
910}
911static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void)
912{
913 return 0x80000000U;
914}
/*
 * MMU fault buffer PUT register (indexed; two instances, 20-byte
 * stride from 0x00100e30). Fields: PTR bits 19:0, GETPTR_CORRUPTED
 * bit 30, OVERFLOW bit 31, with yes/no constant encodings.
 */
static inline u32 fb_mmu_fault_buffer_put_r(u32 i) { return 0x00100e30U + i*20U; }
static inline u32 fb_mmu_fault_buffer_put__size_1_v(void) { return 0x00000002U; }
static inline u32 fb_mmu_fault_buffer_put_ptr_f(u32 v) { return (v & 0xfffffU) << 0U; }
static inline u32 fb_mmu_fault_buffer_put_ptr_v(u32 r) { return (r >> 0U) & 0xfffffU; }
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_f(void) { return 0x40000000U; }
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_v(void) { return 0x00000000U; }
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_buffer_put_overflow_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 fb_mmu_fault_buffer_put_overflow_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 fb_mmu_fault_buffer_put_overflow_yes_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void) { return 0x80000000U; }
/*
 * MMU fault buffer SIZE register (indexed; two instances, 20-byte
 * stride from 0x00100e34). Fields: VAL bits 19:0, OVERFLOW_INTR
 * bit 29, SET_DEFAULT bit 30, ENABLE bit 31.
 */
static inline u32 fb_mmu_fault_buffer_size_r(u32 i) { return 0x00100e34U + i*20U; }
static inline u32 fb_mmu_fault_buffer_size__size_1_v(void) { return 0x00000002U; }
static inline u32 fb_mmu_fault_buffer_size_val_f(u32 v) { return (v & 0xfffffU) << 0U; }
static inline u32 fb_mmu_fault_buffer_size_val_v(u32 r) { return (r >> 0U) & 0xfffffU; }
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_f(u32 v) { return (v & 0x1U) << 29U; }
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_v(u32 r) { return (r >> 29U) & 0x1U; }
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_f(void) { return 0x20000000U; }
static inline u32 fb_mmu_fault_buffer_size_set_default_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 fb_mmu_fault_buffer_size_set_default_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 fb_mmu_fault_buffer_size_set_default_yes_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_buffer_size_set_default_yes_f(void) { return 0x40000000U; }
static inline u32 fb_mmu_fault_buffer_size_enable_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 fb_mmu_fault_buffer_size_enable_m(void) { return 0x1U << 31U; }
static inline u32 fb_mmu_fault_buffer_size_enable_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 fb_mmu_fault_buffer_size_enable_true_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_buffer_size_enable_true_f(void) { return 0x80000000U; }
/*
 * MMU fault address registers (LO at 0x00100e4c, HI at 0x00100e50).
 * LO carries PHYS_APERTURE in bits 1:0 (sys_coh=2, sys_nocoh=3) and
 * ADDR bits 31:12; HI carries the upper 32 address bits.
 */
static inline u32 fb_mmu_fault_addr_lo_r(void) { return 0x00100e4cU; }
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_f(u32 v) { return (v & 0x3U) << 0U; }
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_v(u32 r) { return (r >> 0U) & 0x3U; }
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_v(void) { return 0x00000002U; }
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_f(void) { return 0x2U; }
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_v(void) { return 0x00000003U; }
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_f(void) { return 0x3U; }
static inline u32 fb_mmu_fault_addr_lo_addr_f(u32 v) { return (v & 0xfffffU) << 12U; }
static inline u32 fb_mmu_fault_addr_lo_addr_v(u32 r) { return (r >> 12U) & 0xfffffU; }
static inline u32 fb_mmu_fault_addr_hi_r(void) { return 0x00100e50U; }
static inline u32 fb_mmu_fault_addr_hi_addr_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 fb_mmu_fault_addr_hi_addr_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
/*
 * MMU fault instance-block pointer registers (LO 0x00100e54,
 * HI 0x00100e58). LO: ENGINE_ID bits 8:0, APERTURE bits 11:10
 * (sys_coh=2, sys_nocoh=3), ADDR bits 31:12; HI: upper address bits.
 */
static inline u32 fb_mmu_fault_inst_lo_r(void) { return 0x00100e54U; }
static inline u32 fb_mmu_fault_inst_lo_engine_id_v(u32 r) { return (r >> 0U) & 0x1ffU; }
static inline u32 fb_mmu_fault_inst_lo_aperture_v(u32 r) { return (r >> 10U) & 0x3U; }
static inline u32 fb_mmu_fault_inst_lo_aperture_sys_coh_v(void) { return 0x00000002U; }
static inline u32 fb_mmu_fault_inst_lo_aperture_sys_nocoh_v(void) { return 0x00000003U; }
static inline u32 fb_mmu_fault_inst_lo_addr_f(u32 v) { return (v & 0xfffffU) << 12U; }
static inline u32 fb_mmu_fault_inst_lo_addr_v(u32 r) { return (r >> 12U) & 0xfffffU; }
static inline u32 fb_mmu_fault_inst_hi_r(void) { return 0x00100e58U; }
static inline u32 fb_mmu_fault_inst_hi_addr_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
/*
 * MMU fault info register (0x00100e5c) field extractors:
 * FAULT_TYPE 4:0, REPLAYABLE_FAULT 7, CLIENT 14:8, ACCESS_TYPE 19:16,
 * CLIENT_TYPE 20, GPC_ID 28:24, PROTECTED_MODE 29,
 * REPLAYABLE_FAULT_EN 30, VALID 31.
 */
static inline u32 fb_mmu_fault_info_r(void) { return 0x00100e5cU; }
static inline u32 fb_mmu_fault_info_fault_type_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 fb_mmu_fault_info_replayable_fault_v(u32 r) { return (r >> 7U) & 0x1U; }
static inline u32 fb_mmu_fault_info_client_v(u32 r) { return (r >> 8U) & 0x7fU; }
static inline u32 fb_mmu_fault_info_access_type_v(u32 r) { return (r >> 16U) & 0xfU; }
static inline u32 fb_mmu_fault_info_client_type_v(u32 r) { return (r >> 20U) & 0x1U; }
static inline u32 fb_mmu_fault_info_gpc_id_v(u32 r) { return (r >> 24U) & 0x1fU; }
static inline u32 fb_mmu_fault_info_protected_mode_v(u32 r) { return (r >> 29U) & 0x1U; }
static inline u32 fb_mmu_fault_info_replayable_fault_en_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 fb_mmu_fault_info_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
/*
 * MMU fault status register (0x00100e60), DROPPED_* bits 7:0.
 * One bit per fault source — BAR1/BAR2/IFB/OTHER, each with a
 * physical and a virtual flavor. The set/clear field constants are
 * identical (write-1-to-clear style per the generated encoding).
 */
static inline u32 fb_mmu_fault_status_r(void) { return 0x00100e60U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_m(void) { return 0x1U << 0U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_f(void) { return 0x1U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_f(void) { return 0x1U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_m(void) { return 0x1U << 1U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_f(void) { return 0x2U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_f(void) { return 0x2U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_m(void) { return 0x1U << 2U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_f(void) { return 0x4U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_f(void) { return 0x4U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_m(void) { return 0x1U << 3U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_f(void) { return 0x8U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_f(void) { return 0x8U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_m(void) { return 0x1U << 4U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_f(void) { return 0x10U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_f(void) { return 0x10U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_m(void) { return 0x1U << 5U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_f(void) { return 0x20U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_f(void) { return 0x20U; }
static inline u32 fb_mmu_fault_status_dropped_other_phys_m(void) { return 0x1U << 6U; }
static inline u32 fb_mmu_fault_status_dropped_other_phys_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_other_phys_set_f(void) { return 0x40U; }
static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_f(void) { return 0x40U; }
static inline u32 fb_mmu_fault_status_dropped_other_virt_m(void) { return 0x1U << 7U; }
static inline u32 fb_mmu_fault_status_dropped_other_virt_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_other_virt_set_f(void) { return 0x80U; }
static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_f(void) { return 0x80U; }
/*
 * MMU fault status bits 15:8 — per-buffer state flags for the
 * replayable (even bits) and non-replayable (odd bits) fault buffers:
 * pending (8/9), error (10/11), overflow (12/13) and
 * getptr-corrupted (14/15). The pending/error/overflow flags also
 * provide a zero reset constant.
 */
static inline u32 fb_mmu_fault_status_replayable_m(void) { return 0x1U << 8U; }
static inline u32 fb_mmu_fault_status_replayable_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_replayable_set_f(void) { return 0x100U; }
static inline u32 fb_mmu_fault_status_replayable_reset_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_status_non_replayable_m(void) { return 0x1U << 9U; }
static inline u32 fb_mmu_fault_status_non_replayable_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_non_replayable_set_f(void) { return 0x200U; }
static inline u32 fb_mmu_fault_status_non_replayable_reset_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_status_replayable_error_m(void) { return 0x1U << 10U; }
static inline u32 fb_mmu_fault_status_replayable_error_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_replayable_error_set_f(void) { return 0x400U; }
static inline u32 fb_mmu_fault_status_replayable_error_reset_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_status_non_replayable_error_m(void) { return 0x1U << 11U; }
static inline u32 fb_mmu_fault_status_non_replayable_error_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_non_replayable_error_set_f(void) { return 0x800U; }
static inline u32 fb_mmu_fault_status_non_replayable_error_reset_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_status_replayable_overflow_m(void) { return 0x1U << 12U; }
static inline u32 fb_mmu_fault_status_replayable_overflow_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_replayable_overflow_set_f(void) { return 0x1000U; }
static inline u32 fb_mmu_fault_status_replayable_overflow_reset_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_status_non_replayable_overflow_m(void) { return 0x1U << 13U; }
static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_f(void) { return 0x2000U; }
static inline u32 fb_mmu_fault_status_non_replayable_overflow_reset_f(void) { return 0x0U; }
static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_m(void) { return 0x1U << 14U; }
static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_f(void) { return 0x4000U; }
static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_m(void) { return 0x1U << 15U; }
static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_f(void) { return 0x8000U; }
/*
 * MMU fault status high bits: BUSY at bit 30, VALID at bit 31
 * (VALID has identical set/clear constants, 0x80000000).
 */
static inline u32 fb_mmu_fault_status_busy_m(void) { return 0x1U << 30U; }
static inline u32 fb_mmu_fault_status_busy_true_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_busy_true_f(void) { return 0x40000000U; }
static inline u32 fb_mmu_fault_status_valid_m(void) { return 0x1U << 31U; }
static inline u32 fb_mmu_fault_status_valid_set_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_valid_set_f(void) { return 0x80000000U; }
static inline u32 fb_mmu_fault_status_valid_clear_v(void) { return 0x00000001U; }
static inline u32 fb_mmu_fault_status_valid_clear_f(void) { return 0x80000000U; }
/*
 * Miscellaneous FB registers: local memory range (0x00100ce0 —
 * LOWER_SCALE 3:0, LOWER_MAG 9:4, ECC_MODE 30), NISO scrub status
 * (0x00100b20, FLAG bit 0) and MMU priv level mask (0x00100cdc,
 * WRITE_VIOLATION bit 7).
 */
static inline u32 fb_mmu_local_memory_range_r(void) { return 0x00100ce0U; }
static inline u32 fb_mmu_local_memory_range_lower_scale_v(u32 r) { return (r >> 0U) & 0xfU; }
static inline u32 fb_mmu_local_memory_range_lower_mag_v(u32 r) { return (r >> 4U) & 0x3fU; }
static inline u32 fb_mmu_local_memory_range_ecc_mode_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 fb_niso_scrub_status_r(void) { return 0x00100b20U; }
static inline u32 fb_niso_scrub_status_flag_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 fb_mmu_priv_level_mask_r(void) { return 0x00100cdcU; }
static inline u32 fb_mmu_priv_level_mask_write_violation_m(void) { return 0x1U << 7U; }
1511#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h
new file mode 100644
index 00000000..743afb1e
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h
@@ -0,0 +1,551 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_fifo_gv100_h_
57#define _hw_fifo_gv100_h_
58
/*
 * FIFO BAR1 base register (0x00002254: PTR bits 27:0, 4KB-aligned
 * per the 12-bit align shift, VALID bit 28) and the USERD writeback
 * control register (0x0000225c: TIMER bits 7:0, TIMESCALE bits 15:12).
 */
static inline u32 fifo_bar1_base_r(void) { return 0x00002254U; }
static inline u32 fifo_bar1_base_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 fifo_bar1_base_ptr_align_shift_v(void) { return 0x0000000cU; }
static inline u32 fifo_bar1_base_valid_false_f(void) { return 0x0U; }
static inline u32 fifo_bar1_base_valid_true_f(void) { return 0x10000000U; }
static inline u32 fifo_userd_writeback_r(void) { return 0x0000225cU; }
static inline u32 fifo_userd_writeback_timer_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 fifo_userd_writeback_timer_disabled_v(void) { return 0x00000000U; }
static inline u32 fifo_userd_writeback_timer_shorter_v(void) { return 0x00000003U; }
static inline u32 fifo_userd_writeback_timer_100us_v(void) { return 0x00000064U; }
static inline u32 fifo_userd_writeback_timescale_f(u32 v) { return (v & 0xfU) << 12U; }
static inline u32 fifo_userd_writeback_timescale_0_v(void) { return 0x00000000U; }
/*
 * FIFO runlist submission registers. The global runlist base/submit
 * pair lives at 0x2270/0x2274; the per-engine pairs are indexed with
 * an 8-byte stride from 0x2280/0x2284 (13 instances). PB timeslice
 * and PBDMA map registers are indexed with a 4-byte stride.
 */
static inline u32 fifo_runlist_base_r(void) { return 0x00002270U; }
static inline u32 fifo_runlist_base_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 fifo_runlist_base_target_vid_mem_f(void) { return 0x0U; }
static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) { return 0x20000000U; }
static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) { return 0x30000000U; }
static inline u32 fifo_runlist_r(void) { return 0x00002274U; }
static inline u32 fifo_runlist_engine_f(u32 v) { return (v & 0xfU) << 20U; }
static inline u32 fifo_eng_runlist_base_r(u32 i) { return 0x00002280U + i*8U; }
static inline u32 fifo_eng_runlist_base__size_1_v(void) { return 0x0000000dU; }
static inline u32 fifo_eng_runlist_r(u32 i) { return 0x00002284U + i*8U; }
static inline u32 fifo_eng_runlist__size_1_v(void) { return 0x0000000dU; }
static inline u32 fifo_eng_runlist_length_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 fifo_eng_runlist_length_max_v(void) { return 0x0000ffffU; }
static inline u32 fifo_eng_runlist_pending_true_f(void) { return 0x100000U; }
static inline u32 fifo_pb_timeslice_r(u32 i) { return 0x00002350U + i*4U; }
static inline u32 fifo_pb_timeslice_timeout_16_f(void) { return 0x10U; }
static inline u32 fifo_pb_timeslice_timescale_0_f(void) { return 0x0U; }
static inline u32 fifo_pb_timeslice_enable_true_f(void) { return 0x10000000U; }
static inline u32 fifo_pbdma_map_r(u32 i) { return 0x00002390U + i*4U; }
/*
 * FIFO interrupt registers. INTR_0 (0x2100) pending/reset constants
 * share the same bit per source. INTR_EN_0/1 are the enable masks,
 * followed by the per-cause detail registers (bind/sched/chsw error)
 * and the per-PBDMA interrupt ID bitfield register (14 PBDMAs).
 */
static inline u32 fifo_intr_0_r(void) { return 0x00002100U; }
static inline u32 fifo_intr_0_bind_error_pending_f(void) { return 0x1U; }
static inline u32 fifo_intr_0_bind_error_reset_f(void) { return 0x1U; }
static inline u32 fifo_intr_0_sched_error_pending_f(void) { return 0x100U; }
static inline u32 fifo_intr_0_sched_error_reset_f(void) { return 0x100U; }
static inline u32 fifo_intr_0_chsw_error_pending_f(void) { return 0x10000U; }
static inline u32 fifo_intr_0_chsw_error_reset_f(void) { return 0x10000U; }
static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) { return 0x800000U; }
static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) { return 0x800000U; }
static inline u32 fifo_intr_0_lb_error_pending_f(void) { return 0x1000000U; }
static inline u32 fifo_intr_0_lb_error_reset_f(void) { return 0x1000000U; }
static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) { return 0x20000000U; }
static inline u32 fifo_intr_0_runlist_event_pending_f(void) { return 0x40000000U; }
static inline u32 fifo_intr_0_channel_intr_pending_f(void) { return 0x80000000U; }
static inline u32 fifo_intr_en_0_r(void) { return 0x00002140U; }
static inline u32 fifo_intr_en_0_sched_error_f(u32 v) { return (v & 0x1U) << 8U; }
static inline u32 fifo_intr_en_0_sched_error_m(void) { return 0x1U << 8U; }
static inline u32 fifo_intr_en_1_r(void) { return 0x00002528U; }
static inline u32 fifo_intr_bind_error_r(void) { return 0x0000252cU; }
static inline u32 fifo_intr_sched_error_r(void) { return 0x0000254cU; }
static inline u32 fifo_intr_sched_error_code_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 fifo_intr_chsw_error_r(void) { return 0x0000256cU; }
static inline u32 fifo_intr_pbdma_id_r(void) { return 0x000025a0U; }
static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) { return (v & 0x1U) << (0U + i*1U); }
static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) { return (r >> (0U + i*1U)) & 0x1U; }
static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) { return 0x0000000eU; }
/*
 * FIFO runlist interrupt, FB timeout (30-bit period field),
 * per-runlist scheduler disable and preempt bitfields (one bit per
 * runlist index), and the channel/TSG preempt trigger register
 * (0x2634: ID bits 11:0, TYPE bit 24, PENDING bit 20).
 */
static inline u32 fifo_intr_runlist_r(void) { return 0x00002a00U; }
static inline u32 fifo_fb_timeout_r(void) { return 0x00002a04U; }
static inline u32 fifo_fb_timeout_period_m(void) { return 0x3fffffffU << 0U; }
static inline u32 fifo_fb_timeout_period_max_f(void) { return 0x3fffffffU; }
static inline u32 fifo_fb_timeout_period_init_f(void) { return 0x3c00U; }
static inline u32 fifo_sched_disable_r(void) { return 0x00002630U; }
static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) { return (v & 0x1U) << (0U + i*1U); }
static inline u32 fifo_sched_disable_runlist_m(u32 i) { return 0x1U << (0U + i*1U); }
static inline u32 fifo_sched_disable_true_v(void) { return 0x00000001U; }
static inline u32 fifo_runlist_preempt_r(void) { return 0x00002638U; }
static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i) { return (v & 0x1U) << (0U + i*1U); }
static inline u32 fifo_runlist_preempt_runlist_m(u32 i) { return 0x1U << (0U + i*1U); }
static inline u32 fifo_runlist_preempt_runlist_pending_v(void) { return 0x00000001U; }
static inline u32 fifo_preempt_r(void) { return 0x00002634U; }
static inline u32 fifo_preempt_pending_true_f(void) { return 0x100000U; }
static inline u32 fifo_preempt_type_channel_f(void) { return 0x0U; }
static inline u32 fifo_preempt_type_tsg_f(void) { return 0x1000000U; }
static inline u32 fifo_preempt_chid_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 fifo_preempt_id_f(u32 v) { return (v & 0xfffU) << 0U; }
/*
 * Per-engine status registers (indexed, 8-byte stride from 0x2640,
 * 15 instances). Fields: ID 11:0, ID_TYPE 12 (chid=0/tsgid=1),
 * CTX_STATUS 15:13, CTXSW 15, NEXT_ID 27:16, NEXT_ID_TYPE 28,
 * ENG_RELOAD 29, FAULTED 30, ENGINE busy/idle 31.
 */
static inline u32 fifo_engine_status_r(u32 i) { return 0x00002640U + i*8U; }
static inline u32 fifo_engine_status__size_1_v(void) { return 0x0000000fU; }
static inline u32 fifo_engine_status_id_v(u32 r) { return (r >> 0U) & 0xfffU; }
static inline u32 fifo_engine_status_id_type_v(u32 r) { return (r >> 12U) & 0x1U; }
static inline u32 fifo_engine_status_id_type_chid_v(void) { return 0x00000000U; }
static inline u32 fifo_engine_status_id_type_tsgid_v(void) { return 0x00000001U; }
static inline u32 fifo_engine_status_ctx_status_v(u32 r) { return (r >> 13U) & 0x7U; }
static inline u32 fifo_engine_status_ctx_status_valid_v(void) { return 0x00000001U; }
static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) { return 0x00000005U; }
static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) { return 0x00000006U; }
static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) { return 0x00000007U; }
static inline u32 fifo_engine_status_next_id_v(u32 r) { return (r >> 16U) & 0xfffU; }
static inline u32 fifo_engine_status_next_id_type_v(u32 r) { return (r >> 28U) & 0x1U; }
static inline u32 fifo_engine_status_next_id_type_chid_v(void) { return 0x00000000U; }
static inline u32 fifo_engine_status_eng_reload_v(u32 r) { return (r >> 29U) & 0x1U; }
static inline u32 fifo_engine_status_faulted_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 fifo_engine_status_faulted_true_v(void) { return 0x00000001U; }
static inline u32 fifo_engine_status_engine_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 fifo_engine_status_engine_idle_v(void) { return 0x00000000U; }
static inline u32 fifo_engine_status_engine_busy_v(void) { return 0x00000001U; }
static inline u32 fifo_engine_status_ctxsw_v(u32 r) { return (r >> 15U) & 0x1U; }
static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) { return 0x00000001U; }
static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) { return 0x8000U; }
/*
 * Per-PBDMA status registers (indexed, 4-byte stride from 0x3080,
 * 14 instances) with the same ID/CHAN_STATUS/NEXT_ID field layout as
 * the engine status registers, plus the FIFO config register
 * (0x2004: PBDMA count and fault-id base) and the FB interface
 * control/status register (0x26f0).
 */
static inline u32 fifo_pbdma_status_r(u32 i) { return 0x00003080U + i*4U; }
static inline u32 fifo_pbdma_status__size_1_v(void) { return 0x0000000eU; }
static inline u32 fifo_pbdma_status_id_v(u32 r) { return (r >> 0U) & 0xfffU; }
static inline u32 fifo_pbdma_status_id_type_v(u32 r) { return (r >> 12U) & 0x1U; }
static inline u32 fifo_pbdma_status_id_type_chid_v(void) { return 0x00000000U; }
static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) { return 0x00000001U; }
static inline u32 fifo_pbdma_status_chan_status_v(u32 r) { return (r >> 13U) & 0x7U; }
static inline u32 fifo_pbdma_status_chan_status_valid_v(void) { return 0x00000001U; }
static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) { return 0x00000005U; }
static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) { return 0x00000006U; }
static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) { return 0x00000007U; }
static inline u32 fifo_pbdma_status_next_id_v(u32 r) { return (r >> 16U) & 0xfffU; }
static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) { return (r >> 28U) & 0x1U; }
static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) { return 0x00000000U; }
static inline u32 fifo_pbdma_status_chsw_v(u32 r) { return (r >> 15U) & 0x1U; }
static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) { return 0x00000001U; }
static inline u32 fifo_cfg0_r(void) { return 0x00002004U; }
static inline u32 fifo_cfg0_num_pbdma_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r) { return (r >> 16U) & 0xffU; }
static inline u32 fifo_fb_iface_r(void) { return 0x000026f0U; }
static inline u32 fifo_fb_iface_control_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 fifo_fb_iface_control_enable_f(void) { return 0x1U; }
static inline u32 fifo_fb_iface_status_v(u32 r) { return (r >> 4U) & 0x1U; }
static inline u32 fifo_fb_iface_status_enabled_f(void) { return 0x10U; }
551#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h
new file mode 100644
index 00000000..b6045626
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_flush_gv100_h_
57#define _hw_flush_gv100_h_
58
59static inline u32 flush_l2_system_invalidate_r(void)
60{
61 return 0x00070004U;
62}
63static inline u32 flush_l2_system_invalidate_pending_v(u32 r)
64{
65 return (r >> 0U) & 0x1U;
66}
67static inline u32 flush_l2_system_invalidate_pending_busy_v(void)
68{
69 return 0x00000001U;
70}
71static inline u32 flush_l2_system_invalidate_pending_busy_f(void)
72{
73 return 0x1U;
74}
75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r)
76{
77 return (r >> 1U) & 0x1U;
78}
79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void)
80{
81 return 0x00000001U;
82}
83static inline u32 flush_l2_flush_dirty_r(void)
84{
85 return 0x00070010U;
86}
87static inline u32 flush_l2_flush_dirty_pending_v(u32 r)
88{
89 return (r >> 0U) & 0x1U;
90}
91static inline u32 flush_l2_flush_dirty_pending_empty_v(void)
92{
93 return 0x00000000U;
94}
95static inline u32 flush_l2_flush_dirty_pending_empty_f(void)
96{
97 return 0x0U;
98}
99static inline u32 flush_l2_flush_dirty_pending_busy_v(void)
100{
101 return 0x00000001U;
102}
103static inline u32 flush_l2_flush_dirty_pending_busy_f(void)
104{
105 return 0x1U;
106}
107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r)
108{
109 return (r >> 1U) & 0x1U;
110}
111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void)
112{
113 return 0x00000000U;
114}
115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void)
116{
117 return 0x0U;
118}
119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void)
120{
121 return 0x00000001U;
122}
123static inline u32 flush_l2_clean_comptags_r(void)
124{
125 return 0x0007000cU;
126}
127static inline u32 flush_l2_clean_comptags_pending_v(u32 r)
128{
129 return (r >> 0U) & 0x1U;
130}
131static inline u32 flush_l2_clean_comptags_pending_empty_v(void)
132{
133 return 0x00000000U;
134}
135static inline u32 flush_l2_clean_comptags_pending_empty_f(void)
136{
137 return 0x0U;
138}
139static inline u32 flush_l2_clean_comptags_pending_busy_v(void)
140{
141 return 0x00000001U;
142}
143static inline u32 flush_l2_clean_comptags_pending_busy_f(void)
144{
145 return 0x1U;
146}
147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r)
148{
149 return (r >> 1U) & 0x1U;
150}
151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void)
152{
153 return 0x00000000U;
154}
155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void)
156{
157 return 0x0U;
158}
159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void)
160{
161 return 0x00000001U;
162}
163static inline u32 flush_fb_flush_r(void)
164{
165 return 0x00070000U;
166}
167static inline u32 flush_fb_flush_pending_v(u32 r)
168{
169 return (r >> 0U) & 0x1U;
170}
171static inline u32 flush_fb_flush_pending_busy_v(void)
172{
173 return 0x00000001U;
174}
175static inline u32 flush_fb_flush_pending_busy_f(void)
176{
177 return 0x1U;
178}
179static inline u32 flush_fb_flush_outstanding_v(u32 r)
180{
181 return (r >> 1U) & 0x1U;
182}
183static inline u32 flush_fb_flush_outstanding_true_v(void)
184{
185 return 0x00000001U;
186}
187#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h
new file mode 100644
index 00000000..f7eacd29
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_fuse_gv100_h_
57#define _hw_fuse_gv100_h_
58
59static inline u32 fuse_status_opt_tpc_gpc_r(u32 i)
60{
61 return 0x00021c38U + i*4U;
62}
63static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i)
64{
65 return 0x00021838U + i*4U;
66}
67static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void)
68{
69 return 0x00021944U;
70}
71static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v)
72{
73 return (v & 0xffU) << 0U;
74}
75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void)
76{
77 return 0xffU << 0U;
78}
79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r)
80{
81 return (r >> 0U) & 0xffU;
82}
83static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void)
84{
85 return 0x00021948U;
86}
87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v)
88{
89 return (v & 0x1U) << 0U;
90}
91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void)
92{
93 return 0x1U << 0U;
94}
95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r)
96{
97 return (r >> 0U) & 0x1U;
98}
99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void)
100{
101 return 0x1U;
102}
103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void)
104{
105 return 0x0U;
106}
107static inline u32 fuse_status_opt_fbio_r(void)
108{
109 return 0x00021c14U;
110}
111static inline u32 fuse_status_opt_fbio_data_f(u32 v)
112{
113 return (v & 0xffffU) << 0U;
114}
115static inline u32 fuse_status_opt_fbio_data_m(void)
116{
117 return 0xffffU << 0U;
118}
119static inline u32 fuse_status_opt_fbio_data_v(u32 r)
120{
121 return (r >> 0U) & 0xffffU;
122}
123static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i)
124{
125 return 0x00021d70U + i*4U;
126}
127static inline u32 fuse_status_opt_fbp_r(void)
128{
129 return 0x00021d38U;
130}
131static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i)
132{
133 return (r >> (0U + i*1U)) & 0x1U;
134}
135static inline u32 fuse_opt_ecc_en_r(void)
136{
137 return 0x00021228U;
138}
139static inline u32 fuse_opt_feature_fuses_override_disable_r(void)
140{
141 return 0x000213f0U;
142}
143#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h
new file mode 100644
index 00000000..cf89f5f8
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h
@@ -0,0 +1,1287 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_gmmu_gv100_h_
57#define _hw_gmmu_gv100_h_
58
59static inline u32 gmmu_new_pde_is_pte_w(void)
60{
61 return 0U;
62}
63static inline u32 gmmu_new_pde_is_pte_false_f(void)
64{
65 return 0x0U;
66}
67static inline u32 gmmu_new_pde_aperture_w(void)
68{
69 return 0U;
70}
71static inline u32 gmmu_new_pde_aperture_invalid_f(void)
72{
73 return 0x0U;
74}
75static inline u32 gmmu_new_pde_aperture_video_memory_f(void)
76{
77 return 0x2U;
78}
79static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void)
80{
81 return 0x4U;
82}
83static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void)
84{
85 return 0x6U;
86}
87static inline u32 gmmu_new_pde_address_sys_f(u32 v)
88{
89 return (v & 0xffffffU) << 8U;
90}
91static inline u32 gmmu_new_pde_address_sys_w(void)
92{
93 return 0U;
94}
95static inline u32 gmmu_new_pde_vol_w(void)
96{
97 return 0U;
98}
99static inline u32 gmmu_new_pde_vol_true_f(void)
100{
101 return 0x8U;
102}
103static inline u32 gmmu_new_pde_vol_false_f(void)
104{
105 return 0x0U;
106}
107static inline u32 gmmu_new_pde_address_shift_v(void)
108{
109 return 0x0000000cU;
110}
111static inline u32 gmmu_new_pde__size_v(void)
112{
113 return 0x00000008U;
114}
115static inline u32 gmmu_new_dual_pde_is_pte_w(void)
116{
117 return 0U;
118}
119static inline u32 gmmu_new_dual_pde_is_pte_false_f(void)
120{
121 return 0x0U;
122}
123static inline u32 gmmu_new_dual_pde_aperture_big_w(void)
124{
125 return 0U;
126}
127static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void)
128{
129 return 0x0U;
130}
131static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void)
132{
133 return 0x2U;
134}
135static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void)
136{
137 return 0x4U;
138}
139static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void)
140{
141 return 0x6U;
142}
143static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v)
144{
145 return (v & 0xfffffffU) << 4U;
146}
147static inline u32 gmmu_new_dual_pde_address_big_sys_w(void)
148{
149 return 0U;
150}
151static inline u32 gmmu_new_dual_pde_aperture_small_w(void)
152{
153 return 2U;
154}
155static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void)
156{
157 return 0x0U;
158}
159static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void)
160{
161 return 0x2U;
162}
163static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void)
164{
165 return 0x4U;
166}
167static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void)
168{
169 return 0x6U;
170}
171static inline u32 gmmu_new_dual_pde_vol_small_w(void)
172{
173 return 2U;
174}
175static inline u32 gmmu_new_dual_pde_vol_small_true_f(void)
176{
177 return 0x8U;
178}
179static inline u32 gmmu_new_dual_pde_vol_small_false_f(void)
180{
181 return 0x0U;
182}
183static inline u32 gmmu_new_dual_pde_vol_big_w(void)
184{
185 return 0U;
186}
187static inline u32 gmmu_new_dual_pde_vol_big_true_f(void)
188{
189 return 0x8U;
190}
191static inline u32 gmmu_new_dual_pde_vol_big_false_f(void)
192{
193 return 0x0U;
194}
195static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v)
196{
197 return (v & 0xffffffU) << 8U;
198}
199static inline u32 gmmu_new_dual_pde_address_small_sys_w(void)
200{
201 return 2U;
202}
203static inline u32 gmmu_new_dual_pde_address_shift_v(void)
204{
205 return 0x0000000cU;
206}
207static inline u32 gmmu_new_dual_pde_address_big_shift_v(void)
208{
209 return 0x00000008U;
210}
211static inline u32 gmmu_new_dual_pde__size_v(void)
212{
213 return 0x00000010U;
214}
215static inline u32 gmmu_new_pte__size_v(void)
216{
217 return 0x00000008U;
218}
219static inline u32 gmmu_new_pte_valid_w(void)
220{
221 return 0U;
222}
223static inline u32 gmmu_new_pte_valid_true_f(void)
224{
225 return 0x1U;
226}
227static inline u32 gmmu_new_pte_valid_false_f(void)
228{
229 return 0x0U;
230}
231static inline u32 gmmu_new_pte_privilege_w(void)
232{
233 return 0U;
234}
235static inline u32 gmmu_new_pte_privilege_true_f(void)
236{
237 return 0x20U;
238}
239static inline u32 gmmu_new_pte_privilege_false_f(void)
240{
241 return 0x0U;
242}
243static inline u32 gmmu_new_pte_address_sys_f(u32 v)
244{
245 return (v & 0xffffffU) << 8U;
246}
247static inline u32 gmmu_new_pte_address_sys_w(void)
248{
249 return 0U;
250}
251static inline u32 gmmu_new_pte_address_vid_f(u32 v)
252{
253 return (v & 0xffffffU) << 8U;
254}
255static inline u32 gmmu_new_pte_address_vid_w(void)
256{
257 return 0U;
258}
259static inline u32 gmmu_new_pte_vol_w(void)
260{
261 return 0U;
262}
263static inline u32 gmmu_new_pte_vol_true_f(void)
264{
265 return 0x8U;
266}
267static inline u32 gmmu_new_pte_vol_false_f(void)
268{
269 return 0x0U;
270}
271static inline u32 gmmu_new_pte_aperture_w(void)
272{
273 return 0U;
274}
275static inline u32 gmmu_new_pte_aperture_video_memory_f(void)
276{
277 return 0x0U;
278}
279static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void)
280{
281 return 0x4U;
282}
283static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void)
284{
285 return 0x6U;
286}
287static inline u32 gmmu_new_pte_read_only_w(void)
288{
289 return 0U;
290}
291static inline u32 gmmu_new_pte_read_only_true_f(void)
292{
293 return 0x40U;
294}
295static inline u32 gmmu_new_pte_comptagline_f(u32 v)
296{
297 return (v & 0x3ffffU) << 4U;
298}
299static inline u32 gmmu_new_pte_comptagline_w(void)
300{
301 return 1U;
302}
303static inline u32 gmmu_new_pte_kind_f(u32 v)
304{
305 return (v & 0xffU) << 24U;
306}
307static inline u32 gmmu_new_pte_kind_w(void)
308{
309 return 1U;
310}
311static inline u32 gmmu_new_pte_address_shift_v(void)
312{
313 return 0x0000000cU;
314}
315static inline u32 gmmu_pte_kind_f(u32 v)
316{
317 return (v & 0xffU) << 4U;
318}
319static inline u32 gmmu_pte_kind_w(void)
320{
321 return 1U;
322}
323static inline u32 gmmu_pte_kind_invalid_v(void)
324{
325 return 0x000000ffU;
326}
327static inline u32 gmmu_pte_kind_pitch_v(void)
328{
329 return 0x00000000U;
330}
331static inline u32 gmmu_pte_kind_z16_v(void)
332{
333 return 0x00000001U;
334}
335static inline u32 gmmu_pte_kind_z16_2c_v(void)
336{
337 return 0x00000002U;
338}
339static inline u32 gmmu_pte_kind_z16_ms2_2c_v(void)
340{
341 return 0x00000003U;
342}
343static inline u32 gmmu_pte_kind_z16_ms4_2c_v(void)
344{
345 return 0x00000004U;
346}
347static inline u32 gmmu_pte_kind_z16_ms8_2c_v(void)
348{
349 return 0x00000005U;
350}
351static inline u32 gmmu_pte_kind_z16_ms16_2c_v(void)
352{
353 return 0x00000006U;
354}
355static inline u32 gmmu_pte_kind_z16_2z_v(void)
356{
357 return 0x00000007U;
358}
359static inline u32 gmmu_pte_kind_z16_ms2_2z_v(void)
360{
361 return 0x00000008U;
362}
363static inline u32 gmmu_pte_kind_z16_ms4_2z_v(void)
364{
365 return 0x00000009U;
366}
367static inline u32 gmmu_pte_kind_z16_ms8_2z_v(void)
368{
369 return 0x0000000aU;
370}
371static inline u32 gmmu_pte_kind_z16_ms16_2z_v(void)
372{
373 return 0x0000000bU;
374}
375static inline u32 gmmu_pte_kind_z16_2cz_v(void)
376{
377 return 0x00000036U;
378}
379static inline u32 gmmu_pte_kind_z16_ms2_2cz_v(void)
380{
381 return 0x00000037U;
382}
383static inline u32 gmmu_pte_kind_z16_ms4_2cz_v(void)
384{
385 return 0x00000038U;
386}
387static inline u32 gmmu_pte_kind_z16_ms8_2cz_v(void)
388{
389 return 0x00000039U;
390}
391static inline u32 gmmu_pte_kind_z16_ms16_2cz_v(void)
392{
393 return 0x0000005fU;
394}
395static inline u32 gmmu_pte_kind_s8z24_v(void)
396{
397 return 0x00000011U;
398}
399static inline u32 gmmu_pte_kind_s8z24_1z_v(void)
400{
401 return 0x00000012U;
402}
403static inline u32 gmmu_pte_kind_s8z24_ms2_1z_v(void)
404{
405 return 0x00000013U;
406}
407static inline u32 gmmu_pte_kind_s8z24_ms4_1z_v(void)
408{
409 return 0x00000014U;
410}
411static inline u32 gmmu_pte_kind_s8z24_ms8_1z_v(void)
412{
413 return 0x00000015U;
414}
415static inline u32 gmmu_pte_kind_s8z24_ms16_1z_v(void)
416{
417 return 0x00000016U;
418}
419static inline u32 gmmu_pte_kind_s8z24_2cz_v(void)
420{
421 return 0x00000017U;
422}
423static inline u32 gmmu_pte_kind_s8z24_ms2_2cz_v(void)
424{
425 return 0x00000018U;
426}
427static inline u32 gmmu_pte_kind_s8z24_ms4_2cz_v(void)
428{
429 return 0x00000019U;
430}
431static inline u32 gmmu_pte_kind_s8z24_ms8_2cz_v(void)
432{
433 return 0x0000001aU;
434}
435static inline u32 gmmu_pte_kind_s8z24_ms16_2cz_v(void)
436{
437 return 0x0000001bU;
438}
439static inline u32 gmmu_pte_kind_s8z24_2cs_v(void)
440{
441 return 0x0000001cU;
442}
443static inline u32 gmmu_pte_kind_s8z24_ms2_2cs_v(void)
444{
445 return 0x0000001dU;
446}
447static inline u32 gmmu_pte_kind_s8z24_ms4_2cs_v(void)
448{
449 return 0x0000001eU;
450}
451static inline u32 gmmu_pte_kind_s8z24_ms8_2cs_v(void)
452{
453 return 0x0000001fU;
454}
455static inline u32 gmmu_pte_kind_s8z24_ms16_2cs_v(void)
456{
457 return 0x00000020U;
458}
459static inline u32 gmmu_pte_kind_s8z24_4cszv_v(void)
460{
461 return 0x00000021U;
462}
463static inline u32 gmmu_pte_kind_s8z24_ms2_4cszv_v(void)
464{
465 return 0x00000022U;
466}
467static inline u32 gmmu_pte_kind_s8z24_ms4_4cszv_v(void)
468{
469 return 0x00000023U;
470}
471static inline u32 gmmu_pte_kind_s8z24_ms8_4cszv_v(void)
472{
473 return 0x00000024U;
474}
475static inline u32 gmmu_pte_kind_s8z24_ms16_4cszv_v(void)
476{
477 return 0x00000025U;
478}
479static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_v(void)
480{
481 return 0x00000026U;
482}
483static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_v(void)
484{
485 return 0x00000027U;
486}
487static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_v(void)
488{
489 return 0x00000028U;
490}
491static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_v(void)
492{
493 return 0x00000029U;
494}
495static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_1zv_v(void)
496{
497 return 0x0000002eU;
498}
499static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_1zv_v(void)
500{
501 return 0x0000002fU;
502}
503static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_1zv_v(void)
504{
505 return 0x00000030U;
506}
507static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_1zv_v(void)
508{
509 return 0x00000031U;
510}
511static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2cs_v(void)
512{
513 return 0x00000032U;
514}
515static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2cs_v(void)
516{
517 return 0x00000033U;
518}
519static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2cs_v(void)
520{
521 return 0x00000034U;
522}
523static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2cs_v(void)
524{
525 return 0x00000035U;
526}
527static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2czv_v(void)
528{
529 return 0x0000003aU;
530}
531static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2czv_v(void)
532{
533 return 0x0000003bU;
534}
535static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2czv_v(void)
536{
537 return 0x0000003cU;
538}
539static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2czv_v(void)
540{
541 return 0x0000003dU;
542}
543static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2zv_v(void)
544{
545 return 0x0000003eU;
546}
547static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2zv_v(void)
548{
549 return 0x0000003fU;
550}
551static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2zv_v(void)
552{
553 return 0x00000040U;
554}
555static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2zv_v(void)
556{
557 return 0x00000041U;
558}
559static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_4cszv_v(void)
560{
561 return 0x00000042U;
562}
563static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_4cszv_v(void)
564{
565 return 0x00000043U;
566}
567static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_4cszv_v(void)
568{
569 return 0x00000044U;
570}
571static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_4cszv_v(void)
572{
573 return 0x00000045U;
574}
575static inline u32 gmmu_pte_kind_z24s8_v(void)
576{
577 return 0x00000046U;
578}
579static inline u32 gmmu_pte_kind_z24s8_1z_v(void)
580{
581 return 0x00000047U;
582}
583static inline u32 gmmu_pte_kind_z24s8_ms2_1z_v(void)
584{
585 return 0x00000048U;
586}
587static inline u32 gmmu_pte_kind_z24s8_ms4_1z_v(void)
588{
589 return 0x00000049U;
590}
591static inline u32 gmmu_pte_kind_z24s8_ms8_1z_v(void)
592{
593 return 0x0000004aU;
594}
595static inline u32 gmmu_pte_kind_z24s8_ms16_1z_v(void)
596{
597 return 0x0000004bU;
598}
599static inline u32 gmmu_pte_kind_z24s8_2cs_v(void)
600{
601 return 0x0000004cU;
602}
603static inline u32 gmmu_pte_kind_z24s8_ms2_2cs_v(void)
604{
605 return 0x0000004dU;
606}
607static inline u32 gmmu_pte_kind_z24s8_ms4_2cs_v(void)
608{
609 return 0x0000004eU;
610}
611static inline u32 gmmu_pte_kind_z24s8_ms8_2cs_v(void)
612{
613 return 0x0000004fU;
614}
615static inline u32 gmmu_pte_kind_z24s8_ms16_2cs_v(void)
616{
617 return 0x00000050U;
618}
619static inline u32 gmmu_pte_kind_z24s8_2cz_v(void)
620{
621 return 0x00000051U;
622}
623static inline u32 gmmu_pte_kind_z24s8_ms2_2cz_v(void)
624{
625 return 0x00000052U;
626}
627static inline u32 gmmu_pte_kind_z24s8_ms4_2cz_v(void)
628{
629 return 0x00000053U;
630}
631static inline u32 gmmu_pte_kind_z24s8_ms8_2cz_v(void)
632{
633 return 0x00000054U;
634}
635static inline u32 gmmu_pte_kind_z24s8_ms16_2cz_v(void)
636{
637 return 0x00000055U;
638}
639static inline u32 gmmu_pte_kind_z24s8_4cszv_v(void)
640{
641 return 0x00000056U;
642}
643static inline u32 gmmu_pte_kind_z24s8_ms2_4cszv_v(void)
644{
645 return 0x00000057U;
646}
647static inline u32 gmmu_pte_kind_z24s8_ms4_4cszv_v(void)
648{
649 return 0x00000058U;
650}
651static inline u32 gmmu_pte_kind_z24s8_ms8_4cszv_v(void)
652{
653 return 0x00000059U;
654}
655static inline u32 gmmu_pte_kind_z24s8_ms16_4cszv_v(void)
656{
657 return 0x0000005aU;
658}
659static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_v(void)
660{
661 return 0x0000005bU;
662}
663static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_v(void)
664{
665 return 0x0000005cU;
666}
667static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_v(void)
668{
669 return 0x0000005dU;
670}
671static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_v(void)
672{
673 return 0x0000005eU;
674}
675static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_1zv_v(void)
676{
677 return 0x00000063U;
678}
679static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_1zv_v(void)
680{
681 return 0x00000064U;
682}
683static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_1zv_v(void)
684{
685 return 0x00000065U;
686}
687static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_1zv_v(void)
688{
689 return 0x00000066U;
690}
691static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2cs_v(void)
692{
693 return 0x00000067U;
694}
695static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2cs_v(void)
696{
697 return 0x00000068U;
698}
699static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2cs_v(void)
700{
701 return 0x00000069U;
702}
703static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2cs_v(void)
704{
705 return 0x0000006aU;
706}
707static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2czv_v(void)
708{
709 return 0x0000006fU;
710}
711static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2czv_v(void)
712{
713 return 0x00000070U;
714}
715static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2czv_v(void)
716{
717 return 0x00000071U;
718}
719static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2czv_v(void)
720{
721 return 0x00000072U;
722}
723static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2zv_v(void)
724{
725 return 0x00000073U;
726}
727static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2zv_v(void)
728{
729 return 0x00000074U;
730}
731static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2zv_v(void)
732{
733 return 0x00000075U;
734}
735static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2zv_v(void)
736{
737 return 0x00000076U;
738}
739static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_4cszv_v(void)
740{
741 return 0x00000077U;
742}
743static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_4cszv_v(void)
744{
745 return 0x00000078U;
746}
747static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_4cszv_v(void)
748{
749 return 0x00000079U;
750}
751static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_4cszv_v(void)
752{
753 return 0x0000007aU;
754}
755static inline u32 gmmu_pte_kind_zf32_v(void)
756{
757 return 0x0000007bU;
758}
759static inline u32 gmmu_pte_kind_zf32_1z_v(void)
760{
761 return 0x0000007cU;
762}
763static inline u32 gmmu_pte_kind_zf32_ms2_1z_v(void)
764{
765 return 0x0000007dU;
766}
767static inline u32 gmmu_pte_kind_zf32_ms4_1z_v(void)
768{
769 return 0x0000007eU;
770}
771static inline u32 gmmu_pte_kind_zf32_ms8_1z_v(void)
772{
773 return 0x0000007fU;
774}
775static inline u32 gmmu_pte_kind_zf32_ms16_1z_v(void)
776{
777 return 0x00000080U;
778}
779static inline u32 gmmu_pte_kind_zf32_2cs_v(void)
780{
781 return 0x00000081U;
782}
783static inline u32 gmmu_pte_kind_zf32_ms2_2cs_v(void)
784{
785 return 0x00000082U;
786}
787static inline u32 gmmu_pte_kind_zf32_ms4_2cs_v(void)
788{
789 return 0x00000083U;
790}
791static inline u32 gmmu_pte_kind_zf32_ms8_2cs_v(void)
792{
793 return 0x00000084U;
794}
795static inline u32 gmmu_pte_kind_zf32_ms16_2cs_v(void)
796{
797 return 0x00000085U;
798}
799static inline u32 gmmu_pte_kind_zf32_2cz_v(void)
800{
801 return 0x00000086U;
802}
803static inline u32 gmmu_pte_kind_zf32_ms2_2cz_v(void)
804{
805 return 0x00000087U;
806}
807static inline u32 gmmu_pte_kind_zf32_ms4_2cz_v(void)
808{
809 return 0x00000088U;
810}
811static inline u32 gmmu_pte_kind_zf32_ms8_2cz_v(void)
812{
813 return 0x00000089U;
814}
815static inline u32 gmmu_pte_kind_zf32_ms16_2cz_v(void)
816{
817 return 0x0000008aU;
818}
819static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_v(void)
820{
821 return 0x0000008bU;
822}
823static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_v(void)
824{
825 return 0x0000008cU;
826}
827static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_v(void)
828{
829 return 0x0000008dU;
830}
831static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_v(void)
832{
833 return 0x0000008eU;
834}
835static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1cs_v(void)
836{
837 return 0x0000008fU;
838}
839static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1cs_v(void)
840{
841 return 0x00000090U;
842}
843static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1cs_v(void)
844{
845 return 0x00000091U;
846}
847static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1cs_v(void)
848{
849 return 0x00000092U;
850}
851static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1zv_v(void)
852{
853 return 0x00000097U;
854}
855static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1zv_v(void)
856{
857 return 0x00000098U;
858}
859static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1zv_v(void)
860{
861 return 0x00000099U;
862}
863static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1zv_v(void)
864{
865 return 0x0000009aU;
866}
867static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1czv_v(void)
868{
869 return 0x0000009bU;
870}
871static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1czv_v(void)
872{
873 return 0x0000009cU;
874}
875static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1czv_v(void)
876{
877 return 0x0000009dU;
878}
879static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1czv_v(void)
880{
881 return 0x0000009eU;
882}
883static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cs_v(void)
884{
885 return 0x0000009fU;
886}
887static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cs_v(void)
888{
889 return 0x000000a0U;
890}
891static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cs_v(void)
892{
893 return 0x000000a1U;
894}
895static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cs_v(void)
896{
897 return 0x000000a2U;
898}
899static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cszv_v(void)
900{
901 return 0x000000a3U;
902}
903static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cszv_v(void)
904{
905 return 0x000000a4U;
906}
907static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cszv_v(void)
908{
909 return 0x000000a5U;
910}
911static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cszv_v(void)
912{
913 return 0x000000a6U;
914}
915static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_v(void)
916{
917 return 0x000000a7U;
918}
919static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_v(void)
920{
921 return 0x000000a8U;
922}
923static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_v(void)
924{
925 return 0x000000a9U;
926}
927static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_v(void)
928{
929 return 0x000000aaU;
930}
931static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1cs_v(void)
932{
933 return 0x000000abU;
934}
935static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1cs_v(void)
936{
937 return 0x000000acU;
938}
939static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1cs_v(void)
940{
941 return 0x000000adU;
942}
943static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1cs_v(void)
944{
945 return 0x000000aeU;
946}
947static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1zv_v(void)
948{
949 return 0x000000b3U;
950}
951static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1zv_v(void)
952{
953 return 0x000000b4U;
954}
955static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1zv_v(void)
956{
957 return 0x000000b5U;
958}
959static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1zv_v(void)
960{
961 return 0x000000b6U;
962}
963static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1czv_v(void)
964{
965 return 0x000000b7U;
966}
967static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1czv_v(void)
968{
969 return 0x000000b8U;
970}
971static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1czv_v(void)
972{
973 return 0x000000b9U;
974}
975static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1czv_v(void)
976{
977 return 0x000000baU;
978}
979static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cs_v(void)
980{
981 return 0x000000bbU;
982}
983static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cs_v(void)
984{
985 return 0x000000bcU;
986}
987static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cs_v(void)
988{
989 return 0x000000bdU;
990}
991static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cs_v(void)
992{
993 return 0x000000beU;
994}
995static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cszv_v(void)
996{
997 return 0x000000bfU;
998}
999static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cszv_v(void)
1000{
1001 return 0x000000c0U;
1002}
1003static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cszv_v(void)
1004{
1005 return 0x000000c1U;
1006}
1007static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cszv_v(void)
1008{
1009 return 0x000000c2U;
1010}
1011static inline u32 gmmu_pte_kind_zf32_x24s8_v(void)
1012{
1013 return 0x000000c3U;
1014}
1015static inline u32 gmmu_pte_kind_zf32_x24s8_1cs_v(void)
1016{
1017 return 0x000000c4U;
1018}
1019static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_1cs_v(void)
1020{
1021 return 0x000000c5U;
1022}
1023static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_1cs_v(void)
1024{
1025 return 0x000000c6U;
1026}
1027static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_1cs_v(void)
1028{
1029 return 0x000000c7U;
1030}
1031static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_1cs_v(void)
1032{
1033 return 0x000000c8U;
1034}
1035static inline u32 gmmu_pte_kind_zf32_x24s8_2cszv_v(void)
1036{
1037 return 0x000000ceU;
1038}
1039static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cszv_v(void)
1040{
1041 return 0x000000cfU;
1042}
1043static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cszv_v(void)
1044{
1045 return 0x000000d0U;
1046}
1047static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cszv_v(void)
1048{
1049 return 0x000000d1U;
1050}
1051static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cszv_v(void)
1052{
1053 return 0x000000d2U;
1054}
1055static inline u32 gmmu_pte_kind_zf32_x24s8_2cs_v(void)
1056{
1057 return 0x000000d3U;
1058}
1059static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cs_v(void)
1060{
1061 return 0x000000d4U;
1062}
1063static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cs_v(void)
1064{
1065 return 0x000000d5U;
1066}
1067static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cs_v(void)
1068{
1069 return 0x000000d6U;
1070}
1071static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cs_v(void)
1072{
1073 return 0x000000d7U;
1074}
1075static inline u32 gmmu_pte_kind_generic_16bx2_v(void)
1076{
1077 return 0x000000feU;
1078}
1079static inline u32 gmmu_pte_kind_c32_2c_v(void)
1080{
1081 return 0x000000d8U;
1082}
1083static inline u32 gmmu_pte_kind_c32_2cbr_v(void)
1084{
1085 return 0x000000d9U;
1086}
1087static inline u32 gmmu_pte_kind_c32_2cba_v(void)
1088{
1089 return 0x000000daU;
1090}
1091static inline u32 gmmu_pte_kind_c32_2cra_v(void)
1092{
1093 return 0x000000dbU;
1094}
1095static inline u32 gmmu_pte_kind_c32_2bra_v(void)
1096{
1097 return 0x000000dcU;
1098}
1099static inline u32 gmmu_pte_kind_c32_ms2_2c_v(void)
1100{
1101 return 0x000000ddU;
1102}
1103static inline u32 gmmu_pte_kind_c32_ms2_2cbr_v(void)
1104{
1105 return 0x000000deU;
1106}
1107static inline u32 gmmu_pte_kind_c32_ms2_4cbra_v(void)
1108{
1109 return 0x000000ccU;
1110}
1111static inline u32 gmmu_pte_kind_c32_ms4_2c_v(void)
1112{
1113 return 0x000000dfU;
1114}
1115static inline u32 gmmu_pte_kind_c32_ms4_2cbr_v(void)
1116{
1117 return 0x000000e0U;
1118}
1119static inline u32 gmmu_pte_kind_c32_ms4_2cba_v(void)
1120{
1121 return 0x000000e1U;
1122}
1123static inline u32 gmmu_pte_kind_c32_ms4_2cra_v(void)
1124{
1125 return 0x000000e2U;
1126}
1127static inline u32 gmmu_pte_kind_c32_ms4_2bra_v(void)
1128{
1129 return 0x000000e3U;
1130}
1131static inline u32 gmmu_pte_kind_c32_ms4_4cbra_v(void)
1132{
1133 return 0x0000002cU;
1134}
1135static inline u32 gmmu_pte_kind_c32_ms8_ms16_2c_v(void)
1136{
1137 return 0x000000e4U;
1138}
1139static inline u32 gmmu_pte_kind_c32_ms8_ms16_2cra_v(void)
1140{
1141 return 0x000000e5U;
1142}
1143static inline u32 gmmu_pte_kind_c64_2c_v(void)
1144{
1145 return 0x000000e6U;
1146}
1147static inline u32 gmmu_pte_kind_c64_2cbr_v(void)
1148{
1149 return 0x000000e7U;
1150}
1151static inline u32 gmmu_pte_kind_c64_2cba_v(void)
1152{
1153 return 0x000000e8U;
1154}
1155static inline u32 gmmu_pte_kind_c64_2cra_v(void)
1156{
1157 return 0x000000e9U;
1158}
1159static inline u32 gmmu_pte_kind_c64_2bra_v(void)
1160{
1161 return 0x000000eaU;
1162}
1163static inline u32 gmmu_pte_kind_c64_ms2_2c_v(void)
1164{
1165 return 0x000000ebU;
1166}
1167static inline u32 gmmu_pte_kind_c64_ms2_2cbr_v(void)
1168{
1169 return 0x000000ecU;
1170}
1171static inline u32 gmmu_pte_kind_c64_ms2_4cbra_v(void)
1172{
1173 return 0x000000cdU;
1174}
1175static inline u32 gmmu_pte_kind_c64_ms4_2c_v(void)
1176{
1177 return 0x000000edU;
1178}
1179static inline u32 gmmu_pte_kind_c64_ms4_2cbr_v(void)
1180{
1181 return 0x000000eeU;
1182}
1183static inline u32 gmmu_pte_kind_c64_ms4_2cba_v(void)
1184{
1185 return 0x000000efU;
1186}
1187static inline u32 gmmu_pte_kind_c64_ms4_2cra_v(void)
1188{
1189 return 0x000000f0U;
1190}
1191static inline u32 gmmu_pte_kind_c64_ms4_2bra_v(void)
1192{
1193 return 0x000000f1U;
1194}
1195static inline u32 gmmu_pte_kind_c64_ms4_4cbra_v(void)
1196{
1197 return 0x0000002dU;
1198}
1199static inline u32 gmmu_pte_kind_c64_ms8_ms16_2c_v(void)
1200{
1201 return 0x000000f2U;
1202}
1203static inline u32 gmmu_pte_kind_c64_ms8_ms16_2cra_v(void)
1204{
1205 return 0x000000f3U;
1206}
1207static inline u32 gmmu_pte_kind_c128_2c_v(void)
1208{
1209 return 0x000000f4U;
1210}
1211static inline u32 gmmu_pte_kind_c128_2cr_v(void)
1212{
1213 return 0x000000f5U;
1214}
1215static inline u32 gmmu_pte_kind_c128_ms2_2c_v(void)
1216{
1217 return 0x000000f6U;
1218}
1219static inline u32 gmmu_pte_kind_c128_ms2_2cr_v(void)
1220{
1221 return 0x000000f7U;
1222}
1223static inline u32 gmmu_pte_kind_c128_ms4_2c_v(void)
1224{
1225 return 0x000000f8U;
1226}
1227static inline u32 gmmu_pte_kind_c128_ms4_2cr_v(void)
1228{
1229 return 0x000000f9U;
1230}
1231static inline u32 gmmu_pte_kind_c128_ms8_ms16_2c_v(void)
1232{
1233 return 0x000000faU;
1234}
1235static inline u32 gmmu_pte_kind_c128_ms8_ms16_2cr_v(void)
1236{
1237 return 0x000000fbU;
1238}
1239static inline u32 gmmu_pte_kind_x8c24_v(void)
1240{
1241 return 0x000000fcU;
1242}
1243static inline u32 gmmu_pte_kind_pitch_no_swizzle_v(void)
1244{
1245 return 0x000000fdU;
1246}
1247static inline u32 gmmu_pte_kind_smsked_message_v(void)
1248{
1249 return 0x000000caU;
1250}
1251static inline u32 gmmu_pte_kind_smhost_message_v(void)
1252{
1253 return 0x000000cbU;
1254}
1255static inline u32 gmmu_pte_kind_s8_v(void)
1256{
1257 return 0x0000002aU;
1258}
1259static inline u32 gmmu_pte_kind_s8_2s_v(void)
1260{
1261 return 0x0000002bU;
1262}
1263static inline u32 gmmu_fault_client_type_gpc_v(void)
1264{
1265 return 0x00000000U;
1266}
1267static inline u32 gmmu_fault_client_type_hub_v(void)
1268{
1269 return 0x00000001U;
1270}
1271static inline u32 gmmu_fault_type_unbound_inst_block_v(void)
1272{
1273 return 0x00000004U;
1274}
1275static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void)
1276{
1277 return 0x00000005U;
1278}
1279static inline u32 gmmu_fault_mmu_eng_id_physical_v(void)
1280{
1281 return 0x0000001fU;
1282}
1283static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void)
1284{
1285 return 0x0000000fU;
1286}
1287#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h
new file mode 100644
index 00000000..09cbc793
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h
@@ -0,0 +1,3935 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Function naming determines intended use:
 *
 *     <x>_r(void) : Returns the offset for register <x>.
 *
 *     <x>_o(void) : Returns the offset for element <x>.
 *
 *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
 *
 *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
 *
 *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
 *         and masked to place it at field <y> of register <x>.  This value
 *         can be |'d with others to produce a full register value for
 *         register <x>.
 *
 *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
 *         value can be ~'d and then &'d to clear the value of field <y> for
 *         register <x>.
 *
 *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
 *         to place it at field <y> of register <x>.  This value can be |'d
 *         with others to produce a full register value for <x>.
 *
 *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
 *         <x> value 'r' after being shifted to place its LSB at bit 0.
 *         This value is suitable for direct comparison with other unshifted
 *         values appropriate for use in field <y> of register <x>.
 *
 *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
 *         field <y> of register <x>.  This value is suitable for direct
 *         comparison with unshifted values appropriate for use in field <y>
 *         of register <x>.
 */
#ifndef _hw_gr_gv100_h_
#define _hw_gr_gv100_h_

59static inline u32 gr_intr_r(void)
60{
61 return 0x00400100U;
62}
63static inline u32 gr_intr_notify_pending_f(void)
64{
65 return 0x1U;
66}
67static inline u32 gr_intr_notify_reset_f(void)
68{
69 return 0x1U;
70}
71static inline u32 gr_intr_semaphore_pending_f(void)
72{
73 return 0x2U;
74}
75static inline u32 gr_intr_semaphore_reset_f(void)
76{
77 return 0x2U;
78}
79static inline u32 gr_intr_illegal_method_pending_f(void)
80{
81 return 0x10U;
82}
83static inline u32 gr_intr_illegal_method_reset_f(void)
84{
85 return 0x10U;
86}
87static inline u32 gr_intr_illegal_notify_pending_f(void)
88{
89 return 0x40U;
90}
91static inline u32 gr_intr_illegal_notify_reset_f(void)
92{
93 return 0x40U;
94}
95static inline u32 gr_intr_firmware_method_f(u32 v)
96{
97 return (v & 0x1U) << 8U;
98}
99static inline u32 gr_intr_firmware_method_pending_f(void)
100{
101 return 0x100U;
102}
103static inline u32 gr_intr_firmware_method_reset_f(void)
104{
105 return 0x100U;
106}
107static inline u32 gr_intr_illegal_class_pending_f(void)
108{
109 return 0x20U;
110}
111static inline u32 gr_intr_illegal_class_reset_f(void)
112{
113 return 0x20U;
114}
115static inline u32 gr_intr_fecs_error_pending_f(void)
116{
117 return 0x80000U;
118}
119static inline u32 gr_intr_fecs_error_reset_f(void)
120{
121 return 0x80000U;
122}
123static inline u32 gr_intr_class_error_pending_f(void)
124{
125 return 0x100000U;
126}
127static inline u32 gr_intr_class_error_reset_f(void)
128{
129 return 0x100000U;
130}
131static inline u32 gr_intr_exception_pending_f(void)
132{
133 return 0x200000U;
134}
135static inline u32 gr_intr_exception_reset_f(void)
136{
137 return 0x200000U;
138}
139static inline u32 gr_fecs_intr_r(void)
140{
141 return 0x00400144U;
142}
143static inline u32 gr_class_error_r(void)
144{
145 return 0x00400110U;
146}
147static inline u32 gr_class_error_code_v(u32 r)
148{
149 return (r >> 0U) & 0xffffU;
150}
151static inline u32 gr_intr_nonstall_r(void)
152{
153 return 0x00400120U;
154}
155static inline u32 gr_intr_nonstall_trap_pending_f(void)
156{
157 return 0x2U;
158}
159static inline u32 gr_intr_en_r(void)
160{
161 return 0x0040013cU;
162}
163static inline u32 gr_exception_r(void)
164{
165 return 0x00400108U;
166}
167static inline u32 gr_exception_fe_m(void)
168{
169 return 0x1U << 0U;
170}
171static inline u32 gr_exception_gpc_m(void)
172{
173 return 0x1U << 24U;
174}
175static inline u32 gr_exception_memfmt_m(void)
176{
177 return 0x1U << 1U;
178}
179static inline u32 gr_exception_ds_m(void)
180{
181 return 0x1U << 4U;
182}
183static inline u32 gr_exception_sked_m(void)
184{
185 return 0x1U << 8U;
186}
187static inline u32 gr_exception1_r(void)
188{
189 return 0x00400118U;
190}
191static inline u32 gr_exception1_gpc_0_pending_f(void)
192{
193 return 0x1U;
194}
195static inline u32 gr_exception2_r(void)
196{
197 return 0x0040011cU;
198}
199static inline u32 gr_exception_en_r(void)
200{
201 return 0x00400138U;
202}
203static inline u32 gr_exception_en_fe_m(void)
204{
205 return 0x1U << 0U;
206}
207static inline u32 gr_exception_en_fe_enabled_f(void)
208{
209 return 0x1U;
210}
211static inline u32 gr_exception_en_gpc_m(void)
212{
213 return 0x1U << 24U;
214}
215static inline u32 gr_exception_en_gpc_enabled_f(void)
216{
217 return 0x1000000U;
218}
219static inline u32 gr_exception_en_memfmt_m(void)
220{
221 return 0x1U << 1U;
222}
223static inline u32 gr_exception_en_memfmt_enabled_f(void)
224{
225 return 0x2U;
226}
227static inline u32 gr_exception_en_ds_m(void)
228{
229 return 0x1U << 4U;
230}
231static inline u32 gr_exception_en_ds_enabled_f(void)
232{
233 return 0x10U;
234}
235static inline u32 gr_exception1_en_r(void)
236{
237 return 0x00400130U;
238}
239static inline u32 gr_exception2_en_r(void)
240{
241 return 0x00400134U;
242}
243static inline u32 gr_gpfifo_ctl_r(void)
244{
245 return 0x00400500U;
246}
247static inline u32 gr_gpfifo_ctl_access_f(u32 v)
248{
249 return (v & 0x1U) << 0U;
250}
251static inline u32 gr_gpfifo_ctl_access_disabled_f(void)
252{
253 return 0x0U;
254}
255static inline u32 gr_gpfifo_ctl_access_enabled_f(void)
256{
257 return 0x1U;
258}
259static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v)
260{
261 return (v & 0x1U) << 16U;
262}
263static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void)
264{
265 return 0x00000001U;
266}
267static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void)
268{
269 return 0x10000U;
270}
271static inline u32 gr_gpfifo_status_r(void)
272{
273 return 0x00400504U;
274}
275static inline u32 gr_trapped_addr_r(void)
276{
277 return 0x00400704U;
278}
279static inline u32 gr_trapped_addr_mthd_v(u32 r)
280{
281 return (r >> 2U) & 0xfffU;
282}
283static inline u32 gr_trapped_addr_subch_v(u32 r)
284{
285 return (r >> 16U) & 0x7U;
286}
287static inline u32 gr_trapped_addr_mme_generated_v(u32 r)
288{
289 return (r >> 20U) & 0x1U;
290}
291static inline u32 gr_trapped_addr_datahigh_v(u32 r)
292{
293 return (r >> 24U) & 0x1U;
294}
295static inline u32 gr_trapped_addr_priv_v(u32 r)
296{
297 return (r >> 28U) & 0x1U;
298}
299static inline u32 gr_trapped_addr_status_v(u32 r)
300{
301 return (r >> 31U) & 0x1U;
302}
303static inline u32 gr_trapped_data_lo_r(void)
304{
305 return 0x00400708U;
306}
307static inline u32 gr_trapped_data_hi_r(void)
308{
309 return 0x0040070cU;
310}
311static inline u32 gr_trapped_data_mme_r(void)
312{
313 return 0x00400710U;
314}
315static inline u32 gr_trapped_data_mme_pc_v(u32 r)
316{
317 return (r >> 0U) & 0xfffU;
318}
319static inline u32 gr_status_r(void)
320{
321 return 0x00400700U;
322}
323static inline u32 gr_status_fe_method_upper_v(u32 r)
324{
325 return (r >> 1U) & 0x1U;
326}
327static inline u32 gr_status_fe_method_lower_v(u32 r)
328{
329 return (r >> 2U) & 0x1U;
330}
331static inline u32 gr_status_fe_method_lower_idle_v(void)
332{
333 return 0x00000000U;
334}
335static inline u32 gr_status_fe_gi_v(u32 r)
336{
337 return (r >> 21U) & 0x1U;
338}
339static inline u32 gr_status_mask_r(void)
340{
341 return 0x00400610U;
342}
343static inline u32 gr_status_1_r(void)
344{
345 return 0x00400604U;
346}
347static inline u32 gr_status_2_r(void)
348{
349 return 0x00400608U;
350}
351static inline u32 gr_engine_status_r(void)
352{
353 return 0x0040060cU;
354}
355static inline u32 gr_engine_status_value_busy_f(void)
356{
357 return 0x1U;
358}
359static inline u32 gr_pri_be0_becs_be_exception_r(void)
360{
361 return 0x00410204U;
362}
363static inline u32 gr_pri_be0_becs_be_exception_en_r(void)
364{
365 return 0x00410208U;
366}
367static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void)
368{
369 return 0x00502c90U;
370}
371static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void)
372{
373 return 0x00502c94U;
374}
375static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void)
376{
377 return 0x00504508U;
378}
379static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void)
380{
381 return 0x0050450cU;
382}
383static inline u32 gr_activity_0_r(void)
384{
385 return 0x00400380U;
386}
387static inline u32 gr_activity_1_r(void)
388{
389 return 0x00400384U;
390}
391static inline u32 gr_activity_2_r(void)
392{
393 return 0x00400388U;
394}
395static inline u32 gr_activity_4_r(void)
396{
397 return 0x00400390U;
398}
399static inline u32 gr_activity_4_gpc0_s(void)
400{
401 return 3U;
402}
403static inline u32 gr_activity_4_gpc0_f(u32 v)
404{
405 return (v & 0x7U) << 0U;
406}
407static inline u32 gr_activity_4_gpc0_m(void)
408{
409 return 0x7U << 0U;
410}
411static inline u32 gr_activity_4_gpc0_v(u32 r)
412{
413 return (r >> 0U) & 0x7U;
414}
415static inline u32 gr_activity_4_gpc0_empty_v(void)
416{
417 return 0x00000000U;
418}
419static inline u32 gr_activity_4_gpc0_preempted_v(void)
420{
421 return 0x00000004U;
422}
423static inline u32 gr_pri_gpc0_gcc_dbg_r(void)
424{
425 return 0x00501000U;
426}
427static inline u32 gr_pri_gpcs_gcc_dbg_r(void)
428{
429 return 0x00419000U;
430}
431static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void)
432{
433 return 0x1U << 1U;
434}
435static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void)
436{
437 return 0x0050433cU;
438}
439static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void)
440{
441 return 0x00419b3cU;
442}
443static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void)
444{
445 return 0x1U << 0U;
446}
447static inline u32 gr_pri_sked_activity_r(void)
448{
449 return 0x00407054U;
450}
451static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void)
452{
453 return 0x00502c80U;
454}
455static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void)
456{
457 return 0x00502c84U;
458}
459static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void)
460{
461 return 0x00502c88U;
462}
463static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void)
464{
465 return 0x00502c8cU;
466}
467static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void)
468{
469 return 0x00504500U;
470}
471static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void)
472{
473 return 0x00504d00U;
474}
475static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void)
476{
477 return 0x00501d00U;
478}
479static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void)
480{
481 return 0x0041ac80U;
482}
483static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void)
484{
485 return 0x0041ac84U;
486}
487static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void)
488{
489 return 0x0041ac88U;
490}
491static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void)
492{
493 return 0x0041ac8cU;
494}
495static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void)
496{
497 return 0x0041c500U;
498}
499static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void)
500{
501 return 0x0041cd00U;
502}
503static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void)
504{
505 return 0x00419d00U;
506}
507static inline u32 gr_pri_be0_becs_be_activity0_r(void)
508{
509 return 0x00410200U;
510}
511static inline u32 gr_pri_be1_becs_be_activity0_r(void)
512{
513 return 0x00410600U;
514}
515static inline u32 gr_pri_bes_becs_be_activity0_r(void)
516{
517 return 0x00408a00U;
518}
519static inline u32 gr_pri_ds_mpipe_status_r(void)
520{
521 return 0x00405858U;
522}
523static inline u32 gr_pri_fe_go_idle_info_r(void)
524{
525 return 0x00404194U;
526}
527static inline u32 gr_pri_fe_chip_def_info_r(void)
528{
529 return 0x00404030U;
530}
531static inline u32 gr_pri_fe_chip_def_info_max_veid_count_v(u32 r)
532{
533 return (r >> 0U) & 0xfffU;
534}
535static inline u32 gr_pri_fe_chip_def_info_max_veid_count_init_v(void)
536{
537 return 0x00000040U;
538}
539static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void)
540{
541 return 0x00504238U;
542}
543static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void)
544{
545 return 0x00504358U;
546}
547static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m(void)
548{
549 return 0x1U << 0U;
550}
551static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m(void)
552{
553 return 0x1U << 1U;
554}
555static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m(void)
556{
557 return 0x1U << 2U;
558}
559static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m(void)
560{
561 return 0x1U << 3U;
562}
563static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m(void)
564{
565 return 0x1U << 4U;
566}
567static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m(void)
568{
569 return 0x1U << 5U;
570}
571static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m(void)
572{
573 return 0x1U << 6U;
574}
575static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m(void)
576{
577 return 0x1U << 7U;
578}
579static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m(void)
580{
581 return 0x1U << 8U;
582}
583static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m(void)
584{
585 return 0x1U << 9U;
586}
587static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m(void)
588{
589 return 0x1U << 10U;
590}
591static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m(void)
592{
593 return 0x1U << 11U;
594}
595static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m(void)
596{
597 return 0x1U << 12U;
598}
599static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m(void)
600{
601 return 0x1U << 13U;
602}
603static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m(void)
604{
605 return 0x1U << 14U;
606}
607static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m(void)
608{
609 return 0x1U << 15U;
610}
611static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
612{
613 return (r >> 24U) & 0x1U;
614}
615static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
616{
617 return (r >> 26U) & 0x1U;
618}
619static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f(void)
620{
621 return 0x40000000U;
622}
623static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r(void)
624{
625 return 0x0050435cU;
626}
627static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s(void)
628{
629 return 16U;
630}
631static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(u32 r)
632{
633 return (r >> 0U) & 0xffffU;
634}
635static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r(void)
636{
637 return 0x00504360U;
638}
639static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s(void)
640{
641 return 16U;
642}
643static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(u32 r)
644{
645 return (r >> 0U) & 0xffffU;
646}
647static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r(void)
648{
649 return 0x0050436cU;
650}
651static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m(void)
652{
653 return 0x1U << 0U;
654}
655static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m(void)
656{
657 return 0x1U << 1U;
658}
659static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m(void)
660{
661 return 0x1U << 2U;
662}
663static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m(void)
664{
665 return 0x1U << 3U;
666}
667static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
668{
669 return (r >> 8U) & 0x1U;
670}
671static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
672{
673 return (r >> 10U) & 0x1U;
674}
675static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f(void)
676{
677 return 0x40000000U;
678}
679static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r(void)
680{
681 return 0x00504370U;
682}
683static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s(void)
684{
685 return 16U;
686}
687static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(u32 r)
688{
689 return (r >> 0U) & 0xffffU;
690}
691static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r(void)
692{
693 return 0x00504374U;
694}
695static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s(void)
696{
697 return 16U;
698}
699static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(u32 r)
700{
701 return (r >> 0U) & 0xffffU;
702}
703static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r(void)
704{
705 return 0x00504638U;
706}
707static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m(void)
708{
709 return 0x1U << 0U;
710}
711static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m(void)
712{
713 return 0x1U << 1U;
714}
715static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m(void)
716{
717 return 0x1U << 2U;
718}
719static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m(void)
720{
721 return 0x1U << 3U;
722}
723static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m(void)
724{
725 return 0x1U << 4U;
726}
727static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m(void)
728{
729 return 0x1U << 5U;
730}
731static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m(void)
732{
733 return 0x1U << 6U;
734}
735static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m(void)
736{
737 return 0x1U << 7U;
738}
739static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
740{
741 return (r >> 16U) & 0x1U;
742}
743static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
744{
745 return (r >> 18U) & 0x1U;
746}
747static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f(void)
748{
749 return 0x40000000U;
750}
751static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r(void)
752{
753 return 0x0050463cU;
754}
755static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s(void)
756{
757 return 16U;
758}
759static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(u32 r)
760{
761 return (r >> 0U) & 0xffffU;
762}
763static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r(void)
764{
765 return 0x00504640U;
766}
767static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s(void)
768{
769 return 16U;
770}
771static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(u32 r)
772{
773 return (r >> 0U) & 0xffffU;
774}
775static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void)
776{
777 return 0x005042c4U;
778}
779static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void)
780{
781 return 0x0U;
782}
783static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void)
784{
785 return 0x1U;
786}
787static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void)
788{
789 return 0x2U;
790}
791static inline u32 gr_gpc0_tpc0_mpc_hww_esr_r(void)
792{
793 return 0x00504430U;
794}
795static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void)
796{
797 return 0x40000000U;
798}
799static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void)
800{
801 return 0x00504434U;
802}
803static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r)
804{
805 return (r >> 0U) & 0x3fU;
806}
/*
 * GR ROP status registers (BE0 = per-unit, BES = broadcast) and pipe
 * bundle address/data/config registers (GV100). Suffixes per nvgpu
 * hw-header convention (_r offset, _f field value, _v field extract,
 * _w word index).
 */
static inline u32 gr_pri_be0_crop_status1_r(void)
{
	return 0x00410134U;
}
static inline u32 gr_pri_bes_crop_status1_r(void)
{
	return 0x00408934U;
}
static inline u32 gr_pri_be0_zrop_status_r(void)
{
	return 0x00410048U;
}
static inline u32 gr_pri_be0_zrop_status2_r(void)
{
	return 0x0041004cU;
}
static inline u32 gr_pri_bes_zrop_status_r(void)
{
	return 0x00408848U;
}
static inline u32 gr_pri_bes_zrop_status2_r(void)
{
	return 0x0040884cU;
}
/* Bundle address: 16-bit VALUE field, 6-bit VEID field at bit 20. */
static inline u32 gr_pipe_bundle_address_r(void)
{
	return 0x00400200U;
}
static inline u32 gr_pipe_bundle_address_value_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 gr_pipe_bundle_address_veid_f(u32 v)
{
	return (v & 0x3fU) << 20U;
}
static inline u32 gr_pipe_bundle_address_veid_w(void)
{
	return 0U;
}
static inline u32 gr_pipe_bundle_data_r(void)
{
	return 0x00400204U;
}
/* Bundle config: OVERRIDE_PIPE_MODE enable is bit 31. */
static inline u32 gr_pipe_bundle_config_r(void)
{
	return 0x00400208U;
}
static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void)
{
	return 0x80000000U;
}
/*
 * GR FE hardware-warning ESR and SM HWW global ESR registers (GV100):
 * broadcast (gpcs_tpcs_sms) and unicast (gpc0_tpc0_sm0) variants.
 * _m() values are single-bit masks matching the corresponding
 * _pending_f()/_report_f() encodings (e.g. bpt_int: bit 4 = 0x10).
 */
static inline u32 gr_fe_hww_esr_r(void)
{
	return 0x00404000U;
}
static inline u32 gr_fe_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_fe_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
/* Report-mask registers: select which SM warnings are reported. */
static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(void)
{
	return 0x00419eacU;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r(void)
{
	return 0x0050472cU;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f(void)
{
	return 0x4U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_int_report_f(void)
{
	return 0x10U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_pause_report_f(void)
{
	return 0x20U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_single_step_complete_report_f(void)
{
	return 0x40U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_error_in_trap_report_f(void)
{
	return 0x100U;
}
/* Global ESR status registers and per-condition bit masks. */
static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_r(void)
{
	return 0x00419eb4U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_r(void)
{
	return 0x00504734U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_m(void)
{
	return 0x1U << 4U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f(void)
{
	return 0x10U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_m(void)
{
	return 0x1U << 5U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(void)
{
	return 0x20U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_m(void)
{
	return 0x1U << 6U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f(void)
{
	return 0x40U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_m(void)
{
	return 0x1U << 2U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f(void)
{
	return 0x4U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_m(void)
{
	return 0x1U << 8U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_pending_f(void)
{
	return 0x100U;
}
/*
 * GR FE go-idle timeout / object table / TPC floorsweep registers,
 * plus MME shadow-RAM access and MME/MEMFMT hardware-warning ESRs
 * (GV100). Indexed _r(i) accessors return base + i*stride.
 */
static inline u32 gr_fe_go_idle_timeout_r(void)
{
	return 0x00404154U;
}
static inline u32 gr_fe_go_idle_timeout_count_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void)
{
	return 0x0U;
}
/* Production timeout count value. */
static inline u32 gr_fe_go_idle_timeout_count_prod_f(void)
{
	return 0x1800U;
}
/* Object table: one 32-bit entry per index; NVCLASS in low 16 bits. */
static inline u32 gr_fe_object_table_r(u32 i)
{
	return 0x00404200U + i*4U;
}
static inline u32 gr_fe_object_table_nvclass_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 gr_fe_tpc_fs_r(u32 i)
{
	return 0x0040a200U + i*4U;
}
/* MME shadow RAM: index register (bit 31 = write trigger) + data. */
static inline u32 gr_pri_mme_shadow_raw_index_r(void)
{
	return 0x00404488U;
}
static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_pri_mme_shadow_raw_data_r(void)
{
	return 0x0040448cU;
}
/* MME / MEMFMT ESRs share the common bit-30 reset / bit-31 enable layout. */
static inline u32 gr_mme_hww_esr_r(void)
{
	return 0x00404490U;
}
static inline u32 gr_mme_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_mme_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_memfmt_hww_esr_r(void)
{
	return 0x00404600U;
}
static inline u32 gr_memfmt_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_memfmt_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
/*
 * FECS falcon microcontroller control registers (GV100): CPU start,
 * DMA control, OS/mailbox/IRQ/context state, and the ICD (falcon
 * debug) command interface. Suffixes per nvgpu hw-header convention
 * (_r offset, _f field value, _m mask, _v extract, _s width).
 */
static inline u32 gr_fecs_cpuctl_r(void)
{
	return 0x00409100U;
}
static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 gr_fecs_cpuctl_alias_r(void)
{
	return 0x00409130U;
}
static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
/* DMACTL: bit 0 require-ctx; bits 1/2 report DMEM/IMEM scrub status. */
static inline u32 gr_fecs_dmactl_r(void)
{
	return 0x0040910cU;
}
static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void)
{
	return 0x1U << 1U;
}
static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void)
{
	return 0x1U << 2U;
}
static inline u32 gr_fecs_os_r(void)
{
	return 0x00409080U;
}
static inline u32 gr_fecs_idlestate_r(void)
{
	return 0x0040904cU;
}
static inline u32 gr_fecs_mailbox0_r(void)
{
	return 0x00409040U;
}
static inline u32 gr_fecs_mailbox1_r(void)
{
	return 0x00409044U;
}
static inline u32 gr_fecs_irqstat_r(void)
{
	return 0x00409008U;
}
static inline u32 gr_fecs_irqmode_r(void)
{
	return 0x0040900cU;
}
static inline u32 gr_fecs_irqmask_r(void)
{
	return 0x00409018U;
}
static inline u32 gr_fecs_irqdest_r(void)
{
	return 0x0040901cU;
}
static inline u32 gr_fecs_curctx_r(void)
{
	return 0x00409050U;
}
static inline u32 gr_fecs_nxtctx_r(void)
{
	return 0x00409054U;
}
static inline u32 gr_fecs_engctl_r(void)
{
	return 0x004090a4U;
}
static inline u32 gr_fecs_debug1_r(void)
{
	return 0x00409090U;
}
static inline u32 gr_fecs_debuginfo_r(void)
{
	return 0x00409094U;
}
/* ICD command register: 4-bit OPC (low nibble), 5-bit IDX at bit 8. */
static inline u32 gr_fecs_icd_cmd_r(void)
{
	return 0x00409200U;
}
static inline u32 gr_fecs_icd_cmd_opc_s(void)
{
	return 4U;
}
static inline u32 gr_fecs_icd_cmd_opc_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 gr_fecs_icd_cmd_opc_m(void)
{
	return 0xfU << 0U;
}
static inline u32 gr_fecs_icd_cmd_opc_v(u32 r)
{
	return (r >> 0U) & 0xfU;
}
/* Opcodes: RREG = read register, RSTAT = read status. */
static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void)
{
	return 0x8U;
}
static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void)
{
	return 0xeU;
}
static inline u32 gr_fecs_icd_cmd_idx_f(u32 v)
{
	return (v & 0x1fU) << 8U;
}
static inline u32 gr_fecs_icd_rdata_r(void)
{
	return 0x0040920cU;
}
/*
 * FECS falcon IMEM/DMEM port registers, DMA-transfer registers, boot
 * vector, and hwcfg registers (GV100). IMEM ports are 16 bytes apart,
 * DMEM ports 8 bytes apart; AINCW enables auto-increment on write.
 */
static inline u32 gr_fecs_imemc_r(u32 i)
{
	return 0x00409180U + i*16U;
}
static inline u32 gr_fecs_imemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
static inline u32 gr_fecs_imemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_fecs_imemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 gr_fecs_imemd_r(u32 i)
{
	return 0x00409184U + i*16U;
}
/* IMEM tag port: 16-bit tag per 256-byte code block. */
static inline u32 gr_fecs_imemt_r(u32 i)
{
	return 0x00409188U + i*16U;
}
static inline u32 gr_fecs_imemt_tag_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_fecs_dmemc_r(u32 i)
{
	return 0x004091c0U + i*8U;
}
static inline u32 gr_fecs_dmemc_offs_s(void)
{
	return 6U;
}
static inline u32 gr_fecs_dmemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
static inline u32 gr_fecs_dmemc_offs_m(void)
{
	return 0x3fU << 2U;
}
static inline u32 gr_fecs_dmemc_offs_v(u32 r)
{
	return (r >> 2U) & 0x3fU;
}
static inline u32 gr_fecs_dmemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_fecs_dmemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 gr_fecs_dmemd_r(u32 i)
{
	return 0x004091c4U + i*8U;
}
/* Falcon DMA transfer: base / mem offset / FB offset / command. */
static inline u32 gr_fecs_dmatrfbase_r(void)
{
	return 0x00409110U;
}
static inline u32 gr_fecs_dmatrfmoffs_r(void)
{
	return 0x00409114U;
}
static inline u32 gr_fecs_dmatrffboffs_r(void)
{
	return 0x0040911cU;
}
static inline u32 gr_fecs_dmatrfcmd_r(void)
{
	return 0x00409118U;
}
static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v)
{
	return (v & 0x7U) << 8U;
}
static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v)
{
	return (v & 0x7U) << 12U;
}
static inline u32 gr_fecs_bootvec_r(void)
{
	return 0x00409104U;
}
static inline u32 gr_fecs_bootvec_vec_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_falcon_hwcfg_r(void)
{
	return 0x00409108U;
}
/* Broadcast GPCCS variant of the falcon hwcfg register. */
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void)
{
	return 0x0041a108U;
}
static inline u32 gr_fecs_falcon_rm_r(void)
{
	return 0x00409084U;
}
/*
 * FECS current-context register (28-bit pointer, 2-bit memory target
 * at bit 28, valid bit 31) and the FECS method data/push interface
 * with its known method address values (GV100).
 */
static inline u32 gr_fecs_current_ctx_r(void)
{
	return 0x00409b00U;
}
static inline u32 gr_fecs_current_ctx_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 gr_fecs_current_ctx_ptr_v(u32 r)
{
	return (r >> 0U) & 0xfffffffU;
}
static inline u32 gr_fecs_current_ctx_target_s(void)
{
	return 2U;
}
static inline u32 gr_fecs_current_ctx_target_f(u32 v)
{
	return (v & 0x3U) << 28U;
}
static inline u32 gr_fecs_current_ctx_target_m(void)
{
	return 0x3U << 28U;
}
static inline u32 gr_fecs_current_ctx_target_v(u32 r)
{
	return (r >> 28U) & 0x3U;
}
/* TARGET encodings: vidmem / coherent sysmem / non-coherent sysmem. */
static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void)
{
	return 0x20000000U;
}
static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void)
{
	return 0x30000000U;
}
static inline u32 gr_fecs_current_ctx_valid_s(void)
{
	return 1U;
}
static inline u32 gr_fecs_current_ctx_valid_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 gr_fecs_current_ctx_valid_m(void)
{
	return 0x1U << 31U;
}
static inline u32 gr_fecs_current_ctx_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 gr_fecs_current_ctx_valid_false_f(void)
{
	return 0x0U;
}
/* Method submission: write data, then push the method address. */
static inline u32 gr_fecs_method_data_r(void)
{
	return 0x00409500U;
}
static inline u32 gr_fecs_method_push_r(void)
{
	return 0x00409504U;
}
static inline u32 gr_fecs_method_push_adr_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
/* Known FECS firmware method IDs (values from the HW manual). */
static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void)
{
	return 0x00000003U;
}
static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void)
{
	return 0x3U;
}
static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void)
{
	return 0x00000010U;
}
static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void)
{
	return 0x00000009U;
}
static inline u32 gr_fecs_method_push_adr_restore_golden_v(void)
{
	return 0x00000015U;
}
static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void)
{
	return 0x00000016U;
}
static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void)
{
	return 0x00000025U;
}
static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void)
{
	return 0x00000030U;
}
static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void)
{
	return 0x00000031U;
}
static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void)
{
	return 0x00000032U;
}
static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void)
{
	return 0x00000038U;
}
static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void)
{
	return 0x00000039U;
}
static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void)
{
	return 0x21U;
}
static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void)
{
	return 0x0000001aU;
}
static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void)
{
	return 0x00000004U;
}
static inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void)
{
	return 0x0000003aU;
}
/*
 * FECS host interrupt status/clear/enable registers and the FECS
 * context-switch reset-control register (GV100). In ctxsw_reset_ctl,
 * the *_disabled_f values are the bits that HOLD a unit out of
 * halt/reset; 0x0 means the condition is not asserted.
 */
static inline u32 gr_fecs_host_int_status_r(void)
{
	return 0x00409c18U;
}
static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v)
{
	return (v & 0x1U) << 16U;
}
static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v)
{
	return (v & 0x1U) << 17U;
}
static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v)
{
	return (v & 0x1U) << 18U;
}
/* Low 16 bits: per-channel ctxsw interrupt lines. */
static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_fecs_host_int_clear_r(void)
{
	return 0x00409c20U;
}
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void)
{
	return 0x2U;
}
static inline u32 gr_fecs_host_int_enable_r(void)
{
	return 0x00409c24U;
}
static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void)
{
	return 0x2U;
}
static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void)
{
	return 0x10000U;
}
static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void)
{
	return 0x20000U;
}
static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void)
{
	return 0x40000U;
}
static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void)
{
	return 0x80000U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_r(void)
{
	return 0x00409614U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void)
{
	return 0x10U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void)
{
	return 0x20U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void)
{
	return 0x40U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void)
{
	return 0x100U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void)
{
	return 0x200U;
}
/* BE context reset is a full _s/_f/_m/_v field at bit 10. */
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void)
{
	return 1U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v)
{
	return (v & 0x1U) << 10U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void)
{
	return 0x1U << 10U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r)
{
	return (r >> 10U) & 0x1U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void)
{
	return 0x400U;
}
/*
 * FECS ctxsw mailbox array (16 entries, with set/clear aliases),
 * floorsweep status, config, RC lanes, and ctxsw status registers
 * (GV100). Mailbox PASS/FAIL values are the handshake codes the
 * ctxsw firmware writes back.
 */
static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void)
{
	return 0x0040960cU;
}
static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i)
{
	return 0x00409800U + i*4U;
}
static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void)
{
	return 0x00000010U;
}
static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void)
{
	return 0x00000002U;
}
static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i)
{
	return 0x004098c0U + i*4U;
}
static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i)
{
	return 0x00409840U + i*4U;
}
static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
/* Floorsweep status: 5-bit GPC count (low) and FBP count (bit 16). */
static inline u32 gr_fecs_fs_r(void)
{
	return 0x00409604U;
}
static inline u32 gr_fecs_fs_num_available_gpcs_s(void)
{
	return 5U;
}
static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v)
{
	return (v & 0x1fU) << 0U;
}
static inline u32 gr_fecs_fs_num_available_gpcs_m(void)
{
	return 0x1fU << 0U;
}
static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 gr_fecs_fs_num_available_fbps_s(void)
{
	return 5U;
}
static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v)
{
	return (v & 0x1fU) << 16U;
}
static inline u32 gr_fecs_fs_num_available_fbps_m(void)
{
	return 0x1fU << 16U;
}
static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r)
{
	return (r >> 16U) & 0x1fU;
}
static inline u32 gr_fecs_cfg_r(void)
{
	return 0x00409620U;
}
static inline u32 gr_fecs_cfg_imem_sz_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 gr_fecs_rc_lanes_r(void)
{
	return 0x00409880U;
}
static inline u32 gr_fecs_rc_lanes_num_chains_s(void)
{
	return 6U;
}
static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 gr_fecs_rc_lanes_num_chains_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r)
{
	return (r >> 0U) & 0x3fU;
}
/* Ctxsw status 1: ARB_BUSY flag at bit 12. */
static inline u32 gr_fecs_ctxsw_status_1_r(void)
{
	return 0x00409400U;
}
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void)
{
	return 1U;
}
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v)
{
	return (v & 0x1U) << 12U;
}
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void)
{
	return 0x1U << 12U;
}
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r)
{
	return (r >> 12U) & 0x1U;
}
/*
 * FECS arbiter / new-context registers (same 28-bit ptr + 2-bit
 * target + valid-bit layout as current_ctx), ctxsw status and
 * idle-state registers, and ECC feature-override fields (GV100).
 */
static inline u32 gr_fecs_arb_ctx_adr_r(void)
{
	return 0x00409a24U;
}
static inline u32 gr_fecs_new_ctx_r(void)
{
	return 0x00409b04U;
}
static inline u32 gr_fecs_new_ctx_ptr_s(void)
{
	return 28U;
}
static inline u32 gr_fecs_new_ctx_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 gr_fecs_new_ctx_ptr_m(void)
{
	return 0xfffffffU << 0U;
}
static inline u32 gr_fecs_new_ctx_ptr_v(u32 r)
{
	return (r >> 0U) & 0xfffffffU;
}
static inline u32 gr_fecs_new_ctx_target_s(void)
{
	return 2U;
}
static inline u32 gr_fecs_new_ctx_target_f(u32 v)
{
	return (v & 0x3U) << 28U;
}
static inline u32 gr_fecs_new_ctx_target_m(void)
{
	return 0x3U << 28U;
}
static inline u32 gr_fecs_new_ctx_target_v(u32 r)
{
	return (r >> 28U) & 0x3U;
}
static inline u32 gr_fecs_new_ctx_valid_s(void)
{
	return 1U;
}
static inline u32 gr_fecs_new_ctx_valid_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 gr_fecs_new_ctx_valid_m(void)
{
	return 0x1U << 31U;
}
static inline u32 gr_fecs_new_ctx_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 gr_fecs_arb_ctx_ptr_r(void)
{
	return 0x00409a0cU;
}
static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void)
{
	return 28U;
}
static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void)
{
	return 0xfffffffU << 0U;
}
static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r)
{
	return (r >> 0U) & 0xfffffffU;
}
static inline u32 gr_fecs_arb_ctx_ptr_target_s(void)
{
	return 2U;
}
static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v)
{
	return (v & 0x3U) << 28U;
}
static inline u32 gr_fecs_arb_ctx_ptr_target_m(void)
{
	return 0x3U << 28U;
}
static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r)
{
	return (r >> 28U) & 0x3U;
}
/* Arbiter command register: 5-bit CMD field in the low bits. */
static inline u32 gr_fecs_arb_ctx_cmd_r(void)
{
	return 0x00409a10U;
}
static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void)
{
	return 5U;
}
static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v)
{
	return (v & 0x1fU) << 0U;
}
static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void)
{
	return 0x1fU << 0U;
}
static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 gr_fecs_ctxsw_status_fe_0_r(void)
{
	return 0x00409c00U;
}
static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void)
{
	return 0x00502c04U;
}
static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void)
{
	return 0x00502400U;
}
static inline u32 gr_fecs_ctxsw_idlestate_r(void)
{
	return 0x00409420U;
}
/* ECC feature override: per-unit enable bits + their override flags. */
static inline u32 gr_fecs_feature_override_ecc_r(void)
{
	return 0x00409658U;
}
static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r)
{
	return (r >> 15U) & 0x1U;
}
static inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r)
{
	return (r >> 12U) & 0x1U;
}
static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void)
{
	return 0x00502420U;
}
/*
 * GR RSTR2D GPC-map and PD (pipeline distributor) registers (GV100):
 * PD hardware-warning ESR, per-GPC TPC counts (eight 4-bit fields per
 * word), alpha/beta distribution configs, and the GPC skip table.
 */
static inline u32 gr_rstr2d_gpc_map_r(u32 i)
{
	return 0x0040780cU + i*4U;
}
static inline u32 gr_rstr2d_map_table_cfg_r(void)
{
	return 0x004078bcU;
}
static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_pd_hww_esr_r(void)
{
	return 0x00406018U;
}
static inline u32 gr_pd_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_pd_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
/* 4 words x 8 nibble-wide counts = TPC count for up to 32 GPCs. */
static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i)
{
	return 0x00406028U + i*4U;
}
static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void)
{
	return 0x00000004U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v)
{
	return (v & 0xfU) << 4U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v)
{
	return (v & 0xfU) << 8U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v)
{
	return (v & 0xfU) << 12U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v)
{
	return (v & 0xfU) << 20U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v)
{
	return (v & 0xfU) << 24U;
}
static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v)
{
	return (v & 0xfU) << 28U;
}
static inline u32 gr_pd_ab_dist_cfg0_r(void)
{
	return 0x004064c0U;
}
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void)
{
	return 0x0U;
}
static inline u32 gr_pd_ab_dist_cfg1_r(void)
{
	return 0x004064c4U;
}
static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void)
{
	return 0xffffU;
}
static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v)
{
	return (v & 0xffffU) << 16U;
}
static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void)
{
	return 0x00000080U;
}
static inline u32 gr_pd_ab_dist_cfg2_r(void)
{
	return 0x004064c8U;
}
static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v)
{
	return (v & 0x1fffU) << 0U;
}
static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void)
{
	return 0x00001680U;
}
static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v)
{
	return (v & 0x1fffU) << 16U;
}
static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void)
{
	return 0x00000020U;
}
static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void)
{
	return 0x00001680U;
}
/* Skip table: 8 words, four 8-bit GPC masks per word (4n..4n+3). */
static inline u32 gr_pd_dist_skip_table_r(u32 i)
{
	return 0x004064d0U + i*4U;
}
static inline u32 gr_pd_dist_skip_table__size_1_v(void)
{
	return 0x00000008U;
}
static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v)
{
	return (v & 0xffU) << 16U;
}
static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v)
{
	return (v & 0xffU) << 24U;
}
/*
 * GR DS (data slicer) registers (GV100): debug timeslice mode, ZBC
 * (zero-bandwidth clear) color/depth value and format registers, the
 * ZBC table load interface, and TGA constraint-logic circular-buffer
 * sizes.
 */
static inline u32 gr_ds_debug_r(void)
{
	return 0x00405800U;
}
static inline u32 gr_ds_debug_timeslice_mode_disable_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_debug_timeslice_mode_enable_f(void)
{
	return 0x8000000U;
}
/* ZBC clear color: one 32-bit register per R/G/B/A component. */
static inline u32 gr_ds_zbc_color_r_r(void)
{
	return 0x00405804U;
}
static inline u32 gr_ds_zbc_color_r_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_g_r(void)
{
	return 0x00405808U;
}
static inline u32 gr_ds_zbc_color_g_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_b_r(void)
{
	return 0x0040580cU;
}
static inline u32 gr_ds_zbc_color_b_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_color_a_r(void)
{
	return 0x00405810U;
}
static inline u32 gr_ds_zbc_color_a_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
/* ZBC color format codes (7-bit field). */
static inline u32 gr_ds_zbc_color_fmt_r(void)
{
	return 0x00405814U;
}
static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v)
{
	return (v & 0x7fU) << 0U;
}
static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void)
{
	return 0x00000002U;
}
static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void)
{
	return 0x00000004U;
}
static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void)
{
	return 0x00000028U;
}
/* ZBC depth clear value (full 32-bit field) and its format. */
static inline u32 gr_ds_zbc_z_r(void)
{
	return 0x00405818U;
}
static inline u32 gr_ds_zbc_z_val_s(void)
{
	return 32U;
}
static inline u32 gr_ds_zbc_z_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_ds_zbc_z_val_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 gr_ds_zbc_z_val_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
static inline u32 gr_ds_zbc_z_val__init_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_ds_zbc_z_val__init_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_z_fmt_r(void)
{
	return 0x0040581cU;
}
static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void)
{
	return 0x00000001U;
}
/* ZBC table load: pick index, select color/depth, trigger write. */
static inline u32 gr_ds_zbc_tbl_index_r(void)
{
	return 0x00405820U;
}
static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 gr_ds_zbc_tbl_ld_r(void)
{
	return 0x00405824U;
}
static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void)
{
	return 0x1U;
}
static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void)
{
	return 0x0U;
}
static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void)
{
	return 0x4U;
}
/* TGA constraint logic: beta (22-bit) and alpha (16-bit) CB sizes. */
static inline u32 gr_ds_tga_constraintlogic_beta_r(void)
{
	return 0x00405830U;
}
static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v)
{
	return (v & 0x3fffffU) << 0U;
}
static inline u32 gr_ds_tga_constraintlogic_alpha_r(void)
{
	return 0x0040585cU;
}
static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
/*
 * GR DS HWW error-status registers (esr at 0x405840, esr_2 at 0x405848).
 * Both carry a reset task bit at bit 30 and an interrupt enable at bit 31;
 * the _s/_f/_m/_v suffixes are the generated size/field/mask/extract forms.
 */
static inline u32 gr_ds_hww_esr_r(void)
{
	return 0x00405840U;
}
static inline u32 gr_ds_hww_esr_reset_s(void)
{
	return 1U;
}
static inline u32 gr_ds_hww_esr_reset_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 gr_ds_hww_esr_reset_m(void)
{
	return 0x1U << 30U;
}
static inline u32 gr_ds_hww_esr_reset_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 gr_ds_hww_esr_reset_task_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_ds_hww_esr_reset_task_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_ds_hww_esr_en_enabled_f(void)
{
	return 0x80000000U;
}
static inline u32 gr_ds_hww_esr_2_r(void)
{
	return 0x00405848U;
}
static inline u32 gr_ds_hww_esr_2_reset_s(void)
{
	return 1U;
}
static inline u32 gr_ds_hww_esr_2_reset_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 gr_ds_hww_esr_2_reset_m(void)
{
	return 0x1U << 30U;
}
static inline u32 gr_ds_hww_esr_2_reset_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 gr_ds_hww_esr_2_reset_task_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_ds_hww_esr_2_reset_task_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_ds_hww_esr_2_en_enabled_f(void)
{
	return 0x80000000U;
}
/*
 * GR DS HWW report masks: sph0..sph23 error report bits (one bit each,
 * bit N for sphN) in the first mask register, sph24 in the _2 register.
 */
static inline u32 gr_ds_hww_report_mask_r(void)
{
	return 0x00405844U;
}
static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void)
{
	return 0x1U;
}
static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void)
{
	return 0x2U;
}
static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void)
{
	return 0x4U;
}
static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void)
{
	return 0x8U;
}
static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void)
{
	return 0x10U;
}
static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void)
{
	return 0x20U;
}
static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void)
{
	return 0x40U;
}
static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void)
{
	return 0x80U;
}
static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void)
{
	return 0x100U;
}
static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void)
{
	return 0x200U;
}
static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void)
{
	return 0x400U;
}
static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void)
{
	return 0x800U;
}
static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void)
{
	return 0x1000U;
}
static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void)
{
	return 0x2000U;
}
static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void)
{
	return 0x4000U;
}
static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void)
{
	return 0x8000U;
}
static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void)
{
	return 0x10000U;
}
static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void)
{
	return 0x20000U;
}
static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void)
{
	return 0x40000U;
}
static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void)
{
	return 0x80000U;
}
static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void)
{
	return 0x100000U;
}
static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void)
{
	return 0x200000U;
}
static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void)
{
	return 0x400000U;
}
static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void)
{
	return 0x800000U;
}
static inline u32 gr_ds_hww_report_mask_2_r(void)
{
	return 0x0040584cU;
}
static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void)
{
	return 0x1U;
}
/* Indexed TPC-per-GPC count register array, 4-byte stride from 0x405870. */
static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i)
{
	return 0x00405870U + i*4U;
}
/*
 * GR SCC bundle circular-buffer: base address (bits 39:8, i.e. 256-byte
 * aligned), size in 256-byte units, and the valid flag at bit 31.
 */
static inline u32 gr_scc_bundle_cb_base_r(void)
{
	return 0x00408004U;
}
static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void)
{
	return 0x00000008U;
}
static inline u32 gr_scc_bundle_cb_size_r(void)
{
	return 0x00408008U;
}
static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v)
{
	return (v & 0x7ffU) << 0U;
}
static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void)
{
	return 0x00000030U;
}
static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void)
{
	return 0x00000100U;
}
static inline u32 gr_scc_bundle_cb_size_valid_false_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_scc_bundle_cb_size_valid_false_f(void)
{
	return 0x0U;
}
static inline u32 gr_scc_bundle_cb_size_valid_true_f(void)
{
	return 0x80000000U;
}
/*
 * GR SCC pagepool: 256-byte-aligned base address, total-pages field
 * (10 bits; 0 selects the HW max of 0x200 pages of 256 bytes each),
 * max-valid-pages field at bits 19:10, and the valid bit at bit 31.
 */
static inline u32 gr_scc_pagepool_base_r(void)
{
	return 0x0040800cU;
}
static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void)
{
	return 0x00000008U;
}
static inline u32 gr_scc_pagepool_r(void)
{
	return 0x00408010U;
}
static inline u32 gr_scc_pagepool_total_pages_f(u32 v)
{
	return (v & 0x3ffU) << 0U;
}
static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void)
{
	return 0x00000200U;
}
static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void)
{
	return 0x00000100U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_s(void)
{
	return 10U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v)
{
	return (v & 0x3ffU) << 10U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_m(void)
{
	return 0x3ffU << 10U;
}
static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r)
{
	return (r >> 10U) & 0x3ffU;
}
static inline u32 gr_scc_pagepool_valid_true_f(void)
{
	return 0x80000000U;
}
/* GR SCC init (RAM-init trigger) and HWW ESR (reset bit 30, enable bit 31). */
static inline u32 gr_scc_init_r(void)
{
	return 0x0040802cU;
}
static inline u32 gr_scc_init_ram_trigger_f(void)
{
	return 0x1U;
}
static inline u32 gr_scc_hww_esr_r(void)
{
	return 0x00408030U;
}
static inline u32 gr_scc_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_scc_hww_esr_en_enable_f(void)
{
	return 0x80000000U;
}
/*
 * GR SKED HWW ESR and its enable register; the skedcheck18 "L1 config too
 * small" error-enable occupies bit 25 of the enable register.
 */
static inline u32 gr_sked_hww_esr_r(void)
{
	return 0x00407020U;
}
static inline u32 gr_sked_hww_esr_reset_active_f(void)
{
	return 0x40000000U;
}
static inline u32 gr_sked_hww_esr_en_r(void)
{
	return 0x00407024U;
}
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(void)
{
	return 0x1U << 25U;
}
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f(void)
{
	return 0x2000000U;
}
/*
 * GR CWD (compute work distributor) floorsweep config: GPC/TPC counts,
 * the indexed gpc_tpc_id mapping array (4-bit tpc/gpc sub-fields), and
 * the indexed sm_id array (16 entries, two 8-bit TPC ids per register).
 */
static inline u32 gr_cwd_fs_r(void)
{
	return 0x00405b00U;
}
static inline u32 gr_cwd_fs_num_gpcs_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_cwd_fs_num_tpcs_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_cwd_gpc_tpc_id_r(u32 i)
{
	return 0x00405b60U + i*4U;
}
static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void)
{
	return 4U;
}
static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void)
{
	return 4U;
}
static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v)
{
	return (v & 0xfU) << 4U;
}
static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v)
{
	return (v & 0xfU) << 8U;
}
static inline u32 gr_cwd_sm_id_r(u32 i)
{
	return 0x00405ba0U + i*4U;
}
static inline u32 gr_cwd_sm_id__size_1_v(void)
{
	return 0x00000010U;
}
static inline u32 gr_cwd_sm_id_tpc0_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_cwd_sm_id_tpc1_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
/*
 * GPC0 floorsweep status (available TPC/ZCULL counts, 5 bits each) and
 * GPC0 config (IMEM size in the low byte).
 */
static inline u32 gr_gpc0_fs_gpc_r(void)
{
	return 0x00502608U;
}
static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r)
{
	return (r >> 16U) & 0x1fU;
}
static inline u32 gr_gpc0_cfg_r(void)
{
	return 0x00502620U;
}
static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
/*
 * GPCCS ramchain lanes: 6-bit chain count and 24-bit per-lane size
 * (with the explicit "0" value/field variants the generator emits).
 */
static inline u32 gr_gpccs_rc_lanes_r(void)
{
	return 0x00502880U;
}
static inline u32 gr_gpccs_rc_lanes_num_chains_s(void)
{
	return 6U;
}
static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 gr_gpccs_rc_lanes_num_chains_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r)
{
	return (r >> 0U) & 0x3fU;
}
static inline u32 gr_gpccs_rc_lane_size_r(void)
{
	return 0x00502910U;
}
static inline u32 gr_gpccs_rc_lane_size_v_s(void)
{
	return 24U;
}
static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 gr_gpccs_rc_lane_size_v_m(void)
{
	return 0xffffffU << 0U;
}
static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r)
{
	return (r >> 0U) & 0xffffffU;
}
static inline u32 gr_gpccs_rc_lane_size_v_0_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_gpccs_rc_lane_size_v_0_f(void)
{
	return 0x0U;
}
/*
 * GPC0 ZCULL setup: floorsweep (SM count, active banks), RAM addressing,
 * SM reciprocal, total RAM size in aliquots, and the indexed per-region
 * zcsize array (32-byte stride) with its width/height alignment multiples.
 */
static inline u32 gr_gpc0_zcull_fs_r(void)
{
	return 0x00500910U;
}
static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v)
{
	return (v & 0x1ffU) << 0U;
}
static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v)
{
	return (v & 0xfU) << 16U;
}
static inline u32 gr_gpc0_zcull_ram_addr_r(void)
{
	return 0x00500914U;
}
static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v)
{
	return (v & 0xfU) << 0U;
}
static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v)
{
	return (v & 0xfU) << 8U;
}
static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void)
{
	return 0x00500918U;
}
static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v)
{
	return (v & 0xffffffU) << 0U;
}
static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void)
{
	return 0x00800000U;
}
static inline u32 gr_gpc0_zcull_total_ram_size_r(void)
{
	return 0x00500920U;
}
static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_gpc0_zcull_zcsize_r(u32 i)
{
	return 0x00500a04U + i*32U;
}
static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void)
{
	return 0x00000040U;
}
static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void)
{
	return 0x00000010U;
}
/* GPC0 GPM PD: indexed SM-id array and indexed PES→TPC id-mask array. */
static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i)
{
	return 0x00500c10U + i*4U;
}
static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i)
{
	return 0x00500c30U + i*4U;
}
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
/*
 * GPC0/TPC0 identity and capability registers: PE SM-id config, SM config
 * (TPC id), and SM architecture fields (warp count, SPA/SM versions).
 */
static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void)
{
	return 0x00504088U;
}
static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_gpc0_tpc0_sm_cfg_r(void)
{
	return 0x00504608U;
}
static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 gr_gpc0_tpc0_sm_arch_r(void)
{
	return 0x00504330U;
}
static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r)
{
	return (r >> 0U) & 0xffU;
}
static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r)
{
	return (r >> 8U) & 0xfffU;
}
static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r)
{
	return (r >> 20U) & 0xfffU;
}
/*
 * GPC0/PPC0 PES and CBM registers: master-PE select ("strem" spelling is
 * the generator's, kept for ABI consistency with callers), beta/alpha
 * circular-buffer sizes and offsets, and the beta steady-state size.
 */
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void)
{
	return 0x00503018U;
}
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void)
{
	return 0x1U << 0U;
}
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void)
{
	return 0x1U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void)
{
	return 0x005030c0U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v)
{
	return (v & 0x3fffffU) << 0U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void)
{
	return 0x3fffffU << 0U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void)
{
	return 0x00000480U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void)
{
	return 0x00000d10U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void)
{
	return 0x00000020U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void)
{
	return 0x005030f4U;
}
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void)
{
	return 0x005030e4U;
}
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void)
{
	return 0x00000800U;
}
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void)
{
	return 0x00000020U;
}
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void)
{
	return 0x005030f8U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void)
{
	return 0x005030f0U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v)
{
	return (v & 0x3fffffU) << 0U;
}
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void)
{
	return 0x00000480U;
}
/*
 * Broadcast TPC TEX RM circular buffer: cb_0 holds the 4KB-aligned base
 * (bits 43:12), cb_1 holds the size in 128-byte units (21 bits) plus a
 * valid flag at bit 31.
 */
static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void)
{
	return 0x00419e00U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void)
{
	return 0x00419e04U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void)
{
	return 21U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v)
{
	return (v & 0x1fffffU) << 0U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void)
{
	return 0x1fffffU << 0U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r)
{
	return (r >> 0U) & 0x1fffffU;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void)
{
	return 0x80U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void)
{
	return 1U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void)
{
	return 0x1U << 31U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void)
{
	return 0x80000000U;
}
/*
 * GPCCS falcon address register: 6-bit LSB (bits 5:0), 6-bit MSB
 * (bits 11:6), and the combined 12-bit EXT view of the same field.
 */
static inline u32 gr_gpccs_falcon_addr_r(void)
{
	return 0x0041a0acU;
}
static inline u32 gr_gpccs_falcon_addr_lsb_s(void)
{
	return 6U;
}
static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v)
{
	return (v & 0x3fU) << 0U;
}
static inline u32 gr_gpccs_falcon_addr_lsb_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r)
{
	return (r >> 0U) & 0x3fU;
}
static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void)
{
	return 0x0U;
}
static inline u32 gr_gpccs_falcon_addr_msb_s(void)
{
	return 6U;
}
static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v)
{
	return (v & 0x3fU) << 6U;
}
static inline u32 gr_gpccs_falcon_addr_msb_m(void)
{
	return 0x3fU << 6U;
}
static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r)
{
	return (r >> 6U) & 0x3fU;
}
static inline u32 gr_gpccs_falcon_addr_msb_init_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_gpccs_falcon_addr_msb_init_f(void)
{
	return 0x0U;
}
static inline u32 gr_gpccs_falcon_addr_ext_s(void)
{
	return 12U;
}
static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 gr_gpccs_falcon_addr_ext_m(void)
{
	return 0xfffU << 0U;
}
static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r)
{
	return (r >> 0U) & 0xfffU;
}
/*
 * GPCCS falcon microcontroller interface: CPU start control, DMA control
 * (with IMEM/DMEM scrub-status bits), auto-increment IMEM/DMEM port
 * registers (indexed, 16- and 8-byte strides), IMEM tag, and the indexed
 * ctxsw mailbox array.
 */
static inline u32 gr_gpccs_cpuctl_r(void)
{
	return 0x0041a100U;
}
static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 gr_gpccs_dmactl_r(void)
{
	return 0x0041a10cU;
}
static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void)
{
	return 0x1U << 1U;
}
static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void)
{
	return 0x1U << 2U;
}
static inline u32 gr_gpccs_imemc_r(u32 i)
{
	return 0x0041a180U + i*16U;
}
static inline u32 gr_gpccs_imemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
static inline u32 gr_gpccs_imemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_gpccs_imemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 gr_gpccs_imemd_r(u32 i)
{
	return 0x0041a184U + i*16U;
}
static inline u32 gr_gpccs_imemt_r(u32 i)
{
	return 0x0041a188U + i*16U;
}
static inline u32 gr_gpccs_imemt__size_1_v(void)
{
	return 0x00000004U;
}
static inline u32 gr_gpccs_imemt_tag_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_gpccs_dmemc_r(u32 i)
{
	return 0x0041a1c0U + i*8U;
}
static inline u32 gr_gpccs_dmemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
static inline u32 gr_gpccs_dmemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 gr_gpccs_dmemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 gr_gpccs_dmemd_r(u32 i)
{
	return 0x0041a1c4U + i*8U;
}
static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i)
{
	return 0x0041a800U + i*4U;
}
static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
/*
 * Broadcast SWDX bundle circular buffer: 256-byte-aligned base (bits 39:8),
 * size in 256-byte units (11 bits, init 0x30), and valid flag at bit 31.
 */
static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void)
{
	return 0x00418e24U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void)
{
	return 32U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void)
{
	return 0x0U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void)
{
	return 0x00418e28U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void)
{
	return 11U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v)
{
	return (v & 0x7ffU) << 0U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void)
{
	return 0x7ffU << 0U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r)
{
	return (r >> 0U) & 0x7ffU;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void)
{
	return 0x00000030U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void)
{
	return 0x30U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void)
{
	return 1U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void)
{
	return 0x1U << 31U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void)
{
	return 0x00000000U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void)
{
	return 0x0U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void)
{
	return 0x80000000U;
}
/*
 * GPC0 SWDX RM spill buffer: size in 256-byte units (default 0x4b0) and
 * 256-byte-aligned base address (bits 39:8).
 */
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void)
{
	return 0x005001dcU;
}
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void)
{
	return 0x000004b0U;
}
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void)
{
	return 0x00000100U;
}
static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void)
{
	return 0x005001d8U;
}
static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void)
{
	return 0x00000008U;
}
/*
 * Beta CB reserve controls (SWDX and PPCS CBM variants, 12-bit cbes
 * reserve field) plus the indexed SWDX TC beta-CB size array.
 */
static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void)
{
	return 0x004181e4U;
}
static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void)
{
	return 0x00000100U;
}
static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void)
{
	return 0x0041befcU;
}
static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v)
{
	return (v & 0xfffU) << 0U;
}
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i)
{
	return 0x00418ea0U + i*4U;
}
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v)
{
	return (v & 0x3fffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void)
{
	return 0x3fffffU << 0U;
}
/*
 * SWDX DSS ZBC tables: indexed color component arrays (R/G/B/A), depth (Z)
 * and stencil (S) value arrays, and their 01-to-04 format registers.
 */
static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i)
{
	return 0x00418010U + i*4U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i)
{
	return 0x0041804cU + i*4U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i)
{
	return 0x00418088U + i*4U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i)
{
	return 0x004180c4U + i*4U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void)
{
	return 0x00418100U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i)
{
	return 0x00418110U + i*4U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void)
{
	return 0x0041814cU;
}
static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i)
{
	return 0x0041815cU + i*4U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r(void)
{
	return 0x00418198U;
}
/* Broadcast setup attribute CB base: 4KB-aligned address plus valid bit. */
static inline u32 gr_gpcs_setup_attrib_cb_base_r(void)
{
	return 0x00418810U;
}
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void)
{
	return 0x0000000cU;
}
static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void)
{
	return 0x80000000U;
}
/*
 * CRSTR GPC map: indexed map array packing six 5-bit tile entries per
 * register, plus the map-table config (row offset, entry count).
 */
static inline u32 gr_crstr_gpc_map_r(u32 i)
{
	return 0x00418b08U + i*4U;
}
static inline u32 gr_crstr_gpc_map_tile0_f(u32 v)
{
	return (v & 0x1fU) << 0U;
}
static inline u32 gr_crstr_gpc_map_tile1_f(u32 v)
{
	return (v & 0x1fU) << 5U;
}
static inline u32 gr_crstr_gpc_map_tile2_f(u32 v)
{
	return (v & 0x1fU) << 10U;
}
static inline u32 gr_crstr_gpc_map_tile3_f(u32 v)
{
	return (v & 0x1fU) << 15U;
}
static inline u32 gr_crstr_gpc_map_tile4_f(u32 v)
{
	return (v & 0x1fU) << 20U;
}
static inline u32 gr_crstr_gpc_map_tile5_f(u32 v)
{
	return (v & 0x1fU) << 25U;
}
static inline u32 gr_crstr_map_table_cfg_r(void)
{
	return 0x00418bb8U;
}
static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v)
{
	return (v & 0xffU) << 0U;
}
static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
/*
 * ZCULL SM-in-GPC number map: indexed array packing eight 3-bit tile
 * entries per register on 4-bit boundaries.
 */
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i)
{
	return 0x00418980U + i*4U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v)
{
	return (v & 0x7U) << 0U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(u32 v)
{
	return (v & 0x7U) << 4U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(u32 v)
{
	return (v & 0x7U) << 8U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(u32 v)
{
	return (v & 0x7U) << 12U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(u32 v)
{
	return (v & 0x7U) << 16U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(u32 v)
{
	return (v & 0x7U) << 20U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(u32 v)
{
	return (v & 0x7U) << 24U;
}
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(u32 v)
{
	return (v & 0x7U) << 28U;
}
/* Broadcast GPM PD config and GCC pagepool (256B-aligned base, page count). */
static inline u32 gr_gpcs_gpm_pd_cfg_r(void)
{
	return 0x00418c6cU;
}
static inline u32 gr_gpcs_gcc_pagepool_base_r(void)
{
	return 0x00419004U;
}
static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 gr_gpcs_gcc_pagepool_r(void)
{
	return 0x00419008U;
}
static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v)
{
	return (v & 0x3ffU) << 0U;
}
/*
 * Broadcast TPC PE registers: VAF fast-mode-switch enable and the pin CB
 * global base address (28-bit address field, valid bit at 28).
 */
static inline u32 gr_gpcs_tpcs_pe_vaf_r(void)
{
	return 0x0041980cU;
}
static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void)
{
	return 0x10U;
}
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void)
{
	return 0x00419848U;
}
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v)
{
	return (v & 0x1U) << 28U;
}
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void)
{
	return 0x10000000U;
}
/*
 * Broadcast TPC MPC VTG registers: debug timeslice-mode enable (bit 3)
 * and the CB global base address (28-bit field, valid bit at 28).
 */
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void)
{
	return 0x00419c00U;
}
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void)
{
	return 0x0U;
}
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void)
{
	return 0x8U;
}
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void)
{
	return 0x00419c2cU;
}
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v)
{
	return (v & 0xfffffffU) << 0U;
}
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v)
{
	return (v & 0x1U) << 28U;
}
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void)
{
	return 0x10000000U;
}
/*
 * SM warp HWW ESR report masks: broadcast register plus the GPC0/TPC0/SM0
 * unicast register and its individual error-report bits.
 */
static inline u32 gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(void)
{
	return 0x00419ea8U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r(void)
{
	return 0x00504728U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f(void)
{
	return 0x2U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f(void)
{
	return 0x4U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f(void)
{
	return 0x10U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f(void)
{
	return 0x20U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f(void)
{
	return 0x40U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f(void)
{
	return 0x100U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void)
{
	return 0x200U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f(void)
{
	return 0x800U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f(void)
{
	return 0x2000U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f(void)
{
	return 0x4000U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f(void)
{
	return 0x8000U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f(void)
{
	return 0x10000U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void)
{
	return 0x40000U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f(void)
{
	return 0x800000U;
}
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f(void)
{
	return 0x400000U;
}
/*
 * TPC/GPC exception plumbing: broadcast and GPC0/TPC0 exception enables
 * (tex bit 0, sm bit 1, mpc bit 4), GPC-level exception enable (gcc bit 2,
 * per-TPC bits at 16), and the corresponding pending-status registers.
 */
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void)
{
	return 0x00419d0cU;
}
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void)
{
	return 0x2U;
}
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void)
{
	return 0x1U;
}
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void)
{
	return 0x10U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void)
{
	return 0x0050450cU;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void)
{
	return 0x2U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void)
{
	return 0x10U;
}
static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void)
{
	return 0x0041ac94U;
}
static inline u32 gr_gpcs_gpccs_gpc_exception_en_gcc_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v)
{
	return (v & 0xffU) << 16U;
}
static inline u32 gr_gpc0_gpccs_gpc_exception_r(void)
{
	return 0x00502c90U;
}
static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r)
{
	return (r >> 16U) & 0xffU;
}
static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void)
{
	return 0x00504508U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void)
{
	return 0x00000001U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void)
{
	return 0x1U << 4U;
}
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void)
{
	return 0x10U;
}
3435static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void)
3436{
3437 return 0x00504704U;
3438}
3439static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m(void)
3440{
3441 return 0x1U << 0U;
3442}
3443static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(u32 r)
3444{
3445 return (r >> 0U) & 0x1U;
3446}
3447static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v(void)
3448{
3449 return 0x00000001U;
3450}
3451static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f(void)
3452{
3453 return 0x1U;
3454}
3455static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_v(void)
3456{
3457 return 0x00000000U;
3458}
3459static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f(void)
3460{
3461 return 0x0U;
3462}
3463static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(void)
3464{
3465 return 0x1U << 31U;
3466}
3467static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f(void)
3468{
3469 return 0x80000000U;
3470}
3471static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f(void)
3472{
3473 return 0x0U;
3474}
3475static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(void)
3476{
3477 return 0x1U << 3U;
3478}
3479static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f(void)
3480{
3481 return 0x8U;
3482}
3483static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f(void)
3484{
3485 return 0x0U;
3486}
3487static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f(void)
3488{
3489 return 0x40000000U;
3490}
3491static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_0_r(void)
3492{
3493 return 0x00504708U;
3494}
3495static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_1_r(void)
3496{
3497 return 0x0050470cU;
3498}
3499static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r(void)
3500{
3501 return 0x00504710U;
3502}
3503static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r(void)
3504{
3505 return 0x00504714U;
3506}
3507static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r(void)
3508{
3509 return 0x00504718U;
3510}
3511static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r(void)
3512{
3513 return 0x0050471cU;
3514}
3515static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(void)
3516{
3517 return 0x00419e90U;
3518}
3519static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r(void)
3520{
3521 return 0x00419e94U;
3522}
3523static inline u32 gr_gpcs_tpcs_sms_dbgr_status0_r(void)
3524{
3525 return 0x00419e80U;
3526}
3527static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_r(void)
3528{
3529 return 0x00504700U;
3530}
3531static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_sm_in_trap_mode_v(u32 r)
3532{
3533 return (r >> 0U) & 0x1U;
3534}
3535static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(u32 r)
3536{
3537 return (r >> 4U) & 0x1U;
3538}
3539static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v(void)
3540{
3541 return 0x00000001U;
3542}
3543static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_r(void)
3544{
3545 return 0x00504730U;
3546}
3547static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(u32 r)
3548{
3549 return (r >> 0U) & 0xffffU;
3550}
3551static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v(void)
3552{
3553 return 0x00000000U;
3554}
3555static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_f(void)
3556{
3557 return 0x0U;
3558}
3559static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m(void)
3560{
3561 return 0xffU << 16U;
3562}
3563static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(void)
3564{
3565 return 0xfU << 24U;
3566}
3567static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f(void)
3568{
3569 return 0x0U;
3570}
3571static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r(void)
3572{
3573 return 0x0050460cU;
3574}
3575static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(u32 r)
3576{
3577 return (r >> 0U) & 0x1U;
3578}
3579static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(u32 r)
3580{
3581 return (r >> 1U) & 0x1U;
3582}
3583static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r(void)
3584{
3585 return 0x00504738U;
3586}
3587static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void)
3588{
3589 return 0x005043a0U;
3590}
3591static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void)
3592{
3593 return 0x00419ba0U;
3594}
3595static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void)
3596{
3597 return 0x1U << 4U;
3598}
3599static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v)
3600{
3601 return (v & 0x1U) << 4U;
3602}
3603static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void)
3604{
3605 return 0x005043b0U;
3606}
3607static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void)
3608{
3609 return 0x00419bb0U;
3610}
3611static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void)
3612{
3613 return 0x1U << 0U;
3614}
3615static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v)
3616{
3617 return (v & 0x1U) << 0U;
3618}
3619static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void)
3620{
3621 return 0x0041be08U;
3622}
3623static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void)
3624{
3625 return 0x4U;
3626}
3627static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i)
3628{
3629 return 0x0041bf00U + i*4U;
3630}
3631static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void)
3632{
3633 return 0x0041bfd0U;
3634}
3635static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v)
3636{
3637 return (v & 0xffU) << 0U;
3638}
3639static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v)
3640{
3641 return (v & 0xffU) << 8U;
3642}
3643static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v)
3644{
3645 return (v & 0x1fU) << 16U;
3646}
3647static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v)
3648{
3649 return (v & 0x7U) << 21U;
3650}
3651static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void)
3652{
3653 return 0x0041bfd4U;
3654}
3655static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v)
3656{
3657 return (v & 0xffffffU) << 0U;
3658}
3659static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i)
3660{
3661 return 0x0041bfb0U + i*4U;
3662}
3663static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void)
3664{
3665 return 0x00000005U;
3666}
3667static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(u32 v)
3668{
3669 return (v & 0xffU) << 0U;
3670}
3671static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(u32 v)
3672{
3673 return (v & 0xffU) << 8U;
3674}
3675static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(u32 v)
3676{
3677 return (v & 0xffU) << 16U;
3678}
3679static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(u32 v)
3680{
3681 return (v & 0xffU) << 24U;
3682}
3683static inline u32 gr_bes_zrop_settings_r(void)
3684{
3685 return 0x00408850U;
3686}
3687static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v)
3688{
3689 return (v & 0xfU) << 0U;
3690}
3691static inline u32 gr_be0_crop_debug3_r(void)
3692{
3693 return 0x00410108U;
3694}
3695static inline u32 gr_bes_crop_debug3_r(void)
3696{
3697 return 0x00408908U;
3698}
3699static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void)
3700{
3701 return 0x1U << 31U;
3702}
3703static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void)
3704{
3705 return 0x1U << 1U;
3706}
3707static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void)
3708{
3709 return 0x0U;
3710}
3711static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void)
3712{
3713 return 0x2U;
3714}
3715static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void)
3716{
3717 return 0x1U << 2U;
3718}
3719static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void)
3720{
3721 return 0x0U;
3722}
3723static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void)
3724{
3725 return 0x4U;
3726}
3727static inline u32 gr_bes_crop_settings_r(void)
3728{
3729 return 0x00408958U;
3730}
3731static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v)
3732{
3733 return (v & 0xfU) << 0U;
3734}
3735static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void)
3736{
3737 return 0x00000020U;
3738}
3739static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void)
3740{
3741 return 0x00000020U;
3742}
3743static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void)
3744{
3745 return 0x000000c0U;
3746}
3747static inline u32 gr_zcull_subregion_qty_v(void)
3748{
3749 return 0x00000010U;
3750}
3751static inline u32 gr_gpcs_tpcs_tex_in_dbg_r(void)
3752{
3753 return 0x00419a00U;
3754}
3755static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(u32 v)
3756{
3757 return (v & 0x1U) << 19U;
3758}
3759static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(void)
3760{
3761 return 0x1U << 19U;
3762}
3763static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_r(void)
3764{
3765 return 0x00419bf0U;
3766}
3767static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(u32 v)
3768{
3769 return (v & 0x1U) << 5U;
3770}
3771static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(void)
3772{
3773 return 0x1U << 5U;
3774}
3775static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(u32 v)
3776{
3777 return (v & 0x1U) << 10U;
3778}
3779static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(void)
3780{
3781 return 0x1U << 10U;
3782}
3783static inline u32 gr_fe_pwr_mode_r(void)
3784{
3785 return 0x00404170U;
3786}
3787static inline u32 gr_fe_pwr_mode_mode_auto_f(void)
3788{
3789 return 0x0U;
3790}
3791static inline u32 gr_fe_pwr_mode_mode_force_on_f(void)
3792{
3793 return 0x2U;
3794}
3795static inline u32 gr_fe_pwr_mode_req_v(u32 r)
3796{
3797 return (r >> 4U) & 0x1U;
3798}
3799static inline u32 gr_fe_pwr_mode_req_send_f(void)
3800{
3801 return 0x10U;
3802}
3803static inline u32 gr_fe_pwr_mode_req_done_v(void)
3804{
3805 return 0x00000000U;
3806}
3807static inline u32 gr_gpcs_pri_mmu_ctrl_r(void)
3808{
3809 return 0x00418880U;
3810}
3811static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void)
3812{
3813 return 0x1U << 0U;
3814}
3815static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void)
3816{
3817 return 0x1U << 11U;
3818}
3819static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void)
3820{
3821 return 0x1U << 1U;
3822}
3823static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void)
3824{
3825 return 0x1U << 2U;
3826}
3827static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void)
3828{
3829 return 0x3U << 3U;
3830}
3831static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void)
3832{
3833 return 0x3U << 5U;
3834}
3835static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void)
3836{
3837 return 0x3U << 28U;
3838}
3839static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void)
3840{
3841 return 0x1U << 30U;
3842}
3843static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void)
3844{
3845 return 0x1U << 31U;
3846}
3847static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void)
3848{
3849 return 0x00418890U;
3850}
3851static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void)
3852{
3853 return 0x00418894U;
3854}
3855static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void)
3856{
3857 return 0x004188b0U;
3858}
3859static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r)
3860{
3861 return (r >> 16U) & 0x1U;
3862}
3863static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void)
3864{
3865 return 0x00000001U;
3866}
3867static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void)
3868{
3869 return 0x004188b4U;
3870}
3871static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void)
3872{
3873 return 0x004188b8U;
3874}
3875static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void)
3876{
3877 return 0x004188acU;
3878}
3879static inline u32 gr_gpcs_tpcs_sms_dbgr_control0_r(void)
3880{
3881 return 0x00419e84U;
3882}
3883static inline u32 gr_fe_gfxp_wfi_timeout_r(void)
3884{
3885 return 0x004041c0U;
3886}
3887static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v)
3888{
3889 return (v & 0xffffffffU) << 0U;
3890}
3891static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void)
3892{
3893 return 0x0U;
3894}
3895static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void)
3896{
3897 return 0x00419bd8U;
3898}
3899static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v)
3900{
3901 return (v & 0x7U) << 8U;
3902}
3903static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void)
3904{
3905 return 0x7U << 8U;
3906}
3907static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void)
3908{
3909 return 0x100U;
3910}
3911static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void)
3912{
3913 return 0x00419ba4U;
3914}
3915static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void)
3916{
3917 return 0x3U << 11U;
3918}
3919static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void)
3920{
3921 return 0x1000U;
3922}
3923static inline u32 gr_gpcs_tc_debug0_r(void)
3924{
3925 return 0x00418708U;
3926}
3927static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v)
3928{
3929 return (v & 0x1ffU) << 0U;
3930}
3931static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void)
3932{
3933 return 0x1ffU << 0U;
3934}
3935#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h
new file mode 100644
index 00000000..3543f0b7
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h
@@ -0,0 +1,619 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ltc_gv100_h_
57#define _hw_ltc_gv100_h_
58
59static inline u32 ltc_pltcg_base_v(void)
60{
61 return 0x00140000U;
62}
63static inline u32 ltc_pltcg_extent_v(void)
64{
65 return 0x0017ffffU;
66}
67static inline u32 ltc_ltc0_ltss_v(void)
68{
69 return 0x00140200U;
70}
71static inline u32 ltc_ltc0_lts0_v(void)
72{
73 return 0x00140400U;
74}
75static inline u32 ltc_ltcs_ltss_v(void)
76{
77 return 0x0017e200U;
78}
79static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void)
80{
81 return 0x0014046cU;
82}
83static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void)
84{
85 return 0x00140518U;
86}
87static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void)
88{
89 return 0x0017e318U;
90}
91static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void)
92{
93 return 0x1U << 15U;
94}
95static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void)
96{
97 return 0x00140494U;
98}
99static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r)
100{
101 return (r >> 0U) & 0xffffU;
102}
103static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r)
104{
105 return (r >> 16U) & 0x3U;
106}
107static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void)
108{
109 return 0x00000000U;
110}
111static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void)
112{
113 return 0x00000001U;
114}
115static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void)
116{
117 return 0x00000002U;
118}
119static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void)
120{
121 return 0x0017e26cU;
122}
123static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void)
124{
125 return 0x1U;
126}
127static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void)
128{
129 return 0x2U;
130}
131static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r)
132{
133 return (r >> 2U) & 0x1U;
134}
135static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void)
136{
137 return 0x00000001U;
138}
139static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void)
140{
141 return 0x4U;
142}
143static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void)
144{
145 return 0x0014046cU;
146}
147static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void)
148{
149 return 0x0017e270U;
150}
151static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v)
152{
153 return (v & 0x3ffffU) << 0U;
154}
155static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void)
156{
157 return 0x0017e274U;
158}
159static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v)
160{
161 return (v & 0x3ffffU) << 0U;
162}
163static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void)
164{
165 return 0x0003ffffU;
166}
167static inline u32 ltc_ltcs_ltss_cbc_base_r(void)
168{
169 return 0x0017e278U;
170}
171static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void)
172{
173 return 0x0000000bU;
174}
175static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r)
176{
177 return (r >> 0U) & 0x3ffffffU;
178}
179static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void)
180{
181 return 0x0017e27cU;
182}
183static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r)
184{
185 return (r >> 0U) & 0x1fU;
186}
187static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v)
188{
189 return (v & 0x1U) << 24U;
190}
191static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r)
192{
193 return (r >> 24U) & 0x1U;
194}
195static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v)
196{
197 return (v & 0x1U) << 25U;
198}
199static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r)
200{
201 return (r >> 25U) & 0x1U;
202}
203static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void)
204{
205 return 0x0017e000U;
206}
207static inline u32 ltc_ltcs_ltss_cbc_param_r(void)
208{
209 return 0x0017e280U;
210}
211static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r)
212{
213 return (r >> 0U) & 0xffffU;
214}
215static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r)
216{
217 return (r >> 24U) & 0xfU;
218}
219static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r)
220{
221 return (r >> 28U) & 0xfU;
222}
223static inline u32 ltc_ltcs_ltss_cbc_param2_r(void)
224{
225 return 0x0017e3f4U;
226}
227static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r)
228{
229 return (r >> 0U) & 0xffffU;
230}
231static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void)
232{
233 return 0x0017e2acU;
234}
235static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v)
236{
237 return (v & 0x1fU) << 16U;
238}
239static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void)
240{
241 return 0x0017e338U;
242}
243static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v)
244{
245 return (v & 0xfU) << 0U;
246}
247static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i)
248{
249 return 0x0017e33cU + i*4U;
250}
251static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void)
252{
253 return 0x00000004U;
254}
255static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void)
256{
257 return 0x0017e34cU;
258}
259static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void)
260{
261 return 32U;
262}
263static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v)
264{
265 return (v & 0xffffffffU) << 0U;
266}
267static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void)
268{
269 return 0xffffffffU << 0U;
270}
271static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r)
272{
273 return (r >> 0U) & 0xffffffffU;
274}
275static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void)
276{
277 return 0x0017e204U;
278}
279static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void)
280{
281 return 8U;
282}
283static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v)
284{
285 return (v & 0xffU) << 0U;
286}
287static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void)
288{
289 return 0xffU << 0U;
290}
291static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r)
292{
293 return (r >> 0U) & 0xffU;
294}
295static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void)
296{
297 return 0x0017e2b0U;
298}
299static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void)
300{
301 return 0x10000000U;
302}
303static inline u32 ltc_ltcs_ltss_g_elpg_r(void)
304{
305 return 0x0017e214U;
306}
307static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r)
308{
309 return (r >> 0U) & 0x1U;
310}
311static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void)
312{
313 return 0x00000001U;
314}
315static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void)
316{
317 return 0x1U;
318}
319static inline u32 ltc_ltc0_ltss_g_elpg_r(void)
320{
321 return 0x00140214U;
322}
323static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r)
324{
325 return (r >> 0U) & 0x1U;
326}
327static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void)
328{
329 return 0x00000001U;
330}
331static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void)
332{
333 return 0x1U;
334}
335static inline u32 ltc_ltc1_ltss_g_elpg_r(void)
336{
337 return 0x00142214U;
338}
339static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r)
340{
341 return (r >> 0U) & 0x1U;
342}
343static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void)
344{
345 return 0x00000001U;
346}
347static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void)
348{
349 return 0x1U;
350}
351static inline u32 ltc_ltcs_ltss_intr_r(void)
352{
353 return 0x0017e20cU;
354}
355static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void)
356{
357 return 0x100U;
358}
359static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void)
360{
361 return 0x200U;
362}
363static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void)
364{
365 return 0x1U << 20U;
366}
367static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void)
368{
369 return 0x1U << 30U;
370}
371static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void)
372{
373 return 0x1000000U;
374}
375static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void)
376{
377 return 0x2000000U;
378}
379static inline u32 ltc_ltc0_lts0_intr_r(void)
380{
381 return 0x0014040cU;
382}
383static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void)
384{
385 return 0x0014051cU;
386}
387static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void)
388{
389 return 0xffU << 0U;
390}
391static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r)
392{
393 return (r >> 0U) & 0xffU;
394}
395static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void)
396{
397 return 0xffU << 16U;
398}
399static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r)
400{
401 return (r >> 16U) & 0xffU;
402}
403static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void)
404{
405 return 0x0017e2a0U;
406}
407static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r)
408{
409 return (r >> 0U) & 0x1U;
410}
411static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void)
412{
413 return 0x00000001U;
414}
415static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void)
416{
417 return 0x1U;
418}
419static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r)
420{
421 return (r >> 8U) & 0xfU;
422}
423static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void)
424{
425 return 0x00000003U;
426}
427static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void)
428{
429 return 0x300U;
430}
431static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r)
432{
433 return (r >> 28U) & 0x1U;
434}
435static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void)
436{
437 return 0x00000001U;
438}
439static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void)
440{
441 return 0x10000000U;
442}
443static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r)
444{
445 return (r >> 29U) & 0x1U;
446}
447static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void)
448{
449 return 0x00000001U;
450}
451static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void)
452{
453 return 0x20000000U;
454}
455static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r)
456{
457 return (r >> 30U) & 0x1U;
458}
459static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void)
460{
461 return 0x00000001U;
462}
463static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void)
464{
465 return 0x40000000U;
466}
467static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void)
468{
469 return 0x0017e2a4U;
470}
471static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r)
472{
473 return (r >> 0U) & 0x1U;
474}
475static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void)
476{
477 return 0x00000001U;
478}
479static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void)
480{
481 return 0x1U;
482}
483static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r)
484{
485 return (r >> 8U) & 0xfU;
486}
487static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void)
488{
489 return 0x00000003U;
490}
491static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void)
492{
493 return 0x300U;
494}
495static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r)
496{
497 return (r >> 16U) & 0x1U;
498}
499static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void)
500{
501 return 0x00000001U;
502}
503static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void)
504{
505 return 0x10000U;
506}
507static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r)
508{
509 return (r >> 28U) & 0x1U;
510}
511static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void)
512{
513 return 0x00000001U;
514}
515static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void)
516{
517 return 0x10000000U;
518}
519static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r)
520{
521 return (r >> 29U) & 0x1U;
522}
523static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void)
524{
525 return 0x00000001U;
526}
527static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void)
528{
529 return 0x20000000U;
530}
531static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r)
532{
533 return (r >> 30U) & 0x1U;
534}
535static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void)
536{
537 return 0x00000001U;
538}
539static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void)
540{
541 return 0x40000000U;
542}
543static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void)
544{
545 return 0x001402a0U;
546}
547static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r)
548{
549 return (r >> 0U) & 0x1U;
550}
551static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void)
552{
553 return 0x00000001U;
554}
555static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void)
556{
557 return 0x1U;
558}
559static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void)
560{
561 return 0x001402a4U;
562}
563static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r)
564{
565 return (r >> 0U) & 0x1U;
566}
567static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void)
568{
569 return 0x00000001U;
570}
571static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void)
572{
573 return 0x1U;
574}
575static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void)
576{
577 return 0x001422a0U;
578}
579static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r)
580{
581 return (r >> 0U) & 0x1U;
582}
583static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void)
584{
585 return 0x00000001U;
586}
587static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void)
588{
589 return 0x1U;
590}
591static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void)
592{
593 return 0x001422a4U;
594}
595static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r)
596{
597 return (r >> 0U) & 0x1U;
598}
599static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void)
600{
601 return 0x00000001U;
602}
603static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void)
604{
605 return 0x1U;
606}
607static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void)
608{
609 return 0x0014058cU;
610}
611static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r)
612{
613 return (r >> 0U) & 0xffffU;
614}
615static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r)
616{
617 return (r >> 16U) & 0x1fU;
618}
619#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h
new file mode 100644
index 00000000..f367991e
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h
@@ -0,0 +1,259 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_mc_gv100_h_
57#define _hw_mc_gv100_h_
58
59static inline u32 mc_boot_0_r(void)
60{
61 return 0x00000000U;
62}
63static inline u32 mc_boot_0_architecture_v(u32 r)
64{
65 return (r >> 24U) & 0x1fU;
66}
67static inline u32 mc_boot_0_implementation_v(u32 r)
68{
69 return (r >> 20U) & 0xfU;
70}
71static inline u32 mc_boot_0_major_revision_v(u32 r)
72{
73 return (r >> 4U) & 0xfU;
74}
75static inline u32 mc_boot_0_minor_revision_v(u32 r)
76{
77 return (r >> 0U) & 0xfU;
78}
79static inline u32 mc_intr_r(u32 i)
80{
81 return 0x00000100U + i*4U;
82}
83static inline u32 mc_intr_pfifo_pending_f(void)
84{
85 return 0x100U;
86}
87static inline u32 mc_intr_hub_pending_f(void)
88{
89 return 0x200U;
90}
91static inline u32 mc_intr_pgraph_pending_f(void)
92{
93 return 0x1000U;
94}
95static inline u32 mc_intr_pmu_pending_f(void)
96{
97 return 0x1000000U;
98}
99static inline u32 mc_intr_ltc_pending_f(void)
100{
101 return 0x2000000U;
102}
103static inline u32 mc_intr_priv_ring_pending_f(void)
104{
105 return 0x40000000U;
106}
107static inline u32 mc_intr_pbus_pending_f(void)
108{
109 return 0x10000000U;
110}
111static inline u32 mc_intr_en_r(u32 i)
112{
113 return 0x00000140U + i*4U;
114}
115static inline u32 mc_intr_en_set_r(u32 i)
116{
117 return 0x00000160U + i*4U;
118}
119static inline u32 mc_intr_en_clear_r(u32 i)
120{
121 return 0x00000180U + i*4U;
122}
123static inline u32 mc_enable_r(void)
124{
125 return 0x00000200U;
126}
127static inline u32 mc_enable_xbar_enabled_f(void)
128{
129 return 0x4U;
130}
131static inline u32 mc_enable_l2_enabled_f(void)
132{
133 return 0x8U;
134}
135static inline u32 mc_enable_pmedia_s(void)
136{
137 return 1U;
138}
139static inline u32 mc_enable_pmedia_f(u32 v)
140{
141 return (v & 0x1U) << 4U;
142}
143static inline u32 mc_enable_pmedia_m(void)
144{
145 return 0x1U << 4U;
146}
147static inline u32 mc_enable_pmedia_v(u32 r)
148{
149 return (r >> 4U) & 0x1U;
150}
151static inline u32 mc_enable_ce0_m(void)
152{
153 return 0x1U << 6U;
154}
155static inline u32 mc_enable_pfifo_enabled_f(void)
156{
157 return 0x100U;
158}
159static inline u32 mc_enable_pgraph_enabled_f(void)
160{
161 return 0x1000U;
162}
163static inline u32 mc_enable_pwr_v(u32 r)
164{
165 return (r >> 13U) & 0x1U;
166}
167static inline u32 mc_enable_pwr_disabled_v(void)
168{
169 return 0x00000000U;
170}
171static inline u32 mc_enable_pwr_enabled_f(void)
172{
173 return 0x2000U;
174}
175static inline u32 mc_enable_pfb_enabled_f(void)
176{
177 return 0x100000U;
178}
179static inline u32 mc_enable_ce2_m(void)
180{
181 return 0x1U << 21U;
182}
183static inline u32 mc_enable_ce2_enabled_f(void)
184{
185 return 0x200000U;
186}
187static inline u32 mc_enable_blg_enabled_f(void)
188{
189 return 0x8000000U;
190}
191static inline u32 mc_enable_perfmon_enabled_f(void)
192{
193 return 0x10000000U;
194}
195static inline u32 mc_enable_hub_enabled_f(void)
196{
197 return 0x20000000U;
198}
199static inline u32 mc_enable_nvdec_disabled_v(void)
200{
201 return 0x00000000U;
202}
203static inline u32 mc_enable_nvdec_enabled_f(void)
204{
205 return 0x8000U;
206}
207static inline u32 mc_intr_ltc_r(void)
208{
209 return 0x000001c0U;
210}
211static inline u32 mc_enable_pb_r(void)
212{
213 return 0x00000204U;
214}
215static inline u32 mc_enable_pb_0_s(void)
216{
217 return 1U;
218}
219static inline u32 mc_enable_pb_0_f(u32 v)
220{
221 return (v & 0x1U) << 0U;
222}
223static inline u32 mc_enable_pb_0_m(void)
224{
225 return 0x1U << 0U;
226}
227static inline u32 mc_enable_pb_0_v(u32 r)
228{
229 return (r >> 0U) & 0x1U;
230}
231static inline u32 mc_enable_pb_0_enabled_v(void)
232{
233 return 0x00000001U;
234}
235static inline u32 mc_enable_pb_sel_f(u32 v, u32 i)
236{
237 return (v & 0x1U) << (0U + i*1U);
238}
239static inline u32 mc_elpg_enable_r(void)
240{
241 return 0x0000020cU;
242}
243static inline u32 mc_elpg_enable_xbar_enabled_f(void)
244{
245 return 0x4U;
246}
247static inline u32 mc_elpg_enable_pfb_enabled_f(void)
248{
249 return 0x100000U;
250}
251static inline u32 mc_elpg_enable_hub_enabled_f(void)
252{
253 return 0x20000000U;
254}
255static inline u32 mc_elpg_enable_l2_enabled_f(void)
256{
257 return 0x8U;
258}
259#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h
new file mode 100644
index 00000000..66a0737c
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h
@@ -0,0 +1,659 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pbdma_gv100_h_
57#define _hw_pbdma_gv100_h_
58
59static inline u32 pbdma_gp_entry1_r(void)
60{
61 return 0x10000004U;
62}
63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r)
64{
65 return (r >> 0U) & 0xffU;
66}
67static inline u32 pbdma_gp_entry1_length_f(u32 v)
68{
69 return (v & 0x1fffffU) << 10U;
70}
71static inline u32 pbdma_gp_entry1_length_v(u32 r)
72{
73 return (r >> 10U) & 0x1fffffU;
74}
75static inline u32 pbdma_gp_base_r(u32 i)
76{
77 return 0x00040048U + i*8192U;
78}
79static inline u32 pbdma_gp_base__size_1_v(void)
80{
81 return 0x0000000eU;
82}
83static inline u32 pbdma_gp_base_offset_f(u32 v)
84{
85 return (v & 0x1fffffffU) << 3U;
86}
87static inline u32 pbdma_gp_base_rsvd_s(void)
88{
89 return 3U;
90}
91static inline u32 pbdma_gp_base_hi_r(u32 i)
92{
93 return 0x0004004cU + i*8192U;
94}
95static inline u32 pbdma_gp_base_hi_offset_f(u32 v)
96{
97 return (v & 0xffU) << 0U;
98}
99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v)
100{
101 return (v & 0x1fU) << 16U;
102}
103static inline u32 pbdma_gp_fetch_r(u32 i)
104{
105 return 0x00040050U + i*8192U;
106}
107static inline u32 pbdma_gp_get_r(u32 i)
108{
109 return 0x00040014U + i*8192U;
110}
111static inline u32 pbdma_gp_put_r(u32 i)
112{
113 return 0x00040000U + i*8192U;
114}
115static inline u32 pbdma_pb_fetch_r(u32 i)
116{
117 return 0x00040054U + i*8192U;
118}
119static inline u32 pbdma_pb_fetch_hi_r(u32 i)
120{
121 return 0x00040058U + i*8192U;
122}
123static inline u32 pbdma_get_r(u32 i)
124{
125 return 0x00040018U + i*8192U;
126}
127static inline u32 pbdma_get_hi_r(u32 i)
128{
129 return 0x0004001cU + i*8192U;
130}
131static inline u32 pbdma_put_r(u32 i)
132{
133 return 0x0004005cU + i*8192U;
134}
135static inline u32 pbdma_put_hi_r(u32 i)
136{
137 return 0x00040060U + i*8192U;
138}
139static inline u32 pbdma_pb_header_r(u32 i)
140{
141 return 0x00040084U + i*8192U;
142}
143static inline u32 pbdma_pb_header_priv_user_f(void)
144{
145 return 0x0U;
146}
147static inline u32 pbdma_pb_header_method_zero_f(void)
148{
149 return 0x0U;
150}
151static inline u32 pbdma_pb_header_subchannel_zero_f(void)
152{
153 return 0x0U;
154}
155static inline u32 pbdma_pb_header_level_main_f(void)
156{
157 return 0x0U;
158}
159static inline u32 pbdma_pb_header_first_true_f(void)
160{
161 return 0x400000U;
162}
163static inline u32 pbdma_pb_header_type_inc_f(void)
164{
165 return 0x20000000U;
166}
167static inline u32 pbdma_pb_header_type_non_inc_f(void)
168{
169 return 0x60000000U;
170}
171static inline u32 pbdma_hdr_shadow_r(u32 i)
172{
173 return 0x00040118U + i*8192U;
174}
175static inline u32 pbdma_gp_shadow_0_r(u32 i)
176{
177 return 0x00040110U + i*8192U;
178}
179static inline u32 pbdma_gp_shadow_1_r(u32 i)
180{
181 return 0x00040114U + i*8192U;
182}
183static inline u32 pbdma_subdevice_r(u32 i)
184{
185 return 0x00040094U + i*8192U;
186}
187static inline u32 pbdma_subdevice_id_f(u32 v)
188{
189 return (v & 0xfffU) << 0U;
190}
191static inline u32 pbdma_subdevice_status_active_f(void)
192{
193 return 0x10000000U;
194}
195static inline u32 pbdma_subdevice_channel_dma_enable_f(void)
196{
197 return 0x20000000U;
198}
199static inline u32 pbdma_method0_r(u32 i)
200{
201 return 0x000400c0U + i*8192U;
202}
203static inline u32 pbdma_method0_fifo_size_v(void)
204{
205 return 0x00000004U;
206}
207static inline u32 pbdma_method0_addr_f(u32 v)
208{
209 return (v & 0xfffU) << 2U;
210}
211static inline u32 pbdma_method0_addr_v(u32 r)
212{
213 return (r >> 2U) & 0xfffU;
214}
215static inline u32 pbdma_method0_subch_v(u32 r)
216{
217 return (r >> 16U) & 0x7U;
218}
219static inline u32 pbdma_method0_first_true_f(void)
220{
221 return 0x400000U;
222}
223static inline u32 pbdma_method0_valid_true_f(void)
224{
225 return 0x80000000U;
226}
227static inline u32 pbdma_method1_r(u32 i)
228{
229 return 0x000400c8U + i*8192U;
230}
231static inline u32 pbdma_method2_r(u32 i)
232{
233 return 0x000400d0U + i*8192U;
234}
235static inline u32 pbdma_method3_r(u32 i)
236{
237 return 0x000400d8U + i*8192U;
238}
239static inline u32 pbdma_data0_r(u32 i)
240{
241 return 0x000400c4U + i*8192U;
242}
243static inline u32 pbdma_acquire_r(u32 i)
244{
245 return 0x00040030U + i*8192U;
246}
247static inline u32 pbdma_acquire_retry_man_2_f(void)
248{
249 return 0x2U;
250}
251static inline u32 pbdma_acquire_retry_exp_2_f(void)
252{
253 return 0x100U;
254}
255static inline u32 pbdma_acquire_timeout_exp_f(u32 v)
256{
257 return (v & 0xfU) << 11U;
258}
259static inline u32 pbdma_acquire_timeout_exp_max_v(void)
260{
261 return 0x0000000fU;
262}
263static inline u32 pbdma_acquire_timeout_exp_max_f(void)
264{
265 return 0x7800U;
266}
267static inline u32 pbdma_acquire_timeout_man_f(u32 v)
268{
269 return (v & 0xffffU) << 15U;
270}
271static inline u32 pbdma_acquire_timeout_man_max_v(void)
272{
273 return 0x0000ffffU;
274}
275static inline u32 pbdma_acquire_timeout_man_max_f(void)
276{
277 return 0x7fff8000U;
278}
279static inline u32 pbdma_acquire_timeout_en_enable_f(void)
280{
281 return 0x80000000U;
282}
283static inline u32 pbdma_acquire_timeout_en_disable_f(void)
284{
285 return 0x0U;
286}
287static inline u32 pbdma_status_r(u32 i)
288{
289 return 0x00040100U + i*8192U;
290}
291static inline u32 pbdma_channel_r(u32 i)
292{
293 return 0x00040120U + i*8192U;
294}
295static inline u32 pbdma_signature_r(u32 i)
296{
297 return 0x00040010U + i*8192U;
298}
299static inline u32 pbdma_signature_hw_valid_f(void)
300{
301 return 0xfaceU;
302}
303static inline u32 pbdma_signature_sw_zero_f(void)
304{
305 return 0x0U;
306}
307static inline u32 pbdma_userd_r(u32 i)
308{
309 return 0x00040008U + i*8192U;
310}
311static inline u32 pbdma_userd_target_vid_mem_f(void)
312{
313 return 0x0U;
314}
315static inline u32 pbdma_userd_target_sys_mem_coh_f(void)
316{
317 return 0x2U;
318}
319static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void)
320{
321 return 0x3U;
322}
323static inline u32 pbdma_userd_addr_f(u32 v)
324{
325 return (v & 0x7fffffU) << 9U;
326}
327static inline u32 pbdma_config_r(u32 i)
328{
329 return 0x000400f4U + i*8192U;
330}
331static inline u32 pbdma_config_l2_evict_first_f(void)
332{
333 return 0x0U;
334}
335static inline u32 pbdma_config_l2_evict_normal_f(void)
336{
337 return 0x1U;
338}
339static inline u32 pbdma_config_l2_evict_last_f(void)
340{
341 return 0x2U;
342}
343static inline u32 pbdma_config_ce_split_enable_f(void)
344{
345 return 0x0U;
346}
347static inline u32 pbdma_config_ce_split_disable_f(void)
348{
349 return 0x10U;
350}
351static inline u32 pbdma_config_auth_level_non_privileged_f(void)
352{
353 return 0x0U;
354}
355static inline u32 pbdma_config_auth_level_privileged_f(void)
356{
357 return 0x100U;
358}
359static inline u32 pbdma_config_userd_writeback_disable_f(void)
360{
361 return 0x0U;
362}
363static inline u32 pbdma_config_userd_writeback_enable_f(void)
364{
365 return 0x1000U;
366}
367static inline u32 pbdma_userd_hi_r(u32 i)
368{
369 return 0x0004000cU + i*8192U;
370}
371static inline u32 pbdma_userd_hi_addr_f(u32 v)
372{
373 return (v & 0xffU) << 0U;
374}
375static inline u32 pbdma_hce_ctrl_r(u32 i)
376{
377 return 0x000400e4U + i*8192U;
378}
379static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
380{
381 return 0x20U;
382}
383static inline u32 pbdma_intr_0_r(u32 i)
384{
385 return 0x00040108U + i*8192U;
386}
387static inline u32 pbdma_intr_0_memreq_v(u32 r)
388{
389 return (r >> 0U) & 0x1U;
390}
391static inline u32 pbdma_intr_0_memreq_pending_f(void)
392{
393 return 0x1U;
394}
395static inline u32 pbdma_intr_0_memack_timeout_pending_f(void)
396{
397 return 0x2U;
398}
399static inline u32 pbdma_intr_0_memack_extra_pending_f(void)
400{
401 return 0x4U;
402}
403static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void)
404{
405 return 0x8U;
406}
407static inline u32 pbdma_intr_0_memdat_extra_pending_f(void)
408{
409 return 0x10U;
410}
411static inline u32 pbdma_intr_0_memflush_pending_f(void)
412{
413 return 0x20U;
414}
415static inline u32 pbdma_intr_0_memop_pending_f(void)
416{
417 return 0x40U;
418}
419static inline u32 pbdma_intr_0_lbconnect_pending_f(void)
420{
421 return 0x80U;
422}
423static inline u32 pbdma_intr_0_lbreq_pending_f(void)
424{
425 return 0x100U;
426}
427static inline u32 pbdma_intr_0_lback_timeout_pending_f(void)
428{
429 return 0x200U;
430}
431static inline u32 pbdma_intr_0_lback_extra_pending_f(void)
432{
433 return 0x400U;
434}
435static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void)
436{
437 return 0x800U;
438}
439static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void)
440{
441 return 0x1000U;
442}
443static inline u32 pbdma_intr_0_gpfifo_pending_f(void)
444{
445 return 0x2000U;
446}
447static inline u32 pbdma_intr_0_gpptr_pending_f(void)
448{
449 return 0x4000U;
450}
451static inline u32 pbdma_intr_0_gpentry_pending_f(void)
452{
453 return 0x8000U;
454}
455static inline u32 pbdma_intr_0_gpcrc_pending_f(void)
456{
457 return 0x10000U;
458}
459static inline u32 pbdma_intr_0_pbptr_pending_f(void)
460{
461 return 0x20000U;
462}
463static inline u32 pbdma_intr_0_pbentry_pending_f(void)
464{
465 return 0x40000U;
466}
467static inline u32 pbdma_intr_0_pbcrc_pending_f(void)
468{
469 return 0x80000U;
470}
471static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void)
472{
473 return 0x100000U;
474}
475static inline u32 pbdma_intr_0_method_pending_f(void)
476{
477 return 0x200000U;
478}
479static inline u32 pbdma_intr_0_methodcrc_pending_f(void)
480{
481 return 0x400000U;
482}
483static inline u32 pbdma_intr_0_device_pending_f(void)
484{
485 return 0x800000U;
486}
487static inline u32 pbdma_intr_0_eng_reset_pending_f(void)
488{
489 return 0x1000000U;
490}
491static inline u32 pbdma_intr_0_semaphore_pending_f(void)
492{
493 return 0x2000000U;
494}
495static inline u32 pbdma_intr_0_acquire_pending_f(void)
496{
497 return 0x4000000U;
498}
499static inline u32 pbdma_intr_0_pri_pending_f(void)
500{
501 return 0x8000000U;
502}
503static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void)
504{
505 return 0x20000000U;
506}
507static inline u32 pbdma_intr_0_pbseg_pending_f(void)
508{
509 return 0x40000000U;
510}
511static inline u32 pbdma_intr_0_signature_pending_f(void)
512{
513 return 0x80000000U;
514}
515static inline u32 pbdma_intr_1_r(u32 i)
516{
517 return 0x00040148U + i*8192U;
518}
519static inline u32 pbdma_intr_1_ctxnotvalid_m(void)
520{
521 return 0x1U << 31U;
522}
523static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void)
524{
525 return 0x80000000U;
526}
527static inline u32 pbdma_intr_en_0_r(u32 i)
528{
529 return 0x0004010cU + i*8192U;
530}
531static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void)
532{
533 return 0x100U;
534}
535static inline u32 pbdma_intr_en_1_r(u32 i)
536{
537 return 0x0004014cU + i*8192U;
538}
539static inline u32 pbdma_intr_stall_r(u32 i)
540{
541 return 0x0004013cU + i*8192U;
542}
543static inline u32 pbdma_intr_stall_lbreq_enabled_f(void)
544{
545 return 0x100U;
546}
547static inline u32 pbdma_intr_stall_1_r(u32 i)
548{
549 return 0x00040140U + i*8192U;
550}
551static inline u32 pbdma_udma_nop_r(void)
552{
553 return 0x00000008U;
554}
555static inline u32 pbdma_runlist_timeslice_r(u32 i)
556{
557 return 0x000400f8U + i*8192U;
558}
559static inline u32 pbdma_runlist_timeslice_timeout_128_f(void)
560{
561 return 0x80U;
562}
563static inline u32 pbdma_runlist_timeslice_timescale_3_f(void)
564{
565 return 0x3000U;
566}
567static inline u32 pbdma_runlist_timeslice_enable_true_f(void)
568{
569 return 0x10000000U;
570}
571static inline u32 pbdma_target_r(u32 i)
572{
573 return 0x000400acU + i*8192U;
574}
575static inline u32 pbdma_target_engine_sw_f(void)
576{
577 return 0x1fU;
578}
579static inline u32 pbdma_target_eng_ctx_valid_true_f(void)
580{
581 return 0x10000U;
582}
583static inline u32 pbdma_target_eng_ctx_valid_false_f(void)
584{
585 return 0x0U;
586}
587static inline u32 pbdma_target_ce_ctx_valid_true_f(void)
588{
589 return 0x20000U;
590}
591static inline u32 pbdma_target_ce_ctx_valid_false_f(void)
592{
593 return 0x0U;
594}
595static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void)
596{
597 return 0x0U;
598}
599static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void)
600{
601 return 0x1000000U;
602}
603static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void)
604{
605 return 0x2000000U;
606}
607static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void)
608{
609 return 0x3000000U;
610}
611static inline u32 pbdma_target_should_send_tsg_event_true_f(void)
612{
613 return 0x20000000U;
614}
615static inline u32 pbdma_target_should_send_tsg_event_false_f(void)
616{
617 return 0x0U;
618}
619static inline u32 pbdma_target_needs_host_tsg_event_true_f(void)
620{
621 return 0x80000000U;
622}
623static inline u32 pbdma_target_needs_host_tsg_event_false_f(void)
624{
625 return 0x0U;
626}
627static inline u32 pbdma_set_channel_info_r(u32 i)
628{
629 return 0x000400fcU + i*8192U;
630}
631static inline u32 pbdma_set_channel_info_scg_type_graphics_compute0_f(void)
632{
633 return 0x0U;
634}
635static inline u32 pbdma_set_channel_info_scg_type_compute1_f(void)
636{
637 return 0x1U;
638}
639static inline u32 pbdma_set_channel_info_veid_f(u32 v)
640{
641 return (v & 0x3fU) << 8U;
642}
643static inline u32 pbdma_timeout_r(u32 i)
644{
645 return 0x0004012cU + i*8192U;
646}
647static inline u32 pbdma_timeout_period_m(void)
648{
649 return 0xffffffffU << 0U;
650}
651static inline u32 pbdma_timeout_period_max_f(void)
652{
653 return 0xffffffffU;
654}
655static inline u32 pbdma_timeout_period_init_f(void)
656{
657 return 0x10000U;
658}
659#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h
new file mode 100644
index 00000000..4fbe37cb
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h
@@ -0,0 +1,211 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_perf_gv100_h_
57#define _hw_perf_gv100_h_
58
59static inline u32 perf_pmasys_control_r(void)
60{
61 return 0x0024a000U;
62}
63static inline u32 perf_pmasys_control_membuf_status_v(u32 r)
64{
65 return (r >> 4U) & 0x1U;
66}
67static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void)
68{
69 return 0x00000001U;
70}
71static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void)
72{
73 return 0x10U;
74}
75static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v)
76{
77 return (v & 0x1U) << 5U;
78}
79static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r)
80{
81 return (r >> 5U) & 0x1U;
82}
83static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void)
84{
85 return 0x00000001U;
86}
87static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void)
88{
89 return 0x20U;
90}
91static inline u32 perf_pmasys_mem_block_r(void)
92{
93 return 0x0024a070U;
94}
95static inline u32 perf_pmasys_mem_block_base_f(u32 v)
96{
97 return (v & 0xfffffffU) << 0U;
98}
99static inline u32 perf_pmasys_mem_block_target_f(u32 v)
100{
101 return (v & 0x3U) << 28U;
102}
103static inline u32 perf_pmasys_mem_block_target_v(u32 r)
104{
105 return (r >> 28U) & 0x3U;
106}
107static inline u32 perf_pmasys_mem_block_target_lfb_v(void)
108{
109 return 0x00000000U;
110}
111static inline u32 perf_pmasys_mem_block_target_lfb_f(void)
112{
113 return 0x0U;
114}
115static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void)
116{
117 return 0x00000002U;
118}
119static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void)
120{
121 return 0x20000000U;
122}
123static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void)
124{
125 return 0x00000003U;
126}
127static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void)
128{
129 return 0x30000000U;
130}
131static inline u32 perf_pmasys_mem_block_valid_f(u32 v)
132{
133 return (v & 0x1U) << 31U;
134}
135static inline u32 perf_pmasys_mem_block_valid_v(u32 r)
136{
137 return (r >> 31U) & 0x1U;
138}
139static inline u32 perf_pmasys_mem_block_valid_true_v(void)
140{
141 return 0x00000001U;
142}
143static inline u32 perf_pmasys_mem_block_valid_true_f(void)
144{
145 return 0x80000000U;
146}
147static inline u32 perf_pmasys_mem_block_valid_false_v(void)
148{
149 return 0x00000000U;
150}
151static inline u32 perf_pmasys_mem_block_valid_false_f(void)
152{
153 return 0x0U;
154}
155static inline u32 perf_pmasys_outbase_r(void)
156{
157 return 0x0024a074U;
158}
159static inline u32 perf_pmasys_outbase_ptr_f(u32 v)
160{
161 return (v & 0x7ffffffU) << 5U;
162}
163static inline u32 perf_pmasys_outbaseupper_r(void)
164{
165 return 0x0024a078U;
166}
167static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v)
168{
169 return (v & 0xffU) << 0U;
170}
171static inline u32 perf_pmasys_outsize_r(void)
172{
173 return 0x0024a07cU;
174}
175static inline u32 perf_pmasys_outsize_numbytes_f(u32 v)
176{
177 return (v & 0x7ffffffU) << 5U;
178}
179static inline u32 perf_pmasys_mem_bytes_r(void)
180{
181 return 0x0024a084U;
182}
183static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v)
184{
185 return (v & 0xfffffffU) << 4U;
186}
187static inline u32 perf_pmasys_mem_bump_r(void)
188{
189 return 0x0024a088U;
190}
191static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v)
192{
193 return (v & 0xfffffffU) << 4U;
194}
195static inline u32 perf_pmasys_enginestatus_r(void)
196{
197 return 0x0024a0a4U;
198}
199static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v)
200{
201 return (v & 0x1U) << 4U;
202}
203static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void)
204{
205 return 0x00000001U;
206}
207static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void)
208{
209 return 0x10U;
210}
211#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h
new file mode 100644
index 00000000..8f005a22
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pram_gv100_h_
57#define _hw_pram_gv100_h_
58
59static inline u32 pram_data032_r(u32 i)
60{
61 return 0x00700000U + i*4U;
62}
63#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h
new file mode 100644
index 00000000..5eca93cc
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pri_ringmaster_gv100_h_
57#define _hw_pri_ringmaster_gv100_h_
58
59static inline u32 pri_ringmaster_command_r(void)
60{
61 return 0x0012004cU;
62}
63static inline u32 pri_ringmaster_command_cmd_m(void)
64{
65 return 0x3fU << 0U;
66}
67static inline u32 pri_ringmaster_command_cmd_v(u32 r)
68{
69 return (r >> 0U) & 0x3fU;
70}
71static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void)
72{
73 return 0x00000000U;
74}
75static inline u32 pri_ringmaster_command_cmd_start_ring_f(void)
76{
77 return 0x1U;
78}
79static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void)
80{
81 return 0x2U;
82}
83static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void)
84{
85 return 0x3U;
86}
87static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void)
88{
89 return 0x0U;
90}
91static inline u32 pri_ringmaster_command_data_r(void)
92{
93 return 0x00120048U;
94}
95static inline u32 pri_ringmaster_start_results_r(void)
96{
97 return 0x00120050U;
98}
99static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r)
100{
101 return (r >> 0U) & 0x1U;
102}
103static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void)
104{
105 return 0x00000001U;
106}
107static inline u32 pri_ringmaster_intr_status0_r(void)
108{
109 return 0x00120058U;
110}
111static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r)
112{
113 return (r >> 0U) & 0x1U;
114}
115static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r)
116{
117 return (r >> 1U) & 0x1U;
118}
119static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r)
120{
121 return (r >> 2U) & 0x1U;
122}
123static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r)
124{
125 return (r >> 8U) & 0x1U;
126}
127static inline u32 pri_ringmaster_intr_status1_r(void)
128{
129 return 0x0012005cU;
130}
131static inline u32 pri_ringmaster_global_ctl_r(void)
132{
133 return 0x00120060U;
134}
135static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void)
136{
137 return 0x1U;
138}
139static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void)
140{
141 return 0x0U;
142}
143static inline u32 pri_ringmaster_enum_fbp_r(void)
144{
145 return 0x00120074U;
146}
147static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r)
148{
149 return (r >> 0U) & 0x1fU;
150}
151static inline u32 pri_ringmaster_enum_gpc_r(void)
152{
153 return 0x00120078U;
154}
155static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r)
156{
157 return (r >> 0U) & 0x1fU;
158}
159static inline u32 pri_ringmaster_enum_ltc_r(void)
160{
161 return 0x0012006cU;
162}
163static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r)
164{
165 return (r >> 0U) & 0x1fU;
166}
167#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h
new file mode 100644
index 00000000..fc522d51
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pri_ringstation_gpc_gv100_h_
57#define _hw_pri_ringstation_gpc_gv100_h_
58
59static inline u32 pri_ringstation_gpc_master_config_r(u32 i)
60{
61 return 0x00128300U + i*4U;
62}
63static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void)
64{
65 return 0x00128120U;
66}
67static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void)
68{
69 return 0x00128124U;
70}
71static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void)
72{
73 return 0x00128128U;
74}
75static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void)
76{
77 return 0x0012812cU;
78}
79#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h
new file mode 100644
index 00000000..885ea30a
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pri_ringstation_sys_gv100_h_
57#define _hw_pri_ringstation_sys_gv100_h_
58
59static inline u32 pri_ringstation_sys_master_config_r(u32 i)
60{
61 return 0x00122300U + i*4U;
62}
63static inline u32 pri_ringstation_sys_decode_config_r(void)
64{
65 return 0x00122204U;
66}
67static inline u32 pri_ringstation_sys_decode_config_ring_m(void)
68{
69 return 0x7U << 0U;
70}
71static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void)
72{
73 return 0x1U;
74}
75static inline u32 pri_ringstation_sys_priv_error_adr_r(void)
76{
77 return 0x00122120U;
78}
79static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void)
80{
81 return 0x00122124U;
82}
83static inline u32 pri_ringstation_sys_priv_error_info_r(void)
84{
85 return 0x00122128U;
86}
87static inline u32 pri_ringstation_sys_priv_error_code_r(void)
88{
89 return 0x0012212cU;
90}
91#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h
new file mode 100644
index 00000000..dc4c377d
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h
@@ -0,0 +1,195 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_proj_gv100_h_
57#define _hw_proj_gv100_h_
58
59static inline u32 proj_gpc_base_v(void)
60{
61 return 0x00500000U;
62}
63static inline u32 proj_gpc_shared_base_v(void)
64{
65 return 0x00418000U;
66}
67static inline u32 proj_gpc_stride_v(void)
68{
69 return 0x00008000U;
70}
71static inline u32 proj_ltc_stride_v(void)
72{
73 return 0x00002000U;
74}
75static inline u32 proj_lts_stride_v(void)
76{
77 return 0x00000200U;
78}
79static inline u32 proj_fbpa_base_v(void)
80{
81 return 0x00900000U;
82}
83static inline u32 proj_fbpa_shared_base_v(void)
84{
85 return 0x009a0000U;
86}
87static inline u32 proj_fbpa_stride_v(void)
88{
89 return 0x00004000U;
90}
91static inline u32 proj_ppc_in_gpc_base_v(void)
92{
93 return 0x00003000U;
94}
95static inline u32 proj_ppc_in_gpc_shared_base_v(void)
96{
97 return 0x00003e00U;
98}
99static inline u32 proj_ppc_in_gpc_stride_v(void)
100{
101 return 0x00000200U;
102}
103static inline u32 proj_rop_base_v(void)
104{
105 return 0x00410000U;
106}
107static inline u32 proj_rop_shared_base_v(void)
108{
109 return 0x00408800U;
110}
111static inline u32 proj_rop_stride_v(void)
112{
113 return 0x00000400U;
114}
115static inline u32 proj_tpc_in_gpc_base_v(void)
116{
117 return 0x00004000U;
118}
119static inline u32 proj_tpc_in_gpc_stride_v(void)
120{
121 return 0x00000800U;
122}
123static inline u32 proj_tpc_in_gpc_shared_base_v(void)
124{
125 return 0x00001800U;
126}
127static inline u32 proj_smpc_base_v(void)
128{
129 return 0x00000200U;
130}
131static inline u32 proj_smpc_shared_base_v(void)
132{
133 return 0x00000300U;
134}
135static inline u32 proj_smpc_unique_base_v(void)
136{
137 return 0x00000600U;
138}
139static inline u32 proj_smpc_stride_v(void)
140{
141 return 0x00000100U;
142}
143static inline u32 proj_host_num_engines_v(void)
144{
145 return 0x0000000fU;
146}
147static inline u32 proj_host_num_pbdma_v(void)
148{
149 return 0x0000000eU;
150}
151static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void)
152{
153 return 0x00000007U;
154}
155static inline u32 proj_scal_litter_num_fbps_v(void)
156{
157 return 0x00000008U;
158}
159static inline u32 proj_scal_litter_num_fbpas_v(void)
160{
161 return 0x00000010U;
162}
163static inline u32 proj_scal_litter_num_gpcs_v(void)
164{
165 return 0x00000006U;
166}
167static inline u32 proj_scal_litter_num_pes_per_gpc_v(void)
168{
169 return 0x00000003U;
170}
171static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void)
172{
173 return 0x00000003U;
174}
175static inline u32 proj_scal_litter_num_zcull_banks_v(void)
176{
177 return 0x00000004U;
178}
179static inline u32 proj_scal_litter_num_sm_per_tpc_v(void)
180{
181 return 0x00000002U;
182}
183static inline u32 proj_scal_max_gpcs_v(void)
184{
185 return 0x00000020U;
186}
187static inline u32 proj_scal_max_tpc_per_gpc_v(void)
188{
189 return 0x00000008U;
190}
191static inline u32 proj_sm_stride_v(void)
192{
193 return 0x00000080U;
194}
195#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h
new file mode 100644
index 00000000..4b0b0326
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h
@@ -0,0 +1,935 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pwr_gv100_h_
57#define _hw_pwr_gv100_h_
58
59static inline u32 pwr_falcon_irqsset_r(void)
60{
61 return 0x0010a000U;
62}
63static inline u32 pwr_falcon_irqsset_swgen0_set_f(void)
64{
65 return 0x40U;
66}
67static inline u32 pwr_falcon_irqsclr_r(void)
68{
69 return 0x0010a004U;
70}
71static inline u32 pwr_falcon_irqstat_r(void)
72{
73 return 0x0010a008U;
74}
75static inline u32 pwr_falcon_irqstat_halt_true_f(void)
76{
77 return 0x10U;
78}
79static inline u32 pwr_falcon_irqstat_exterr_true_f(void)
80{
81 return 0x20U;
82}
83static inline u32 pwr_falcon_irqstat_swgen0_true_f(void)
84{
85 return 0x40U;
86}
87static inline u32 pwr_falcon_irqstat_ext_second_true_f(void)
88{
89 return 0x800U;
90}
91static inline u32 pwr_falcon_irqmode_r(void)
92{
93 return 0x0010a00cU;
94}
95static inline u32 pwr_falcon_irqmset_r(void)
96{
97 return 0x0010a010U;
98}
99static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v)
100{
101 return (v & 0x1U) << 0U;
102}
103static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v)
104{
105 return (v & 0x1U) << 1U;
106}
107static inline u32 pwr_falcon_irqmset_mthd_f(u32 v)
108{
109 return (v & 0x1U) << 2U;
110}
111static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v)
112{
113 return (v & 0x1U) << 3U;
114}
115static inline u32 pwr_falcon_irqmset_halt_f(u32 v)
116{
117 return (v & 0x1U) << 4U;
118}
119static inline u32 pwr_falcon_irqmset_exterr_f(u32 v)
120{
121 return (v & 0x1U) << 5U;
122}
123static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v)
124{
125 return (v & 0x1U) << 6U;
126}
127static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v)
128{
129 return (v & 0x1U) << 7U;
130}
131static inline u32 pwr_falcon_irqmset_ext_f(u32 v)
132{
133 return (v & 0xffU) << 8U;
134}
135static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v)
136{
137 return (v & 0x1U) << 8U;
138}
139static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v)
140{
141 return (v & 0x1U) << 9U;
142}
143static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v)
144{
145 return (v & 0x1U) << 11U;
146}
147static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v)
148{
149 return (v & 0x1U) << 12U;
150}
151static inline u32 pwr_falcon_irqmset_ext_miscio_f(u32 v)
152{
153 return (v & 0x1U) << 13U;
154}
155static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v)
156{
157 return (v & 0x1U) << 14U;
158}
159static inline u32 pwr_falcon_irqmclr_r(void)
160{
161 return 0x0010a014U;
162}
163static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v)
164{
165 return (v & 0x1U) << 0U;
166}
167static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v)
168{
169 return (v & 0x1U) << 1U;
170}
171static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v)
172{
173 return (v & 0x1U) << 2U;
174}
175static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v)
176{
177 return (v & 0x1U) << 3U;
178}
179static inline u32 pwr_falcon_irqmclr_halt_f(u32 v)
180{
181 return (v & 0x1U) << 4U;
182}
183static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v)
184{
185 return (v & 0x1U) << 5U;
186}
187static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v)
188{
189 return (v & 0x1U) << 6U;
190}
191static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v)
192{
193 return (v & 0x1U) << 7U;
194}
195static inline u32 pwr_falcon_irqmclr_ext_f(u32 v)
196{
197 return (v & 0xffU) << 8U;
198}
199static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v)
200{
201 return (v & 0x1U) << 8U;
202}
203static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v)
204{
205 return (v & 0x1U) << 9U;
206}
207static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v)
208{
209 return (v & 0x1U) << 11U;
210}
211static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v)
212{
213 return (v & 0x1U) << 12U;
214}
215static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v)
216{
217 return (v & 0x1U) << 13U;
218}
219static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v)
220{
221 return (v & 0x1U) << 14U;
222}
223static inline u32 pwr_falcon_irqmask_r(void)
224{
225 return 0x0010a018U;
226}
227static inline u32 pwr_falcon_irqdest_r(void)
228{
229 return 0x0010a01cU;
230}
231static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v)
232{
233 return (v & 0x1U) << 0U;
234}
235static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v)
236{
237 return (v & 0x1U) << 1U;
238}
239static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v)
240{
241 return (v & 0x1U) << 2U;
242}
243static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v)
244{
245 return (v & 0x1U) << 3U;
246}
247static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v)
248{
249 return (v & 0x1U) << 4U;
250}
251static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v)
252{
253 return (v & 0x1U) << 5U;
254}
255static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v)
256{
257 return (v & 0x1U) << 6U;
258}
259static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v)
260{
261 return (v & 0x1U) << 7U;
262}
263static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v)
264{
265 return (v & 0xffU) << 8U;
266}
267static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v)
268{
269 return (v & 0x1U) << 8U;
270}
271static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v)
272{
273 return (v & 0x1U) << 9U;
274}
275static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v)
276{
277 return (v & 0x1U) << 11U;
278}
279static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v)
280{
281 return (v & 0x1U) << 12U;
282}
283static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v)
284{
285 return (v & 0x1U) << 13U;
286}
287static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v)
288{
289 return (v & 0x1U) << 14U;
290}
291static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v)
292{
293 return (v & 0x1U) << 16U;
294}
295static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v)
296{
297 return (v & 0x1U) << 17U;
298}
299static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v)
300{
301 return (v & 0x1U) << 18U;
302}
303static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v)
304{
305 return (v & 0x1U) << 19U;
306}
307static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v)
308{
309 return (v & 0x1U) << 20U;
310}
311static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v)
312{
313 return (v & 0x1U) << 21U;
314}
315static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v)
316{
317 return (v & 0x1U) << 22U;
318}
319static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v)
320{
321 return (v & 0x1U) << 23U;
322}
323static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v)
324{
325 return (v & 0xffU) << 24U;
326}
327static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v)
328{
329 return (v & 0x1U) << 24U;
330}
331static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v)
332{
333 return (v & 0x1U) << 25U;
334}
335static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v)
336{
337 return (v & 0x1U) << 27U;
338}
339static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v)
340{
341 return (v & 0x1U) << 28U;
342}
343static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v)
344{
345 return (v & 0x1U) << 29U;
346}
347static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v)
348{
349 return (v & 0x1U) << 30U;
350}
351static inline u32 pwr_falcon_curctx_r(void)
352{
353 return 0x0010a050U;
354}
355static inline u32 pwr_falcon_nxtctx_r(void)
356{
357 return 0x0010a054U;
358}
359static inline u32 pwr_falcon_mailbox0_r(void)
360{
361 return 0x0010a040U;
362}
363static inline u32 pwr_falcon_mailbox1_r(void)
364{
365 return 0x0010a044U;
366}
367static inline u32 pwr_falcon_itfen_r(void)
368{
369 return 0x0010a048U;
370}
371static inline u32 pwr_falcon_itfen_ctxen_enable_f(void)
372{
373 return 0x1U;
374}
375static inline u32 pwr_falcon_idlestate_r(void)
376{
377 return 0x0010a04cU;
378}
379static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r)
380{
381 return (r >> 0U) & 0x1U;
382}
383static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r)
384{
385 return (r >> 1U) & 0x7fffU;
386}
387static inline u32 pwr_falcon_os_r(void)
388{
389 return 0x0010a080U;
390}
391static inline u32 pwr_falcon_engctl_r(void)
392{
393 return 0x0010a0a4U;
394}
395static inline u32 pwr_falcon_cpuctl_r(void)
396{
397 return 0x0010a100U;
398}
399static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v)
400{
401 return (v & 0x1U) << 1U;
402}
403static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v)
404{
405 return (v & 0x1U) << 4U;
406}
407static inline u32 pwr_falcon_cpuctl_halt_intr_m(void)
408{
409 return 0x1U << 4U;
410}
411static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r)
412{
413 return (r >> 4U) & 0x1U;
414}
415static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
416{
417 return (v & 0x1U) << 6U;
418}
419static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void)
420{
421 return 0x1U << 6U;
422}
423static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
424{
425 return (r >> 6U) & 0x1U;
426}
427static inline u32 pwr_falcon_cpuctl_alias_r(void)
428{
429 return 0x0010a130U;
430}
431static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v)
432{
433 return (v & 0x1U) << 1U;
434}
435static inline u32 pwr_pmu_scpctl_stat_r(void)
436{
437 return 0x0010ac08U;
438}
439static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v)
440{
441 return (v & 0x1U) << 20U;
442}
443static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void)
444{
445 return 0x1U << 20U;
446}
447static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r)
448{
449 return (r >> 20U) & 0x1U;
450}
451static inline u32 pwr_falcon_imemc_r(u32 i)
452{
453 return 0x0010a180U + i*16U;
454}
455static inline u32 pwr_falcon_imemc_offs_f(u32 v)
456{
457 return (v & 0x3fU) << 2U;
458}
459static inline u32 pwr_falcon_imemc_blk_f(u32 v)
460{
461 return (v & 0xffU) << 8U;
462}
463static inline u32 pwr_falcon_imemc_aincw_f(u32 v)
464{
465 return (v & 0x1U) << 24U;
466}
467static inline u32 pwr_falcon_imemd_r(u32 i)
468{
469 return 0x0010a184U + i*16U;
470}
471static inline u32 pwr_falcon_imemt_r(u32 i)
472{
473 return 0x0010a188U + i*16U;
474}
475static inline u32 pwr_falcon_sctl_r(void)
476{
477 return 0x0010a240U;
478}
479static inline u32 pwr_falcon_mmu_phys_sec_r(void)
480{
481 return 0x00100ce4U;
482}
483static inline u32 pwr_falcon_bootvec_r(void)
484{
485 return 0x0010a104U;
486}
487static inline u32 pwr_falcon_bootvec_vec_f(u32 v)
488{
489 return (v & 0xffffffffU) << 0U;
490}
491static inline u32 pwr_falcon_dmactl_r(void)
492{
493 return 0x0010a10cU;
494}
495static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void)
496{
497 return 0x1U << 1U;
498}
499static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void)
500{
501 return 0x1U << 2U;
502}
503static inline u32 pwr_falcon_hwcfg_r(void)
504{
505 return 0x0010a108U;
506}
507static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r)
508{
509 return (r >> 0U) & 0x1ffU;
510}
511static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r)
512{
513 return (r >> 9U) & 0x1ffU;
514}
515static inline u32 pwr_falcon_dmatrfbase_r(void)
516{
517 return 0x0010a110U;
518}
519static inline u32 pwr_falcon_dmatrfbase1_r(void)
520{
521 return 0x0010a128U;
522}
523static inline u32 pwr_falcon_dmatrfmoffs_r(void)
524{
525 return 0x0010a114U;
526}
527static inline u32 pwr_falcon_dmatrfcmd_r(void)
528{
529 return 0x0010a118U;
530}
531static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v)
532{
533 return (v & 0x1U) << 4U;
534}
535static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v)
536{
537 return (v & 0x1U) << 5U;
538}
539static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v)
540{
541 return (v & 0x7U) << 8U;
542}
543static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v)
544{
545 return (v & 0x7U) << 12U;
546}
547static inline u32 pwr_falcon_dmatrffboffs_r(void)
548{
549 return 0x0010a11cU;
550}
551static inline u32 pwr_falcon_exterraddr_r(void)
552{
553 return 0x0010a168U;
554}
555static inline u32 pwr_falcon_exterrstat_r(void)
556{
557 return 0x0010a16cU;
558}
559static inline u32 pwr_falcon_exterrstat_valid_m(void)
560{
561 return 0x1U << 31U;
562}
563static inline u32 pwr_falcon_exterrstat_valid_v(u32 r)
564{
565 return (r >> 31U) & 0x1U;
566}
567static inline u32 pwr_falcon_exterrstat_valid_true_v(void)
568{
569 return 0x00000001U;
570}
571static inline u32 pwr_pmu_falcon_icd_cmd_r(void)
572{
573 return 0x0010a200U;
574}
575static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void)
576{
577 return 4U;
578}
579static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v)
580{
581 return (v & 0xfU) << 0U;
582}
583static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void)
584{
585 return 0xfU << 0U;
586}
587static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r)
588{
589 return (r >> 0U) & 0xfU;
590}
591static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void)
592{
593 return 0x8U;
594}
595static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void)
596{
597 return 0xeU;
598}
599static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v)
600{
601 return (v & 0x1fU) << 8U;
602}
603static inline u32 pwr_pmu_falcon_icd_rdata_r(void)
604{
605 return 0x0010a20cU;
606}
607static inline u32 pwr_falcon_dmemc_r(u32 i)
608{
609 return 0x0010a1c0U + i*8U;
610}
611static inline u32 pwr_falcon_dmemc_offs_f(u32 v)
612{
613 return (v & 0x3fU) << 2U;
614}
615static inline u32 pwr_falcon_dmemc_offs_m(void)
616{
617 return 0x3fU << 2U;
618}
619static inline u32 pwr_falcon_dmemc_blk_f(u32 v)
620{
621 return (v & 0xffU) << 8U;
622}
623static inline u32 pwr_falcon_dmemc_blk_m(void)
624{
625 return 0xffU << 8U;
626}
627static inline u32 pwr_falcon_dmemc_aincw_f(u32 v)
628{
629 return (v & 0x1U) << 24U;
630}
631static inline u32 pwr_falcon_dmemc_aincr_f(u32 v)
632{
633 return (v & 0x1U) << 25U;
634}
635static inline u32 pwr_falcon_dmemd_r(u32 i)
636{
637 return 0x0010a1c4U + i*8U;
638}
639static inline u32 pwr_pmu_new_instblk_r(void)
640{
641 return 0x0010a480U;
642}
643static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v)
644{
645 return (v & 0xfffffffU) << 0U;
646}
647static inline u32 pwr_pmu_new_instblk_target_fb_f(void)
648{
649 return 0x0U;
650}
651static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void)
652{
653 return 0x20000000U;
654}
655static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void)
656{
657 return 0x30000000U;
658}
659static inline u32 pwr_pmu_new_instblk_valid_f(u32 v)
660{
661 return (v & 0x1U) << 30U;
662}
663static inline u32 pwr_pmu_mutex_id_r(void)
664{
665 return 0x0010a488U;
666}
667static inline u32 pwr_pmu_mutex_id_value_v(u32 r)
668{
669 return (r >> 0U) & 0xffU;
670}
671static inline u32 pwr_pmu_mutex_id_value_init_v(void)
672{
673 return 0x00000000U;
674}
675static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void)
676{
677 return 0x000000ffU;
678}
679static inline u32 pwr_pmu_mutex_id_release_r(void)
680{
681 return 0x0010a48cU;
682}
683static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v)
684{
685 return (v & 0xffU) << 0U;
686}
687static inline u32 pwr_pmu_mutex_id_release_value_m(void)
688{
689 return 0xffU << 0U;
690}
691static inline u32 pwr_pmu_mutex_id_release_value_init_v(void)
692{
693 return 0x00000000U;
694}
695static inline u32 pwr_pmu_mutex_id_release_value_init_f(void)
696{
697 return 0x0U;
698}
699static inline u32 pwr_pmu_mutex_r(u32 i)
700{
701 return 0x0010a580U + i*4U;
702}
703static inline u32 pwr_pmu_mutex__size_1_v(void)
704{
705 return 0x00000010U;
706}
707static inline u32 pwr_pmu_mutex_value_f(u32 v)
708{
709 return (v & 0xffU) << 0U;
710}
711static inline u32 pwr_pmu_mutex_value_v(u32 r)
712{
713 return (r >> 0U) & 0xffU;
714}
715static inline u32 pwr_pmu_mutex_value_initial_lock_f(void)
716{
717 return 0x0U;
718}
719static inline u32 pwr_pmu_queue_head_r(u32 i)
720{
721 return 0x0010a800U + i*4U;
722}
723static inline u32 pwr_pmu_queue_head__size_1_v(void)
724{
725 return 0x00000008U;
726}
727static inline u32 pwr_pmu_queue_head_address_f(u32 v)
728{
729 return (v & 0xffffffffU) << 0U;
730}
731static inline u32 pwr_pmu_queue_head_address_v(u32 r)
732{
733 return (r >> 0U) & 0xffffffffU;
734}
735static inline u32 pwr_pmu_queue_tail_r(u32 i)
736{
737 return 0x0010a820U + i*4U;
738}
739static inline u32 pwr_pmu_queue_tail__size_1_v(void)
740{
741 return 0x00000008U;
742}
743static inline u32 pwr_pmu_queue_tail_address_f(u32 v)
744{
745 return (v & 0xffffffffU) << 0U;
746}
747static inline u32 pwr_pmu_queue_tail_address_v(u32 r)
748{
749 return (r >> 0U) & 0xffffffffU;
750}
751static inline u32 pwr_pmu_msgq_head_r(void)
752{
753 return 0x0010a4c8U;
754}
755static inline u32 pwr_pmu_msgq_head_val_f(u32 v)
756{
757 return (v & 0xffffffffU) << 0U;
758}
759static inline u32 pwr_pmu_msgq_head_val_v(u32 r)
760{
761 return (r >> 0U) & 0xffffffffU;
762}
763static inline u32 pwr_pmu_msgq_tail_r(void)
764{
765 return 0x0010a4ccU;
766}
767static inline u32 pwr_pmu_msgq_tail_val_f(u32 v)
768{
769 return (v & 0xffffffffU) << 0U;
770}
771static inline u32 pwr_pmu_msgq_tail_val_v(u32 r)
772{
773 return (r >> 0U) & 0xffffffffU;
774}
775static inline u32 pwr_pmu_idle_mask_r(u32 i)
776{
777 return 0x0010a504U + i*16U;
778}
779static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void)
780{
781 return 0x1U;
782}
783static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void)
784{
785 return 0x200000U;
786}
787static inline u32 pwr_pmu_idle_count_r(u32 i)
788{
789 return 0x0010a508U + i*16U;
790}
791static inline u32 pwr_pmu_idle_count_value_f(u32 v)
792{
793 return (v & 0x7fffffffU) << 0U;
794}
795static inline u32 pwr_pmu_idle_count_value_v(u32 r)
796{
797 return (r >> 0U) & 0x7fffffffU;
798}
799static inline u32 pwr_pmu_idle_count_reset_f(u32 v)
800{
801 return (v & 0x1U) << 31U;
802}
803static inline u32 pwr_pmu_idle_ctrl_r(u32 i)
804{
805 return 0x0010a50cU + i*16U;
806}
807static inline u32 pwr_pmu_idle_ctrl_value_m(void)
808{
809 return 0x3U << 0U;
810}
811static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void)
812{
813 return 0x2U;
814}
815static inline u32 pwr_pmu_idle_ctrl_value_always_f(void)
816{
817 return 0x3U;
818}
819static inline u32 pwr_pmu_idle_ctrl_filter_m(void)
820{
821 return 0x1U << 2U;
822}
823static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void)
824{
825 return 0x0U;
826}
827static inline u32 pwr_pmu_idle_mask_supp_r(u32 i)
828{
829 return 0x0010a9f0U + i*8U;
830}
831static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i)
832{
833 return 0x0010a9f4U + i*8U;
834}
835static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i)
836{
837 return 0x0010aa30U + i*8U;
838}
839static inline u32 pwr_pmu_debug_r(u32 i)
840{
841 return 0x0010a5c0U + i*4U;
842}
843static inline u32 pwr_pmu_debug__size_1_v(void)
844{
845 return 0x00000004U;
846}
847static inline u32 pwr_pmu_mailbox_r(u32 i)
848{
849 return 0x0010a450U + i*4U;
850}
851static inline u32 pwr_pmu_mailbox__size_1_v(void)
852{
853 return 0x0000000cU;
854}
855static inline u32 pwr_pmu_bar0_addr_r(void)
856{
857 return 0x0010a7a0U;
858}
859static inline u32 pwr_pmu_bar0_data_r(void)
860{
861 return 0x0010a7a4U;
862}
863static inline u32 pwr_pmu_bar0_ctl_r(void)
864{
865 return 0x0010a7acU;
866}
867static inline u32 pwr_pmu_bar0_timeout_r(void)
868{
869 return 0x0010a7a8U;
870}
871static inline u32 pwr_pmu_bar0_fecs_error_r(void)
872{
873 return 0x0010a988U;
874}
875static inline u32 pwr_pmu_bar0_error_status_r(void)
876{
877 return 0x0010a7b0U;
878}
879static inline u32 pwr_pmu_pg_idlefilth_r(u32 i)
880{
881 return 0x0010a6c0U + i*4U;
882}
883static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i)
884{
885 return 0x0010a6e8U + i*4U;
886}
887static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i)
888{
889 return 0x0010a710U + i*4U;
890}
891static inline u32 pwr_pmu_pg_intren_r(u32 i)
892{
893 return 0x0010a760U + i*4U;
894}
895static inline u32 pwr_fbif_transcfg_r(u32 i)
896{
897 return 0x0010ae00U + i*4U;
898}
899static inline u32 pwr_fbif_transcfg_target_local_fb_f(void)
900{
901 return 0x0U;
902}
903static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void)
904{
905 return 0x1U;
906}
907static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void)
908{
909 return 0x2U;
910}
911static inline u32 pwr_fbif_transcfg_mem_type_s(void)
912{
913 return 1U;
914}
915static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v)
916{
917 return (v & 0x1U) << 2U;
918}
919static inline u32 pwr_fbif_transcfg_mem_type_m(void)
920{
921 return 0x1U << 2U;
922}
923static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r)
924{
925 return (r >> 2U) & 0x1U;
926}
927static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void)
928{
929 return 0x0U;
930}
931static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void)
932{
933 return 0x4U;
934}
935#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h
new file mode 100644
index 00000000..6b3e8aa6
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h
@@ -0,0 +1,775 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ram_gv100_h_
57#define _hw_ram_gv100_h_
58
59static inline u32 ram_in_ramfc_s(void)
60{
61 return 4096U;
62}
63static inline u32 ram_in_ramfc_w(void)
64{
65 return 0U;
66}
67static inline u32 ram_in_page_dir_base_target_f(u32 v)
68{
69 return (v & 0x3U) << 0U;
70}
71static inline u32 ram_in_page_dir_base_target_w(void)
72{
73 return 128U;
74}
75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void)
76{
77 return 0x0U;
78}
79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void)
80{
81 return 0x2U;
82}
83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void)
84{
85 return 0x3U;
86}
87static inline u32 ram_in_page_dir_base_vol_w(void)
88{
89 return 128U;
90}
91static inline u32 ram_in_page_dir_base_vol_true_f(void)
92{
93 return 0x4U;
94}
95static inline u32 ram_in_page_dir_base_vol_false_f(void)
96{
97 return 0x0U;
98}
99static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v)
100{
101 return (v & 0x1U) << 4U;
102}
103static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void)
104{
105 return 0x1U << 4U;
106}
107static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void)
108{
109 return 128U;
110}
111static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void)
112{
113 return 0x10U;
114}
115static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v)
116{
117 return (v & 0x1U) << 5U;
118}
119static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void)
120{
121 return 0x1U << 5U;
122}
123static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void)
124{
125 return 128U;
126}
127static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void)
128{
129 return 0x20U;
130}
131static inline u32 ram_in_big_page_size_f(u32 v)
132{
133 return (v & 0x1U) << 11U;
134}
135static inline u32 ram_in_big_page_size_m(void)
136{
137 return 0x1U << 11U;
138}
139static inline u32 ram_in_big_page_size_w(void)
140{
141 return 128U;
142}
143static inline u32 ram_in_big_page_size_128kb_f(void)
144{
145 return 0x0U;
146}
147static inline u32 ram_in_big_page_size_64kb_f(void)
148{
149 return 0x800U;
150}
151static inline u32 ram_in_page_dir_base_lo_f(u32 v)
152{
153 return (v & 0xfffffU) << 12U;
154}
155static inline u32 ram_in_page_dir_base_lo_w(void)
156{
157 return 128U;
158}
159static inline u32 ram_in_page_dir_base_hi_f(u32 v)
160{
161 return (v & 0xffffffffU) << 0U;
162}
163static inline u32 ram_in_page_dir_base_hi_w(void)
164{
165 return 129U;
166}
167static inline u32 ram_in_engine_cs_w(void)
168{
169 return 132U;
170}
171static inline u32 ram_in_engine_cs_wfi_v(void)
172{
173 return 0x00000000U;
174}
175static inline u32 ram_in_engine_cs_wfi_f(void)
176{
177 return 0x0U;
178}
179static inline u32 ram_in_engine_cs_fg_v(void)
180{
181 return 0x00000001U;
182}
183static inline u32 ram_in_engine_cs_fg_f(void)
184{
185 return 0x8U;
186}
187static inline u32 ram_in_engine_wfi_mode_f(u32 v)
188{
189 return (v & 0x1U) << 2U;
190}
191static inline u32 ram_in_engine_wfi_mode_w(void)
192{
193 return 132U;
194}
195static inline u32 ram_in_engine_wfi_mode_physical_v(void)
196{
197 return 0x00000000U;
198}
199static inline u32 ram_in_engine_wfi_mode_virtual_v(void)
200{
201 return 0x00000001U;
202}
203static inline u32 ram_in_engine_wfi_target_f(u32 v)
204{
205 return (v & 0x3U) << 0U;
206}
207static inline u32 ram_in_engine_wfi_target_w(void)
208{
209 return 132U;
210}
211static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void)
212{
213 return 0x00000002U;
214}
215static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void)
216{
217 return 0x00000003U;
218}
219static inline u32 ram_in_engine_wfi_target_local_mem_v(void)
220{
221 return 0x00000000U;
222}
223static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v)
224{
225 return (v & 0xfffffU) << 12U;
226}
227static inline u32 ram_in_engine_wfi_ptr_lo_w(void)
228{
229 return 132U;
230}
231static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v)
232{
233 return (v & 0xffU) << 0U;
234}
235static inline u32 ram_in_engine_wfi_ptr_hi_w(void)
236{
237 return 133U;
238}
239static inline u32 ram_in_engine_wfi_veid_f(u32 v)
240{
241 return (v & 0x3fU) << 0U;
242}
243static inline u32 ram_in_engine_wfi_veid_w(void)
244{
245 return 134U;
246}
247static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v)
248{
249 return (v & 0xffffffffU) << 0U;
250}
251static inline u32 ram_in_eng_method_buffer_addr_lo_w(void)
252{
253 return 136U;
254}
255static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v)
256{
257 return (v & 0x1ffffU) << 0U;
258}
259static inline u32 ram_in_eng_method_buffer_addr_hi_w(void)
260{
261 return 137U;
262}
263static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i)
264{
265 return (v & 0x3U) << (0U + i*0U);
266}
267static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void)
268{
269 return 0x00000040U;
270}
271static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void)
272{
273 return 0x00000000U;
274}
275static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void)
276{
277 return 0x00000001U;
278}
279static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void)
280{
281 return 0x00000002U;
282}
283static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void)
284{
285 return 0x00000003U;
286}
287static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i)
288{
289 return (v & 0x1U) << (2U + i*0U);
290}
291static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void)
292{
293 return 0x00000040U;
294}
295static inline u32 ram_in_sc_page_dir_base_vol_true_v(void)
296{
297 return 0x00000001U;
298}
299static inline u32 ram_in_sc_page_dir_base_vol_false_v(void)
300{
301 return 0x00000000U;
302}
303static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i)
304{
305 return (v & 0x1U) << (4U + i*0U);
306}
307static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void)
308{
309 return 0x00000040U;
310}
311static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void)
312{
313 return 0x00000001U;
314}
315static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void)
316{
317 return 0x00000000U;
318}
319static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i)
320{
321 return (v & 0x1U) << (5U + i*0U);
322}
323static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void)
324{
325 return 0x00000040U;
326}
327static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void)
328{
329 return 0x00000001U;
330}
331static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void)
332{
333 return 0x00000000U;
334}
335static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i)
336{
337 return (v & 0x1U) << (10U + i*0U);
338}
339static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void)
340{
341 return 0x00000040U;
342}
343static inline u32 ram_in_sc_use_ver2_pt_format_false_v(void)
344{
345 return 0x00000000U;
346}
347static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void)
348{
349 return 0x00000001U;
350}
351static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i)
352{
353 return (v & 0x1U) << (11U + i*0U);
354}
355static inline u32 ram_in_sc_big_page_size__size_1_v(void)
356{
357 return 0x00000040U;
358}
359static inline u32 ram_in_sc_big_page_size_64kb_v(void)
360{
361 return 0x00000001U;
362}
363static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i)
364{
365 return (v & 0xfffffU) << (12U + i*0U);
366}
367static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void)
368{
369 return 0x00000040U;
370}
371static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i)
372{
373 return (v & 0xffffffffU) << (0U + i*0U);
374}
375static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void)
376{
377 return 0x00000040U;
378}
379static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v)
380{
381 return (v & 0x3U) << 0U;
382}
383static inline u32 ram_in_sc_page_dir_base_target_0_w(void)
384{
385 return 168U;
386}
387static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v)
388{
389 return (v & 0x1U) << 2U;
390}
391static inline u32 ram_in_sc_page_dir_base_vol_0_w(void)
392{
393 return 168U;
394}
395static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v)
396{
397 return (v & 0x1U) << 4U;
398}
399static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void)
400{
401 return 168U;
402}
403static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v)
404{
405 return (v & 0x1U) << 5U;
406}
407static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void)
408{
409 return 168U;
410}
411static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v)
412{
413 return (v & 0x1U) << 10U;
414}
415static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void)
416{
417 return 168U;
418}
419static inline u32 ram_in_sc_big_page_size_0_f(u32 v)
420{
421 return (v & 0x1U) << 11U;
422}
423static inline u32 ram_in_sc_big_page_size_0_w(void)
424{
425 return 168U;
426}
427static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v)
428{
429 return (v & 0xfffffU) << 12U;
430}
431static inline u32 ram_in_sc_page_dir_base_lo_0_w(void)
432{
433 return 168U;
434}
435static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v)
436{
437 return (v & 0xffffffffU) << 0U;
438}
439static inline u32 ram_in_sc_page_dir_base_hi_0_w(void)
440{
441 return 169U;
442}
443static inline u32 ram_in_base_shift_v(void)
444{
445 return 0x0000000cU;
446}
447static inline u32 ram_in_alloc_size_v(void)
448{
449 return 0x00001000U;
450}
451static inline u32 ram_fc_size_val_v(void)
452{
453 return 0x00000200U;
454}
455static inline u32 ram_fc_gp_put_w(void)
456{
457 return 0U;
458}
459static inline u32 ram_fc_userd_w(void)
460{
461 return 2U;
462}
463static inline u32 ram_fc_userd_hi_w(void)
464{
465 return 3U;
466}
467static inline u32 ram_fc_signature_w(void)
468{
469 return 4U;
470}
471static inline u32 ram_fc_gp_get_w(void)
472{
473 return 5U;
474}
475static inline u32 ram_fc_pb_get_w(void)
476{
477 return 6U;
478}
479static inline u32 ram_fc_pb_get_hi_w(void)
480{
481 return 7U;
482}
483static inline u32 ram_fc_pb_top_level_get_w(void)
484{
485 return 8U;
486}
487static inline u32 ram_fc_pb_top_level_get_hi_w(void)
488{
489 return 9U;
490}
491static inline u32 ram_fc_acquire_w(void)
492{
493 return 12U;
494}
495static inline u32 ram_fc_sem_addr_hi_w(void)
496{
497 return 14U;
498}
499static inline u32 ram_fc_sem_addr_lo_w(void)
500{
501 return 15U;
502}
503static inline u32 ram_fc_sem_payload_lo_w(void)
504{
505 return 16U;
506}
507static inline u32 ram_fc_sem_payload_hi_w(void)
508{
509 return 39U;
510}
511static inline u32 ram_fc_sem_execute_w(void)
512{
513 return 17U;
514}
515static inline u32 ram_fc_gp_base_w(void)
516{
517 return 18U;
518}
519static inline u32 ram_fc_gp_base_hi_w(void)
520{
521 return 19U;
522}
523static inline u32 ram_fc_gp_fetch_w(void)
524{
525 return 20U;
526}
527static inline u32 ram_fc_pb_fetch_w(void)
528{
529 return 21U;
530}
531static inline u32 ram_fc_pb_fetch_hi_w(void)
532{
533 return 22U;
534}
535static inline u32 ram_fc_pb_put_w(void)
536{
537 return 23U;
538}
539static inline u32 ram_fc_pb_put_hi_w(void)
540{
541 return 24U;
542}
543static inline u32 ram_fc_pb_header_w(void)
544{
545 return 33U;
546}
547static inline u32 ram_fc_pb_count_w(void)
548{
549 return 34U;
550}
551static inline u32 ram_fc_subdevice_w(void)
552{
553 return 37U;
554}
555static inline u32 ram_fc_target_w(void)
556{
557 return 43U;
558}
559static inline u32 ram_fc_hce_ctrl_w(void)
560{
561 return 57U;
562}
563static inline u32 ram_fc_chid_w(void)
564{
565 return 58U;
566}
567static inline u32 ram_fc_chid_id_f(u32 v)
568{
569 return (v & 0xfffU) << 0U;
570}
571static inline u32 ram_fc_chid_id_w(void)
572{
573 return 0U;
574}
575static inline u32 ram_fc_config_w(void)
576{
577 return 61U;
578}
579static inline u32 ram_fc_runlist_timeslice_w(void)
580{
581 return 62U;
582}
583static inline u32 ram_fc_set_channel_info_w(void)
584{
585 return 63U;
586}
587static inline u32 ram_userd_base_shift_v(void)
588{
589 return 0x00000009U;
590}
591static inline u32 ram_userd_chan_size_v(void)
592{
593 return 0x00000200U;
594}
595static inline u32 ram_userd_put_w(void)
596{
597 return 16U;
598}
599static inline u32 ram_userd_get_w(void)
600{
601 return 17U;
602}
603static inline u32 ram_userd_ref_w(void)
604{
605 return 18U;
606}
607static inline u32 ram_userd_put_hi_w(void)
608{
609 return 19U;
610}
611static inline u32 ram_userd_ref_threshold_w(void)
612{
613 return 20U;
614}
615static inline u32 ram_userd_top_level_get_w(void)
616{
617 return 22U;
618}
619static inline u32 ram_userd_top_level_get_hi_w(void)
620{
621 return 23U;
622}
623static inline u32 ram_userd_get_hi_w(void)
624{
625 return 24U;
626}
627static inline u32 ram_userd_gp_get_w(void)
628{
629 return 34U;
630}
631static inline u32 ram_userd_gp_put_w(void)
632{
633 return 35U;
634}
635static inline u32 ram_userd_gp_top_level_get_w(void)
636{
637 return 22U;
638}
639static inline u32 ram_userd_gp_top_level_get_hi_w(void)
640{
641 return 23U;
642}
643static inline u32 ram_rl_entry_size_v(void)
644{
645 return 0x00000010U;
646}
647static inline u32 ram_rl_entry_type_f(u32 v)
648{
649 return (v & 0x1U) << 0U;
650}
651static inline u32 ram_rl_entry_type_channel_v(void)
652{
653 return 0x00000000U;
654}
655static inline u32 ram_rl_entry_type_tsg_v(void)
656{
657 return 0x00000001U;
658}
659static inline u32 ram_rl_entry_id_f(u32 v)
660{
661 return (v & 0xfffU) << 0U;
662}
663static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v)
664{
665 return (v & 0x1U) << 1U;
666}
667static inline u32 ram_rl_entry_chan_inst_target_f(u32 v)
668{
669 return (v & 0x3U) << 4U;
670}
671static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void)
672{
673 return 0x00000003U;
674}
675static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void)
676{
677 return 0x00000002U;
678}
679static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void)
680{
681 return 0x00000000U;
682}
683static inline u32 ram_rl_entry_chan_userd_target_f(u32 v)
684{
685 return (v & 0x3U) << 6U;
686}
687static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void)
688{
689 return 0x00000000U;
690}
691static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void)
692{
693 return 0x00000001U;
694}
695static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void)
696{
697 return 0x00000002U;
698}
699static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void)
700{
701 return 0x00000003U;
702}
703static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v)
704{
705 return (v & 0xffffffU) << 8U;
706}
707static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v)
708{
709 return (v & 0xffffffffU) << 0U;
710}
711static inline u32 ram_rl_entry_chid_f(u32 v)
712{
713 return (v & 0xfffU) << 0U;
714}
715static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 v)
716{
717 return (v & 0xfffffU) << 12U;
718}
719static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v)
720{
721 return (v & 0xffffffffU) << 0U;
722}
723static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v)
724{
725 return (v & 0xfU) << 16U;
726}
727static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void)
728{
729 return 0x00000003U;
730}
731static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v)
732{
733 return (v & 0xffU) << 24U;
734}
735static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void)
736{
737 return 0x00000080U;
738}
739static inline u32 ram_rl_entry_tsg_timeslice_timeout_disable_v(void)
740{
741 return 0x00000000U;
742}
743static inline u32 ram_rl_entry_tsg_length_f(u32 v)
744{
745 return (v & 0xffU) << 0U;
746}
747static inline u32 ram_rl_entry_tsg_length_init_v(void)
748{
749 return 0x00000000U;
750}
751static inline u32 ram_rl_entry_tsg_length_min_v(void)
752{
753 return 0x00000001U;
754}
755static inline u32 ram_rl_entry_tsg_length_max_v(void)
756{
757 return 0x00000080U;
758}
759static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v)
760{
761 return (v & 0xfffU) << 0U;
762}
763static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void)
764{
765 return 0x00000008U;
766}
767static inline u32 ram_rl_entry_chan_userd_align_shift_v(void)
768{
769 return 0x00000008U;
770}
771static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void)
772{
773 return 0x0000000cU;
774}
775#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h
new file mode 100644
index 00000000..2ea71ef1
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h
@@ -0,0 +1,299 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_therm_gv100_h_
57#define _hw_therm_gv100_h_
58
59static inline u32 therm_weight_1_r(void)
60{
61 return 0x00020024U;
62}
63static inline u32 therm_config1_r(void)
64{
65 return 0x00020050U;
66}
67static inline u32 therm_config2_r(void)
68{
69 return 0x00020130U;
70}
71static inline u32 therm_config2_slowdown_factor_extended_f(u32 v)
72{
73 return (v & 0x1U) << 24U;
74}
75static inline u32 therm_config2_grad_enable_f(u32 v)
76{
77 return (v & 0x1U) << 31U;
78}
79static inline u32 therm_gate_ctrl_r(u32 i)
80{
81 return 0x00020200U + i*4U;
82}
83static inline u32 therm_gate_ctrl_eng_clk_m(void)
84{
85 return 0x3U << 0U;
86}
87static inline u32 therm_gate_ctrl_eng_clk_run_f(void)
88{
89 return 0x0U;
90}
91static inline u32 therm_gate_ctrl_eng_clk_auto_f(void)
92{
93 return 0x1U;
94}
95static inline u32 therm_gate_ctrl_eng_clk_stop_f(void)
96{
97 return 0x2U;
98}
99static inline u32 therm_gate_ctrl_blk_clk_m(void)
100{
101 return 0x3U << 2U;
102}
103static inline u32 therm_gate_ctrl_blk_clk_run_f(void)
104{
105 return 0x0U;
106}
107static inline u32 therm_gate_ctrl_blk_clk_auto_f(void)
108{
109 return 0x4U;
110}
111static inline u32 therm_gate_ctrl_idle_holdoff_m(void)
112{
113 return 0x1U << 4U;
114}
115static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void)
116{
117 return 0x0U;
118}
119static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void)
120{
121 return 0x10U;
122}
123static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v)
124{
125 return (v & 0x1fU) << 8U;
126}
127static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void)
128{
129 return 0x1fU << 8U;
130}
131static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v)
132{
133 return (v & 0x7U) << 13U;
134}
135static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void)
136{
137 return 0x7U << 13U;
138}
139static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v)
140{
141 return (v & 0xfU) << 16U;
142}
143static inline u32 therm_gate_ctrl_eng_delay_before_m(void)
144{
145 return 0xfU << 16U;
146}
147static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v)
148{
149 return (v & 0xfU) << 20U;
150}
151static inline u32 therm_gate_ctrl_eng_delay_after_m(void)
152{
153 return 0xfU << 20U;
154}
155static inline u32 therm_fecs_idle_filter_r(void)
156{
157 return 0x00020288U;
158}
159static inline u32 therm_fecs_idle_filter_value_m(void)
160{
161 return 0xffffffffU << 0U;
162}
163static inline u32 therm_hubmmu_idle_filter_r(void)
164{
165 return 0x0002028cU;
166}
167static inline u32 therm_hubmmu_idle_filter_value_m(void)
168{
169 return 0xffffffffU << 0U;
170}
171static inline u32 therm_clk_slowdown_r(u32 i)
172{
173 return 0x00020160U + i*4U;
174}
175static inline u32 therm_clk_slowdown_idle_factor_f(u32 v)
176{
177 return (v & 0x3fU) << 16U;
178}
179static inline u32 therm_clk_slowdown_idle_factor_m(void)
180{
181 return 0x3fU << 16U;
182}
183static inline u32 therm_clk_slowdown_idle_factor_v(u32 r)
184{
185 return (r >> 16U) & 0x3fU;
186}
187static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void)
188{
189 return 0x0U;
190}
191static inline u32 therm_grad_stepping_table_r(u32 i)
192{
193 return 0x000202c8U + i*4U;
194}
195static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v)
196{
197 return (v & 0x3fU) << 0U;
198}
199static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void)
200{
201 return 0x3fU << 0U;
202}
203static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void)
204{
205 return 0x1U;
206}
207static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void)
208{
209 return 0x2U;
210}
211static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void)
212{
213 return 0x6U;
214}
215static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void)
216{
217 return 0xeU;
218}
219static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v)
220{
221 return (v & 0x3fU) << 6U;
222}
223static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void)
224{
225 return 0x3fU << 6U;
226}
227static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v)
228{
229 return (v & 0x3fU) << 12U;
230}
231static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void)
232{
233 return 0x3fU << 12U;
234}
235static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v)
236{
237 return (v & 0x3fU) << 18U;
238}
239static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void)
240{
241 return 0x3fU << 18U;
242}
243static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v)
244{
245 return (v & 0x3fU) << 24U;
246}
247static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void)
248{
249 return 0x3fU << 24U;
250}
251static inline u32 therm_grad_stepping0_r(void)
252{
253 return 0x000202c0U;
254}
255static inline u32 therm_grad_stepping0_feature_s(void)
256{
257 return 1U;
258}
259static inline u32 therm_grad_stepping0_feature_f(u32 v)
260{
261 return (v & 0x1U) << 0U;
262}
263static inline u32 therm_grad_stepping0_feature_m(void)
264{
265 return 0x1U << 0U;
266}
267static inline u32 therm_grad_stepping0_feature_v(u32 r)
268{
269 return (r >> 0U) & 0x1U;
270}
271static inline u32 therm_grad_stepping0_feature_enable_f(void)
272{
273 return 0x1U;
274}
275static inline u32 therm_grad_stepping1_r(void)
276{
277 return 0x000202c4U;
278}
279static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v)
280{
281 return (v & 0x1ffffU) << 0U;
282}
283static inline u32 therm_clk_timing_r(u32 i)
284{
285 return 0x000203c0U + i*4U;
286}
287static inline u32 therm_clk_timing_grad_slowdown_f(u32 v)
288{
289 return (v & 0x1U) << 16U;
290}
291static inline u32 therm_clk_timing_grad_slowdown_m(void)
292{
293 return 0x1U << 16U;
294}
295static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void)
296{
297 return 0x10000U;
298}
299#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h
new file mode 100644
index 00000000..9d76e241
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_timer_gv100_h_
57#define _hw_timer_gv100_h_
58
59static inline u32 timer_pri_timeout_r(void)
60{
61 return 0x00009080U;
62}
63static inline u32 timer_pri_timeout_period_f(u32 v)
64{
65 return (v & 0xffffffU) << 0U;
66}
67static inline u32 timer_pri_timeout_period_m(void)
68{
69 return 0xffffffU << 0U;
70}
71static inline u32 timer_pri_timeout_period_v(u32 r)
72{
73 return (r >> 0U) & 0xffffffU;
74}
75static inline u32 timer_pri_timeout_en_f(u32 v)
76{
77 return (v & 0x1U) << 31U;
78}
79static inline u32 timer_pri_timeout_en_m(void)
80{
81 return 0x1U << 31U;
82}
83static inline u32 timer_pri_timeout_en_v(u32 r)
84{
85 return (r >> 31U) & 0x1U;
86}
87static inline u32 timer_pri_timeout_en_en_enabled_f(void)
88{
89 return 0x80000000U;
90}
91static inline u32 timer_pri_timeout_en_en_disabled_f(void)
92{
93 return 0x0U;
94}
95static inline u32 timer_pri_timeout_save_0_r(void)
96{
97 return 0x00009084U;
98}
99static inline u32 timer_pri_timeout_save_1_r(void)
100{
101 return 0x00009088U;
102}
103static inline u32 timer_pri_timeout_fecs_errcode_r(void)
104{
105 return 0x0000908cU;
106}
107static inline u32 timer_time_0_r(void)
108{
109 return 0x00009400U;
110}
111static inline u32 timer_time_1_r(void)
112{
113 return 0x00009410U;
114}
115#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h
new file mode 100644
index 00000000..da297b72
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_top_gv100_h_
57#define _hw_top_gv100_h_
58
59static inline u32 top_num_gpcs_r(void)
60{
61 return 0x00022430U;
62}
63static inline u32 top_num_gpcs_value_v(u32 r)
64{
65 return (r >> 0U) & 0x1fU;
66}
67static inline u32 top_tpc_per_gpc_r(void)
68{
69 return 0x00022434U;
70}
71static inline u32 top_tpc_per_gpc_value_v(u32 r)
72{
73 return (r >> 0U) & 0x1fU;
74}
75static inline u32 top_num_fbps_r(void)
76{
77 return 0x00022438U;
78}
79static inline u32 top_num_fbps_value_v(u32 r)
80{
81 return (r >> 0U) & 0x1fU;
82}
83static inline u32 top_ltc_per_fbp_r(void)
84{
85 return 0x00022450U;
86}
87static inline u32 top_ltc_per_fbp_value_v(u32 r)
88{
89 return (r >> 0U) & 0x1fU;
90}
91static inline u32 top_slices_per_ltc_r(void)
92{
93 return 0x0002245cU;
94}
95static inline u32 top_slices_per_ltc_value_v(u32 r)
96{
97 return (r >> 0U) & 0x1fU;
98}
99static inline u32 top_num_ltcs_r(void)
100{
101 return 0x00022454U;
102}
103static inline u32 top_num_ces_r(void)
104{
105 return 0x00022444U;
106}
107static inline u32 top_num_ces_value_v(u32 r)
108{
109 return (r >> 0U) & 0x1fU;
110}
111static inline u32 top_device_info_r(u32 i)
112{
113 return 0x00022700U + i*4U;
114}
115static inline u32 top_device_info__size_1_v(void)
116{
117 return 0x00000040U;
118}
119static inline u32 top_device_info_chain_v(u32 r)
120{
121 return (r >> 31U) & 0x1U;
122}
123static inline u32 top_device_info_chain_enable_v(void)
124{
125 return 0x00000001U;
126}
127static inline u32 top_device_info_engine_enum_v(u32 r)
128{
129 return (r >> 26U) & 0xfU;
130}
131static inline u32 top_device_info_runlist_enum_v(u32 r)
132{
133 return (r >> 21U) & 0xfU;
134}
135static inline u32 top_device_info_intr_enum_v(u32 r)
136{
137 return (r >> 15U) & 0x1fU;
138}
139static inline u32 top_device_info_reset_enum_v(u32 r)
140{
141 return (r >> 9U) & 0x1fU;
142}
143static inline u32 top_device_info_type_enum_v(u32 r)
144{
145 return (r >> 2U) & 0x1fffffffU;
146}
147static inline u32 top_device_info_type_enum_graphics_v(void)
148{
149 return 0x00000000U;
150}
151static inline u32 top_device_info_type_enum_graphics_f(void)
152{
153 return 0x0U;
154}
155static inline u32 top_device_info_type_enum_copy2_v(void)
156{
157 return 0x00000003U;
158}
159static inline u32 top_device_info_type_enum_copy2_f(void)
160{
161 return 0xcU;
162}
163static inline u32 top_device_info_type_enum_lce_v(void)
164{
165 return 0x00000013U;
166}
167static inline u32 top_device_info_type_enum_lce_f(void)
168{
169 return 0x4cU;
170}
171static inline u32 top_device_info_engine_v(u32 r)
172{
173 return (r >> 5U) & 0x1U;
174}
175static inline u32 top_device_info_runlist_v(u32 r)
176{
177 return (r >> 4U) & 0x1U;
178}
179static inline u32 top_device_info_intr_v(u32 r)
180{
181 return (r >> 3U) & 0x1U;
182}
183static inline u32 top_device_info_reset_v(u32 r)
184{
185 return (r >> 2U) & 0x1U;
186}
187static inline u32 top_device_info_entry_v(u32 r)
188{
189 return (r >> 0U) & 0x3U;
190}
191static inline u32 top_device_info_entry_not_valid_v(void)
192{
193 return 0x00000000U;
194}
195static inline u32 top_device_info_entry_enum_v(void)
196{
197 return 0x00000002U;
198}
199static inline u32 top_device_info_entry_data_v(void)
200{
201 return 0x00000001U;
202}
203static inline u32 top_device_info_data_type_v(u32 r)
204{
205 return (r >> 30U) & 0x1U;
206}
207static inline u32 top_device_info_data_type_enum2_v(void)
208{
209 return 0x00000000U;
210}
211static inline u32 top_device_info_data_inst_id_v(u32 r)
212{
213 return (r >> 26U) & 0xfU;
214}
215static inline u32 top_device_info_data_pri_base_v(u32 r)
216{
217 return (r >> 12U) & 0xfffU;
218}
219static inline u32 top_device_info_data_pri_base_align_v(void)
220{
221 return 0x0000000cU;
222}
223static inline u32 top_device_info_data_fault_id_enum_v(u32 r)
224{
225 return (r >> 3U) & 0x7fU;
226}
227static inline u32 top_device_info_data_fault_id_v(u32 r)
228{
229 return (r >> 2U) & 0x1U;
230}
231static inline u32 top_device_info_data_fault_id_valid_v(void)
232{
233 return 0x00000001U;
234}
235#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h
new file mode 100644
index 00000000..7b1d861e
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_usermode_gv100_h_
57#define _hw_usermode_gv100_h_
58
59static inline u32 usermode_cfg0_r(void)
60{
61 return 0x00810000U;
62}
63static inline u32 usermode_cfg0_class_id_f(u32 v)
64{
65 return (v & 0xffffU) << 0U;
66}
67static inline u32 usermode_cfg0_class_id_value_v(void)
68{
69 return 0x0000c361U;
70}
71static inline u32 usermode_time_0_r(void)
72{
73 return 0x00810080U;
74}
75static inline u32 usermode_time_0_nsec_f(u32 v)
76{
77 return (v & 0x7ffffffU) << 5U;
78}
79static inline u32 usermode_time_1_r(void)
80{
81 return 0x00810084U;
82}
83static inline u32 usermode_time_1_nsec_f(u32 v)
84{
85 return (v & 0x1fffffffU) << 0U;
86}
87static inline u32 usermode_notify_channel_pending_r(void)
88{
89 return 0x00810090U;
90}
91static inline u32 usermode_notify_channel_pending_id_f(u32 v)
92{
93 return (v & 0xffffffffU) << 0U;
94}
95#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h
new file mode 100644
index 00000000..4296e043
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_xp_gv100_h_
57#define _hw_xp_gv100_h_
58
59static inline u32 xp_dl_mgr_r(u32 i)
60{
61 return 0x0008b8c0U + i*4U;
62}
63static inline u32 xp_dl_mgr_safe_timing_f(u32 v)
64{
65 return (v & 0x1U) << 2U;
66}
67static inline u32 xp_pl_link_config_r(u32 i)
68{
69 return 0x0008c040U + i*4U;
70}
71static inline u32 xp_pl_link_config_ltssm_status_f(u32 v)
72{
73 return (v & 0x1U) << 4U;
74}
75static inline u32 xp_pl_link_config_ltssm_status_idle_v(void)
76{
77 return 0x00000000U;
78}
79static inline u32 xp_pl_link_config_ltssm_directive_f(u32 v)
80{
81 return (v & 0xfU) << 0U;
82}
83static inline u32 xp_pl_link_config_ltssm_directive_m(void)
84{
85 return 0xfU << 0U;
86}
87static inline u32 xp_pl_link_config_ltssm_directive_normal_operations_v(void)
88{
89 return 0x00000000U;
90}
91static inline u32 xp_pl_link_config_ltssm_directive_change_speed_v(void)
92{
93 return 0x00000001U;
94}
95static inline u32 xp_pl_link_config_max_link_rate_f(u32 v)
96{
97 return (v & 0x3U) << 18U;
98}
99static inline u32 xp_pl_link_config_max_link_rate_m(void)
100{
101 return 0x3U << 18U;
102}
103static inline u32 xp_pl_link_config_max_link_rate_2500_mtps_v(void)
104{
105 return 0x00000002U;
106}
107static inline u32 xp_pl_link_config_max_link_rate_5000_mtps_v(void)
108{
109 return 0x00000001U;
110}
111static inline u32 xp_pl_link_config_max_link_rate_8000_mtps_v(void)
112{
113 return 0x00000000U;
114}
115static inline u32 xp_pl_link_config_target_tx_width_f(u32 v)
116{
117 return (v & 0x7U) << 20U;
118}
119static inline u32 xp_pl_link_config_target_tx_width_m(void)
120{
121 return 0x7U << 20U;
122}
123static inline u32 xp_pl_link_config_target_tx_width_x1_v(void)
124{
125 return 0x00000007U;
126}
127static inline u32 xp_pl_link_config_target_tx_width_x2_v(void)
128{
129 return 0x00000006U;
130}
131static inline u32 xp_pl_link_config_target_tx_width_x4_v(void)
132{
133 return 0x00000005U;
134}
135static inline u32 xp_pl_link_config_target_tx_width_x8_v(void)
136{
137 return 0x00000004U;
138}
139static inline u32 xp_pl_link_config_target_tx_width_x16_v(void)
140{
141 return 0x00000000U;
142}
143#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h
new file mode 100644
index 00000000..fc7aa72e
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_xve_gv100_h_
57#define _hw_xve_gv100_h_
58
59static inline u32 xve_rom_ctrl_r(void)
60{
61 return 0x00000050U;
62}
63static inline u32 xve_rom_ctrl_rom_shadow_f(u32 v)
64{
65 return (v & 0x1U) << 0U;
66}
67static inline u32 xve_rom_ctrl_rom_shadow_disabled_f(void)
68{
69 return 0x0U;
70}
71static inline u32 xve_rom_ctrl_rom_shadow_enabled_f(void)
72{
73 return 0x1U;
74}
75static inline u32 xve_link_control_status_r(void)
76{
77 return 0x00000088U;
78}
79static inline u32 xve_link_control_status_link_speed_m(void)
80{
81 return 0xfU << 16U;
82}
83static inline u32 xve_link_control_status_link_speed_v(u32 r)
84{
85 return (r >> 16U) & 0xfU;
86}
87static inline u32 xve_link_control_status_link_speed_link_speed_2p5_v(void)
88{
89 return 0x00000001U;
90}
91static inline u32 xve_link_control_status_link_speed_link_speed_5p0_v(void)
92{
93 return 0x00000002U;
94}
95static inline u32 xve_link_control_status_link_speed_link_speed_8p0_v(void)
96{
97 return 0x00000003U;
98}
99static inline u32 xve_link_control_status_link_width_m(void)
100{
101 return 0x3fU << 20U;
102}
103static inline u32 xve_link_control_status_link_width_v(u32 r)
104{
105 return (r >> 20U) & 0x3fU;
106}
107static inline u32 xve_link_control_status_link_width_x1_v(void)
108{
109 return 0x00000001U;
110}
111static inline u32 xve_link_control_status_link_width_x2_v(void)
112{
113 return 0x00000002U;
114}
115static inline u32 xve_link_control_status_link_width_x4_v(void)
116{
117 return 0x00000004U;
118}
119static inline u32 xve_link_control_status_link_width_x8_v(void)
120{
121 return 0x00000008U;
122}
123static inline u32 xve_link_control_status_link_width_x16_v(void)
124{
125 return 0x00000010U;
126}
127static inline u32 xve_priv_xv_r(void)
128{
129 return 0x00000150U;
130}
131static inline u32 xve_priv_xv_cya_l0s_enable_f(u32 v)
132{
133 return (v & 0x1U) << 7U;
134}
135static inline u32 xve_priv_xv_cya_l0s_enable_m(void)
136{
137 return 0x1U << 7U;
138}
139static inline u32 xve_priv_xv_cya_l0s_enable_v(u32 r)
140{
141 return (r >> 7U) & 0x1U;
142}
143static inline u32 xve_priv_xv_cya_l1_enable_f(u32 v)
144{
145 return (v & 0x1U) << 8U;
146}
147static inline u32 xve_priv_xv_cya_l1_enable_m(void)
148{
149 return 0x1U << 8U;
150}
151static inline u32 xve_priv_xv_cya_l1_enable_v(u32 r)
152{
153 return (r >> 8U) & 0x1U;
154}
155static inline u32 xve_cya_2_r(void)
156{
157 return 0x00000704U;
158}
159static inline u32 xve_reset_r(void)
160{
161 return 0x00000718U;
162}
163static inline u32 xve_reset_reset_m(void)
164{
165 return 0x1U << 0U;
166}
167static inline u32 xve_reset_gpu_on_sw_reset_m(void)
168{
169 return 0x1U << 1U;
170}
171static inline u32 xve_reset_counter_en_m(void)
172{
173 return 0x1U << 2U;
174}
175static inline u32 xve_reset_counter_val_f(u32 v)
176{
177 return (v & 0x7ffU) << 4U;
178}
179static inline u32 xve_reset_counter_val_m(void)
180{
181 return 0x7ffU << 4U;
182}
183static inline u32 xve_reset_counter_val_v(u32 r)
184{
185 return (r >> 4U) & 0x7ffU;
186}
187static inline u32 xve_reset_clock_on_sw_reset_m(void)
188{
189 return 0x1U << 15U;
190}
191static inline u32 xve_reset_clock_counter_en_m(void)
192{
193 return 0x1U << 16U;
194}
195static inline u32 xve_reset_clock_counter_val_f(u32 v)
196{
197 return (v & 0x7ffU) << 17U;
198}
199static inline u32 xve_reset_clock_counter_val_m(void)
200{
201 return 0x7ffU << 17U;
202}
203static inline u32 xve_reset_clock_counter_val_v(u32 r)
204{
205 return (r >> 17U) & 0x7ffU;
206}
207#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h
new file mode 100644
index 00000000..d1d9b34a
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h
@@ -0,0 +1,223 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_bus_gv11b_h_
57#define _hw_bus_gv11b_h_
58
59static inline u32 bus_bar0_window_r(void)
60{
61 return 0x00001700U;
62}
63static inline u32 bus_bar0_window_base_f(u32 v)
64{
65 return (v & 0xffffffU) << 0U;
66}
67static inline u32 bus_bar0_window_target_vid_mem_f(void)
68{
69 return 0x0U;
70}
71static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void)
72{
73 return 0x2000000U;
74}
75static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void)
76{
77 return 0x3000000U;
78}
79static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void)
80{
81 return 0x00000010U;
82}
83static inline u32 bus_bar1_block_r(void)
84{
85 return 0x00001704U;
86}
87static inline u32 bus_bar1_block_ptr_f(u32 v)
88{
89 return (v & 0xfffffffU) << 0U;
90}
91static inline u32 bus_bar1_block_target_vid_mem_f(void)
92{
93 return 0x0U;
94}
95static inline u32 bus_bar1_block_target_sys_mem_coh_f(void)
96{
97 return 0x20000000U;
98}
99static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void)
100{
101 return 0x30000000U;
102}
103static inline u32 bus_bar1_block_mode_virtual_f(void)
104{
105 return 0x80000000U;
106}
107static inline u32 bus_bar2_block_r(void)
108{
109 return 0x00001714U;
110}
111static inline u32 bus_bar2_block_ptr_f(u32 v)
112{
113 return (v & 0xfffffffU) << 0U;
114}
115static inline u32 bus_bar2_block_target_vid_mem_f(void)
116{
117 return 0x0U;
118}
119static inline u32 bus_bar2_block_target_sys_mem_coh_f(void)
120{
121 return 0x20000000U;
122}
123static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void)
124{
125 return 0x30000000U;
126}
127static inline u32 bus_bar2_block_mode_virtual_f(void)
128{
129 return 0x80000000U;
130}
131static inline u32 bus_bar1_block_ptr_shift_v(void)
132{
133 return 0x0000000cU;
134}
135static inline u32 bus_bar2_block_ptr_shift_v(void)
136{
137 return 0x0000000cU;
138}
139static inline u32 bus_bind_status_r(void)
140{
141 return 0x00001710U;
142}
143static inline u32 bus_bind_status_bar1_pending_v(u32 r)
144{
145 return (r >> 0U) & 0x1U;
146}
147static inline u32 bus_bind_status_bar1_pending_empty_f(void)
148{
149 return 0x0U;
150}
151static inline u32 bus_bind_status_bar1_pending_busy_f(void)
152{
153 return 0x1U;
154}
155static inline u32 bus_bind_status_bar1_outstanding_v(u32 r)
156{
157 return (r >> 1U) & 0x1U;
158}
159static inline u32 bus_bind_status_bar1_outstanding_false_f(void)
160{
161 return 0x0U;
162}
163static inline u32 bus_bind_status_bar1_outstanding_true_f(void)
164{
165 return 0x2U;
166}
167static inline u32 bus_bind_status_bar2_pending_v(u32 r)
168{
169 return (r >> 2U) & 0x1U;
170}
171static inline u32 bus_bind_status_bar2_pending_empty_f(void)
172{
173 return 0x0U;
174}
175static inline u32 bus_bind_status_bar2_pending_busy_f(void)
176{
177 return 0x4U;
178}
179static inline u32 bus_bind_status_bar2_outstanding_v(u32 r)
180{
181 return (r >> 3U) & 0x1U;
182}
183static inline u32 bus_bind_status_bar2_outstanding_false_f(void)
184{
185 return 0x0U;
186}
187static inline u32 bus_bind_status_bar2_outstanding_true_f(void)
188{
189 return 0x8U;
190}
191static inline u32 bus_intr_0_r(void)
192{
193 return 0x00001100U;
194}
195static inline u32 bus_intr_0_pri_squash_m(void)
196{
197 return 0x1U << 1U;
198}
199static inline u32 bus_intr_0_pri_fecserr_m(void)
200{
201 return 0x1U << 2U;
202}
203static inline u32 bus_intr_0_pri_timeout_m(void)
204{
205 return 0x1U << 3U;
206}
207static inline u32 bus_intr_en_0_r(void)
208{
209 return 0x00001140U;
210}
211static inline u32 bus_intr_en_0_pri_squash_m(void)
212{
213 return 0x1U << 1U;
214}
215static inline u32 bus_intr_en_0_pri_fecserr_m(void)
216{
217 return 0x1U << 2U;
218}
219static inline u32 bus_intr_en_0_pri_timeout_m(void)
220{
221 return 0x1U << 3U;
222}
223#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h
new file mode 100644
index 00000000..e21a4738
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ccsr_gv11b_h_
57#define _hw_ccsr_gv11b_h_
58
59static inline u32 ccsr_channel_inst_r(u32 i)
60{
61 return 0x00800000U + i*8U;
62}
63static inline u32 ccsr_channel_inst__size_1_v(void)
64{
65 return 0x00000200U;
66}
67static inline u32 ccsr_channel_inst_ptr_f(u32 v)
68{
69 return (v & 0xfffffffU) << 0U;
70}
71static inline u32 ccsr_channel_inst_target_vid_mem_f(void)
72{
73 return 0x0U;
74}
75static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void)
76{
77 return 0x20000000U;
78}
79static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void)
80{
81 return 0x30000000U;
82}
83static inline u32 ccsr_channel_inst_bind_false_f(void)
84{
85 return 0x0U;
86}
87static inline u32 ccsr_channel_inst_bind_true_f(void)
88{
89 return 0x80000000U;
90}
91static inline u32 ccsr_channel_r(u32 i)
92{
93 return 0x00800004U + i*8U;
94}
95static inline u32 ccsr_channel__size_1_v(void)
96{
97 return 0x00000200U;
98}
99static inline u32 ccsr_channel_enable_v(u32 r)
100{
101 return (r >> 0U) & 0x1U;
102}
103static inline u32 ccsr_channel_enable_set_f(u32 v)
104{
105 return (v & 0x1U) << 10U;
106}
107static inline u32 ccsr_channel_enable_set_true_f(void)
108{
109 return 0x400U;
110}
111static inline u32 ccsr_channel_enable_clr_true_f(void)
112{
113 return 0x800U;
114}
115static inline u32 ccsr_channel_status_v(u32 r)
116{
117 return (r >> 24U) & 0xfU;
118}
119static inline u32 ccsr_channel_status_pending_ctx_reload_v(void)
120{
121 return 0x00000002U;
122}
123static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void)
124{
125 return 0x00000004U;
126}
127static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void)
128{
129 return 0x0000000aU;
130}
131static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void)
132{
133 return 0x0000000bU;
134}
135static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void)
136{
137 return 0x0000000cU;
138}
139static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void)
140{
141 return 0x0000000dU;
142}
143static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void)
144{
145 return 0x0000000eU;
146}
147static inline u32 ccsr_channel_next_v(u32 r)
148{
149 return (r >> 1U) & 0x1U;
150}
151static inline u32 ccsr_channel_next_true_v(void)
152{
153 return 0x00000001U;
154}
155static inline u32 ccsr_channel_force_ctx_reload_true_f(void)
156{
157 return 0x100U;
158}
159static inline u32 ccsr_channel_pbdma_faulted_f(u32 v)
160{
161 return (v & 0x1U) << 22U;
162}
163static inline u32 ccsr_channel_pbdma_faulted_reset_f(void)
164{
165 return 0x400000U;
166}
167static inline u32 ccsr_channel_eng_faulted_f(u32 v)
168{
169 return (v & 0x1U) << 23U;
170}
171static inline u32 ccsr_channel_eng_faulted_v(u32 r)
172{
173 return (r >> 23U) & 0x1U;
174}
175static inline u32 ccsr_channel_eng_faulted_reset_f(void)
176{
177 return 0x800000U;
178}
179static inline u32 ccsr_channel_eng_faulted_true_v(void)
180{
181 return 0x00000001U;
182}
183static inline u32 ccsr_channel_busy_v(u32 r)
184{
185 return (r >> 28U) & 0x1U;
186}
187#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h
new file mode 100644
index 00000000..efc14d00
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ce_gv11b_h_
57#define _hw_ce_gv11b_h_
58
59static inline u32 ce_intr_status_r(u32 i)
60{
61 return 0x00104410U + i*128U;
62}
63static inline u32 ce_intr_status_blockpipe_pending_f(void)
64{
65 return 0x1U;
66}
67static inline u32 ce_intr_status_blockpipe_reset_f(void)
68{
69 return 0x1U;
70}
71static inline u32 ce_intr_status_nonblockpipe_pending_f(void)
72{
73 return 0x2U;
74}
75static inline u32 ce_intr_status_nonblockpipe_reset_f(void)
76{
77 return 0x2U;
78}
79static inline u32 ce_intr_status_launcherr_pending_f(void)
80{
81 return 0x4U;
82}
83static inline u32 ce_intr_status_launcherr_reset_f(void)
84{
85 return 0x4U;
86}
87static inline u32 ce_intr_status_invalid_config_pending_f(void)
88{
89 return 0x8U;
90}
91static inline u32 ce_intr_status_invalid_config_reset_f(void)
92{
93 return 0x8U;
94}
95static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void)
96{
97 return 0x10U;
98}
99static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void)
100{
101 return 0x10U;
102}
103static inline u32 ce_pce_map_r(void)
104{
105 return 0x00104028U;
106}
107#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h
new file mode 100644
index 00000000..623a8c15
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h
@@ -0,0 +1,455 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ctxsw_prog_gv11b_h_
57#define _hw_ctxsw_prog_gv11b_h_
58
59static inline u32 ctxsw_prog_fecs_header_v(void)
60{
61 return 0x00000100U;
62}
63static inline u32 ctxsw_prog_main_image_num_gpcs_o(void)
64{
65 return 0x00000008U;
66}
67static inline u32 ctxsw_prog_main_image_ctl_o(void)
68{
69 return 0x0000000cU;
70}
71static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v)
72{
73 return (v & 0x3fU) << 0U;
74}
75static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void)
76{
77 return 0x00000000U;
78}
79static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void)
80{
81 return 0x00000008U;
82}
83static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void)
84{
85 return 0x00000010U;
86}
87static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void)
88{
89 return 0x00000011U;
90}
91static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void)
92{
93 return 0x00000012U;
94}
95static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void)
96{
97 return 0x00000020U;
98}
99static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void)
100{
101 return 0x00000021U;
102}
103static inline u32 ctxsw_prog_main_image_patch_count_o(void)
104{
105 return 0x00000010U;
106}
107static inline u32 ctxsw_prog_main_image_context_id_o(void)
108{
109 return 0x000000f0U;
110}
111static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void)
112{
113 return 0x00000014U;
114}
115static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void)
116{
117 return 0x00000018U;
118}
119static inline u32 ctxsw_prog_main_image_zcull_o(void)
120{
121 return 0x0000001cU;
122}
123static inline u32 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void)
124{
125 return 0x00000001U;
126}
127static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void)
128{
129 return 0x00000002U;
130}
131static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void)
132{
133 return 0x00000020U;
134}
135static inline u32 ctxsw_prog_main_image_pm_o(void)
136{
137 return 0x00000028U;
138}
139static inline u32 ctxsw_prog_main_image_pm_mode_m(void)
140{
141 return 0x7U << 0U;
142}
143static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void)
144{
145 return 0x0U;
146}
147static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void)
148{
149 return 0x7U << 3U;
150}
151static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void)
152{
153 return 0x8U;
154}
155static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void)
156{
157 return 0x0U;
158}
159static inline u32 ctxsw_prog_main_image_pm_ptr_o(void)
160{
161 return 0x0000002cU;
162}
163static inline u32 ctxsw_prog_main_image_num_save_ops_o(void)
164{
165 return 0x000000f4U;
166}
167static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void)
168{
169 return 0x000000d0U;
170}
171static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void)
172{
173 return 0x000000d4U;
174}
175static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void)
176{
177 return 0x000000d8U;
178}
179static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void)
180{
181 return 0x000000dcU;
182}
183static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void)
184{
185 return 0x000000f8U;
186}
187static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void)
188{
189 return 0x00000060U;
190}
191static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v)
192{
193 return (v & 0x1ffffU) << 0U;
194}
195static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void)
196{
197 return 0x00000094U;
198}
199static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void)
200{
201 return 0x00000064U;
202}
203static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v)
204{
205 return (v & 0x1ffffU) << 0U;
206}
207static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void)
208{
209 return 0x00000068U;
210}
211static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v)
212{
213 return (v & 0xffffffffU) << 0U;
214}
215static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void)
216{
217 return 0x00000070U;
218}
219static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v)
220{
221 return (v & 0x1ffffU) << 0U;
222}
223static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void)
224{
225 return 0x00000074U;
226}
227static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v)
228{
229 return (v & 0xffffffffU) << 0U;
230}
231static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void)
232{
233 return 0x00000078U;
234}
235static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v)
236{
237 return (v & 0x1ffffU) << 0U;
238}
239static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void)
240{
241 return 0x0000007cU;
242}
243static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v)
244{
245 return (v & 0xffffffffU) << 0U;
246}
247static inline u32 ctxsw_prog_main_image_magic_value_o(void)
248{
249 return 0x000000fcU;
250}
251static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void)
252{
253 return 0x600dc0deU;
254}
255static inline u32 ctxsw_prog_local_priv_register_ctl_o(void)
256{
257 return 0x0000000cU;
258}
259static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r)
260{
261 return (r >> 0U) & 0xffffU;
262}
263static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void)
264{
265 return 0x000000b8U;
266}
267static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v)
268{
269 return (v & 0xffffffffU) << 0U;
270}
271static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void)
272{
273 return 0x000000bcU;
274}
275static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v)
276{
277 return (v & 0x1ffffU) << 0U;
278}
279static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void)
280{
281 return 0x000000c0U;
282}
283static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v)
284{
285 return (v & 0xffffffffU) << 0U;
286}
287static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void)
288{
289 return 0x000000c4U;
290}
291static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v)
292{
293 return (v & 0x1ffffU) << 0U;
294}
295static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void)
296{
297 return 0x000000c8U;
298}
299static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v)
300{
301 return (v & 0xffffffffU) << 0U;
302}
303static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void)
304{
305 return 0x000000ccU;
306}
307static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v)
308{
309 return (v & 0x1ffffU) << 0U;
310}
311static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void)
312{
313 return 0x000000e0U;
314}
315static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v)
316{
317 return (v & 0xffffffffU) << 0U;
318}
319static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void)
320{
321 return 0x000000e4U;
322}
323static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v)
324{
325 return (v & 0x1ffffU) << 0U;
326}
327static inline u32 ctxsw_prog_local_image_ppc_info_o(void)
328{
329 return 0x000000f4U;
330}
331static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r)
332{
333 return (r >> 0U) & 0xffffU;
334}
335static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r)
336{
337 return (r >> 16U) & 0xffffU;
338}
339static inline u32 ctxsw_prog_local_image_num_tpcs_o(void)
340{
341 return 0x000000f8U;
342}
343static inline u32 ctxsw_prog_local_magic_value_o(void)
344{
345 return 0x000000fcU;
346}
347static inline u32 ctxsw_prog_local_magic_value_v_value_v(void)
348{
349 return 0xad0becabU;
350}
351static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void)
352{
353 return 0x000000ecU;
354}
355static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r)
356{
357 return (r >> 0U) & 0xffffU;
358}
359static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r)
360{
361 return (r >> 16U) & 0xffU;
362}
363static inline u32 ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void)
364{
365 return 0x00000100U;
366}
367static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void)
368{
369 return 0x00000004U;
370}
371static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void)
372{
373 return 0x00000000U;
374}
375static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void)
376{
377 return 0x00000002U;
378}
379static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void)
380{
381 return 0x000000a0U;
382}
383static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void)
384{
385 return 2U;
386}
387static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v)
388{
389 return (v & 0x3U) << 0U;
390}
391static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void)
392{
393 return 0x3U << 0U;
394}
395static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r)
396{
397 return (r >> 0U) & 0x3U;
398}
399static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void)
400{
401 return 0x0U;
402}
403static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void)
404{
405 return 0x2U;
406}
407static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void)
408{
409 return 0x000000a4U;
410}
411static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void)
412{
413 return 0x000000a8U;
414}
415static inline u32 ctxsw_prog_main_image_misc_options_o(void)
416{
417 return 0x0000003cU;
418}
419static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void)
420{
421 return 0x1U << 3U;
422}
423static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void)
424{
425 return 0x0U;
426}
427static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void)
428{
429 return 0x00000080U;
430}
431static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v)
432{
433 return (v & 0x3U) << 0U;
434}
435static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void)
436{
437 return 0x1U;
438}
439static inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void)
440{
441 return 0x00000084U;
442}
443static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v)
444{
445 return (v & 0x3U) << 0U;
446}
447static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void)
448{
449 return 0x1U;
450}
451static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void)
452{
453 return 0x2U;
454}
455#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h
new file mode 100644
index 00000000..4bb8f2de
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h
@@ -0,0 +1,599 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_falcon_gv11b_h_
57#define _hw_falcon_gv11b_h_
58
59static inline u32 falcon_falcon_irqsset_r(void)
60{
61 return 0x00000000U;
62}
63static inline u32 falcon_falcon_irqsset_swgen0_set_f(void)
64{
65 return 0x40U;
66}
67static inline u32 falcon_falcon_irqsclr_r(void)
68{
69 return 0x00000004U;
70}
71static inline u32 falcon_falcon_irqstat_r(void)
72{
73 return 0x00000008U;
74}
75static inline u32 falcon_falcon_irqstat_halt_true_f(void)
76{
77 return 0x10U;
78}
79static inline u32 falcon_falcon_irqstat_exterr_true_f(void)
80{
81 return 0x20U;
82}
83static inline u32 falcon_falcon_irqstat_swgen0_true_f(void)
84{
85 return 0x40U;
86}
87static inline u32 falcon_falcon_irqmode_r(void)
88{
89 return 0x0000000cU;
90}
91static inline u32 falcon_falcon_irqmset_r(void)
92{
93 return 0x00000010U;
94}
95static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v)
96{
97 return (v & 0x1U) << 0U;
98}
99static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v)
100{
101 return (v & 0x1U) << 1U;
102}
103static inline u32 falcon_falcon_irqmset_mthd_f(u32 v)
104{
105 return (v & 0x1U) << 2U;
106}
107static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v)
108{
109 return (v & 0x1U) << 3U;
110}
111static inline u32 falcon_falcon_irqmset_halt_f(u32 v)
112{
113 return (v & 0x1U) << 4U;
114}
115static inline u32 falcon_falcon_irqmset_exterr_f(u32 v)
116{
117 return (v & 0x1U) << 5U;
118}
119static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v)
120{
121 return (v & 0x1U) << 6U;
122}
123static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v)
124{
125 return (v & 0x1U) << 7U;
126}
127static inline u32 falcon_falcon_irqmclr_r(void)
128{
129 return 0x00000014U;
130}
131static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v)
132{
133 return (v & 0x1U) << 0U;
134}
135static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v)
136{
137 return (v & 0x1U) << 1U;
138}
139static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v)
140{
141 return (v & 0x1U) << 2U;
142}
143static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v)
144{
145 return (v & 0x1U) << 3U;
146}
147static inline u32 falcon_falcon_irqmclr_halt_f(u32 v)
148{
149 return (v & 0x1U) << 4U;
150}
151static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v)
152{
153 return (v & 0x1U) << 5U;
154}
155static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v)
156{
157 return (v & 0x1U) << 6U;
158}
159static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v)
160{
161 return (v & 0x1U) << 7U;
162}
163static inline u32 falcon_falcon_irqmclr_ext_f(u32 v)
164{
165 return (v & 0xffU) << 8U;
166}
167static inline u32 falcon_falcon_irqmask_r(void)
168{
169 return 0x00000018U;
170}
171static inline u32 falcon_falcon_irqdest_r(void)
172{
173 return 0x0000001cU;
174}
175static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v)
176{
177 return (v & 0x1U) << 0U;
178}
179static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v)
180{
181 return (v & 0x1U) << 1U;
182}
183static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v)
184{
185 return (v & 0x1U) << 2U;
186}
187static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v)
188{
189 return (v & 0x1U) << 3U;
190}
191static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v)
192{
193 return (v & 0x1U) << 4U;
194}
195static inline u32 falcon_falcon_irqdest_host_exterr_f(u32 v)
196{
197 return (v & 0x1U) << 5U;
198}
199static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v)
200{
201 return (v & 0x1U) << 6U;
202}
203static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v)
204{
205 return (v & 0x1U) << 7U;
206}
207static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v)
208{
209 return (v & 0xffU) << 8U;
210}
211static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v)
212{
213 return (v & 0x1U) << 16U;
214}
215static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v)
216{
217 return (v & 0x1U) << 17U;
218}
219static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v)
220{
221 return (v & 0x1U) << 18U;
222}
223static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v)
224{
225 return (v & 0x1U) << 19U;
226}
227static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v)
228{
229 return (v & 0x1U) << 20U;
230}
231static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v)
232{
233 return (v & 0x1U) << 21U;
234}
235static inline u32 falcon_falcon_irqdest_target_swgen0_f(u32 v)
236{
237 return (v & 0x1U) << 22U;
238}
239static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v)
240{
241 return (v & 0x1U) << 23U;
242}
243static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v)
244{
245 return (v & 0xffU) << 24U;
246}
247static inline u32 falcon_falcon_curctx_r(void)
248{
249 return 0x00000050U;
250}
251static inline u32 falcon_falcon_nxtctx_r(void)
252{
253 return 0x00000054U;
254}
255static inline u32 falcon_falcon_mailbox0_r(void)
256{
257 return 0x00000040U;
258}
259static inline u32 falcon_falcon_mailbox1_r(void)
260{
261 return 0x00000044U;
262}
263static inline u32 falcon_falcon_itfen_r(void)
264{
265 return 0x00000048U;
266}
267static inline u32 falcon_falcon_itfen_ctxen_enable_f(void)
268{
269 return 0x1U;
270}
271static inline u32 falcon_falcon_idlestate_r(void)
272{
273 return 0x0000004cU;
274}
275static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r)
276{
277 return (r >> 0U) & 0x1U;
278}
279static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r)
280{
281 return (r >> 1U) & 0x7fffU;
282}
283static inline u32 falcon_falcon_os_r(void)
284{
285 return 0x00000080U;
286}
287static inline u32 falcon_falcon_engctl_r(void)
288{
289 return 0x000000a4U;
290}
291static inline u32 falcon_falcon_cpuctl_r(void)
292{
293 return 0x00000100U;
294}
295static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v)
296{
297 return (v & 0x1U) << 1U;
298}
299static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v)
300{
301 return (v & 0x1U) << 2U;
302}
303static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v)
304{
305 return (v & 0x1U) << 3U;
306}
307static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v)
308{
309 return (v & 0x1U) << 4U;
310}
311static inline u32 falcon_falcon_cpuctl_halt_intr_m(void)
312{
313 return 0x1U << 4U;
314}
315static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r)
316{
317 return (r >> 4U) & 0x1U;
318}
319static inline u32 falcon_falcon_cpuctl_stopped_m(void)
320{
321 return 0x1U << 5U;
322}
323static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
324{
325 return (v & 0x1U) << 6U;
326}
327static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_m(void)
328{
329 return 0x1U << 6U;
330}
331static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
332{
333 return (r >> 6U) & 0x1U;
334}
335static inline u32 falcon_falcon_cpuctl_alias_r(void)
336{
337 return 0x00000130U;
338}
339static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v)
340{
341 return (v & 0x1U) << 1U;
342}
343static inline u32 falcon_falcon_imemc_r(u32 i)
344{
345 return 0x00000180U + i*16U;
346}
347static inline u32 falcon_falcon_imemc_offs_f(u32 v)
348{
349 return (v & 0x3fU) << 2U;
350}
351static inline u32 falcon_falcon_imemc_blk_f(u32 v)
352{
353 return (v & 0xffU) << 8U;
354}
355static inline u32 falcon_falcon_imemc_aincw_f(u32 v)
356{
357 return (v & 0x1U) << 24U;
358}
359static inline u32 falcon_falcon_imemd_r(u32 i)
360{
361 return 0x00000184U + i*16U;
362}
363static inline u32 falcon_falcon_imemt_r(u32 i)
364{
365 return 0x00000188U + i*16U;
366}
367static inline u32 falcon_falcon_sctl_r(void)
368{
369 return 0x00000240U;
370}
371static inline u32 falcon_falcon_mmu_phys_sec_r(void)
372{
373 return 0x00100ce4U;
374}
375static inline u32 falcon_falcon_bootvec_r(void)
376{
377 return 0x00000104U;
378}
379static inline u32 falcon_falcon_bootvec_vec_f(u32 v)
380{
381 return (v & 0xffffffffU) << 0U;
382}
383static inline u32 falcon_falcon_dmactl_r(void)
384{
385 return 0x0000010cU;
386}
387static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void)
388{
389 return 0x1U << 1U;
390}
391static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void)
392{
393 return 0x1U << 2U;
394}
395static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v)
396{
397 return (v & 0x1U) << 0U;
398}
399static inline u32 falcon_falcon_hwcfg_r(void)
400{
401 return 0x00000108U;
402}
403static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r)
404{
405 return (r >> 0U) & 0x1ffU;
406}
407static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r)
408{
409 return (r >> 9U) & 0x1ffU;
410}
411static inline u32 falcon_falcon_dmatrfbase_r(void)
412{
413 return 0x00000110U;
414}
415static inline u32 falcon_falcon_dmatrfbase1_r(void)
416{
417 return 0x00000128U;
418}
419static inline u32 falcon_falcon_dmatrfmoffs_r(void)
420{
421 return 0x00000114U;
422}
423static inline u32 falcon_falcon_dmatrfcmd_r(void)
424{
425 return 0x00000118U;
426}
427static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v)
428{
429 return (v & 0x1U) << 4U;
430}
431static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v)
432{
433 return (v & 0x1U) << 5U;
434}
435static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v)
436{
437 return (v & 0x7U) << 8U;
438}
439static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v)
440{
441 return (v & 0x7U) << 12U;
442}
443static inline u32 falcon_falcon_dmatrffboffs_r(void)
444{
445 return 0x0000011cU;
446}
447static inline u32 falcon_falcon_imctl_debug_r(void)
448{
449 return 0x0000015cU;
450}
451static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v)
452{
453 return (v & 0xffffffU) << 0U;
454}
455static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v)
456{
457 return (v & 0x7U) << 24U;
458}
459static inline u32 falcon_falcon_imstat_r(void)
460{
461 return 0x00000144U;
462}
463static inline u32 falcon_falcon_traceidx_r(void)
464{
465 return 0x00000148U;
466}
467static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r)
468{
469 return (r >> 16U) & 0xffU;
470}
471static inline u32 falcon_falcon_traceidx_idx_f(u32 v)
472{
473 return (v & 0xffU) << 0U;
474}
475static inline u32 falcon_falcon_tracepc_r(void)
476{
477 return 0x0000014cU;
478}
479static inline u32 falcon_falcon_tracepc_pc_v(u32 r)
480{
481 return (r >> 0U) & 0xffffffU;
482}
483static inline u32 falcon_falcon_exterraddr_r(void)
484{
485 return 0x00000168U;
486}
487static inline u32 falcon_falcon_exterrstat_r(void)
488{
489 return 0x0000016cU;
490}
491static inline u32 falcon_falcon_exterrstat_valid_m(void)
492{
493 return 0x1U << 31U;
494}
495static inline u32 falcon_falcon_exterrstat_valid_v(u32 r)
496{
497 return (r >> 31U) & 0x1U;
498}
499static inline u32 falcon_falcon_exterrstat_valid_true_v(void)
500{
501 return 0x00000001U;
502}
503static inline u32 falcon_falcon_icd_cmd_r(void)
504{
505 return 0x00000200U;
506}
507static inline u32 falcon_falcon_icd_cmd_opc_s(void)
508{
509 return 4U;
510}
511static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v)
512{
513 return (v & 0xfU) << 0U;
514}
515static inline u32 falcon_falcon_icd_cmd_opc_m(void)
516{
517 return 0xfU << 0U;
518}
519static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r)
520{
521 return (r >> 0U) & 0xfU;
522}
523static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void)
524{
525 return 0x8U;
526}
527static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void)
528{
529 return 0xeU;
530}
531static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v)
532{
533 return (v & 0x1fU) << 8U;
534}
535static inline u32 falcon_falcon_icd_rdata_r(void)
536{
537 return 0x0000020cU;
538}
539static inline u32 falcon_falcon_dmemc_r(u32 i)
540{
541 return 0x000001c0U + i*8U;
542}
543static inline u32 falcon_falcon_dmemc_offs_f(u32 v)
544{
545 return (v & 0x3fU) << 2U;
546}
547static inline u32 falcon_falcon_dmemc_offs_m(void)
548{
549 return 0x3fU << 2U;
550}
551static inline u32 falcon_falcon_dmemc_blk_f(u32 v)
552{
553 return (v & 0xffU) << 8U;
554}
555static inline u32 falcon_falcon_dmemc_blk_m(void)
556{
557 return 0xffU << 8U;
558}
559static inline u32 falcon_falcon_dmemc_aincw_f(u32 v)
560{
561 return (v & 0x1U) << 24U;
562}
563static inline u32 falcon_falcon_dmemc_aincr_f(u32 v)
564{
565 return (v & 0x1U) << 25U;
566}
567static inline u32 falcon_falcon_dmemd_r(u32 i)
568{
569 return 0x000001c4U + i*8U;
570}
571static inline u32 falcon_falcon_debug1_r(void)
572{
573 return 0x00000090U;
574}
575static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void)
576{
577 return 1U;
578}
579static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v)
580{
581 return (v & 0x1U) << 16U;
582}
583static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void)
584{
585 return 0x1U << 16U;
586}
587static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r)
588{
589 return (r >> 16U) & 0x1U;
590}
591static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void)
592{
593 return 0x0U;
594}
595static inline u32 falcon_falcon_debuginfo_r(void)
596{
597 return 0x00000094U;
598}
599#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h
new file mode 100644
index 00000000..ea3c7939
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h
@@ -0,0 +1,1827 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_fb_gv11b_h_
57#define _hw_fb_gv11b_h_
58
59static inline u32 fb_fbhub_num_active_ltcs_r(void)
60{
61 return 0x00100800U;
62}
63static inline u32 fb_mmu_ctrl_r(void)
64{
65 return 0x00100c80U;
66}
67static inline u32 fb_mmu_ctrl_vm_pg_size_f(u32 v)
68{
69 return (v & 0x1U) << 0U;
70}
71static inline u32 fb_mmu_ctrl_vm_pg_size_128kb_f(void)
72{
73 return 0x0U;
74}
75static inline u32 fb_mmu_ctrl_vm_pg_size_64kb_f(void)
76{
77 return 0x1U;
78}
79static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r)
80{
81 return (r >> 15U) & 0x1U;
82}
83static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void)
84{
85 return 0x0U;
86}
87static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r)
88{
89 return (r >> 16U) & 0xffU;
90}
91static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_v(u32 r)
92{
93 return (r >> 11U) & 0x1U;
94}
95static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_true_f(void)
96{
97 return 0x800U;
98}
99static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_false_f(void)
100{
101 return 0x0U;
102}
103static inline u32 fb_priv_mmu_phy_secure_r(void)
104{
105 return 0x00100ce4U;
106}
107static inline u32 fb_mmu_invalidate_pdb_r(void)
108{
109 return 0x00100cb8U;
110}
111static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void)
112{
113 return 0x0U;
114}
115static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void)
116{
117 return 0x2U;
118}
119static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v)
120{
121 return (v & 0xfffffffU) << 4U;
122}
123static inline u32 fb_mmu_invalidate_r(void)
124{
125 return 0x00100cbcU;
126}
127static inline u32 fb_mmu_invalidate_all_va_true_f(void)
128{
129 return 0x1U;
130}
131static inline u32 fb_mmu_invalidate_all_pdb_true_f(void)
132{
133 return 0x2U;
134}
135static inline u32 fb_mmu_invalidate_hubtlb_only_s(void)
136{
137 return 1U;
138}
139static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v)
140{
141 return (v & 0x1U) << 2U;
142}
143static inline u32 fb_mmu_invalidate_hubtlb_only_m(void)
144{
145 return 0x1U << 2U;
146}
147static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r)
148{
149 return (r >> 2U) & 0x1U;
150}
151static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void)
152{
153 return 0x4U;
154}
155static inline u32 fb_mmu_invalidate_replay_s(void)
156{
157 return 3U;
158}
159static inline u32 fb_mmu_invalidate_replay_f(u32 v)
160{
161 return (v & 0x7U) << 3U;
162}
163static inline u32 fb_mmu_invalidate_replay_m(void)
164{
165 return 0x7U << 3U;
166}
167static inline u32 fb_mmu_invalidate_replay_v(u32 r)
168{
169 return (r >> 3U) & 0x7U;
170}
171static inline u32 fb_mmu_invalidate_replay_none_f(void)
172{
173 return 0x0U;
174}
175static inline u32 fb_mmu_invalidate_replay_start_f(void)
176{
177 return 0x8U;
178}
179static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void)
180{
181 return 0x10U;
182}
183static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void)
184{
185 return 0x20U;
186}
187static inline u32 fb_mmu_invalidate_sys_membar_s(void)
188{
189 return 1U;
190}
191static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v)
192{
193 return (v & 0x1U) << 6U;
194}
195static inline u32 fb_mmu_invalidate_sys_membar_m(void)
196{
197 return 0x1U << 6U;
198}
199static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r)
200{
201 return (r >> 6U) & 0x1U;
202}
203static inline u32 fb_mmu_invalidate_sys_membar_true_f(void)
204{
205 return 0x40U;
206}
207static inline u32 fb_mmu_invalidate_ack_s(void)
208{
209 return 2U;
210}
211static inline u32 fb_mmu_invalidate_ack_f(u32 v)
212{
213 return (v & 0x3U) << 7U;
214}
215static inline u32 fb_mmu_invalidate_ack_m(void)
216{
217 return 0x3U << 7U;
218}
219static inline u32 fb_mmu_invalidate_ack_v(u32 r)
220{
221 return (r >> 7U) & 0x3U;
222}
223static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void)
224{
225 return 0x0U;
226}
227static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void)
228{
229 return 0x100U;
230}
231static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void)
232{
233 return 0x80U;
234}
235static inline u32 fb_mmu_invalidate_cancel_client_id_s(void)
236{
237 return 6U;
238}
239static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v)
240{
241 return (v & 0x3fU) << 9U;
242}
243static inline u32 fb_mmu_invalidate_cancel_client_id_m(void)
244{
245 return 0x3fU << 9U;
246}
247static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r)
248{
249 return (r >> 9U) & 0x3fU;
250}
251static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void)
252{
253 return 5U;
254}
255static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v)
256{
257 return (v & 0x1fU) << 15U;
258}
259static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void)
260{
261 return 0x1fU << 15U;
262}
263static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r)
264{
265 return (r >> 15U) & 0x1fU;
266}
267static inline u32 fb_mmu_invalidate_cancel_client_type_s(void)
268{
269 return 1U;
270}
271static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v)
272{
273 return (v & 0x1U) << 20U;
274}
275static inline u32 fb_mmu_invalidate_cancel_client_type_m(void)
276{
277 return 0x1U << 20U;
278}
279static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r)
280{
281 return (r >> 20U) & 0x1U;
282}
283static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void)
284{
285 return 0x0U;
286}
287static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void)
288{
289 return 0x100000U;
290}
291static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void)
292{
293 return 3U;
294}
295static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v)
296{
297 return (v & 0x7U) << 24U;
298}
299static inline u32 fb_mmu_invalidate_cancel_cache_level_m(void)
300{
301 return 0x7U << 24U;
302}
303static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r)
304{
305 return (r >> 24U) & 0x7U;
306}
307static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void)
308{
309 return 0x0U;
310}
311static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void)
312{
313 return 0x1000000U;
314}
315static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void)
316{
317 return 0x2000000U;
318}
319static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void)
320{
321 return 0x3000000U;
322}
323static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void)
324{
325 return 0x4000000U;
326}
327static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void)
328{
329 return 0x5000000U;
330}
331static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void)
332{
333 return 0x6000000U;
334}
335static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void)
336{
337 return 0x7000000U;
338}
339static inline u32 fb_mmu_invalidate_trigger_s(void)
340{
341 return 1U;
342}
343static inline u32 fb_mmu_invalidate_trigger_f(u32 v)
344{
345 return (v & 0x1U) << 31U;
346}
347static inline u32 fb_mmu_invalidate_trigger_m(void)
348{
349 return 0x1U << 31U;
350}
351static inline u32 fb_mmu_invalidate_trigger_v(u32 r)
352{
353 return (r >> 31U) & 0x1U;
354}
355static inline u32 fb_mmu_invalidate_trigger_true_f(void)
356{
357 return 0x80000000U;
358}
359static inline u32 fb_mmu_debug_wr_r(void)
360{
361 return 0x00100cc8U;
362}
363static inline u32 fb_mmu_debug_wr_aperture_s(void)
364{
365 return 2U;
366}
367static inline u32 fb_mmu_debug_wr_aperture_f(u32 v)
368{
369 return (v & 0x3U) << 0U;
370}
371static inline u32 fb_mmu_debug_wr_aperture_m(void)
372{
373 return 0x3U << 0U;
374}
375static inline u32 fb_mmu_debug_wr_aperture_v(u32 r)
376{
377 return (r >> 0U) & 0x3U;
378}
379static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void)
380{
381 return 0x0U;
382}
383static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void)
384{
385 return 0x2U;
386}
387static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void)
388{
389 return 0x3U;
390}
391static inline u32 fb_mmu_debug_wr_vol_false_f(void)
392{
393 return 0x0U;
394}
395static inline u32 fb_mmu_debug_wr_vol_true_v(void)
396{
397 return 0x00000001U;
398}
399static inline u32 fb_mmu_debug_wr_vol_true_f(void)
400{
401 return 0x4U;
402}
403static inline u32 fb_mmu_debug_wr_addr_f(u32 v)
404{
405 return (v & 0xfffffffU) << 4U;
406}
407static inline u32 fb_mmu_debug_wr_addr_alignment_v(void)
408{
409 return 0x0000000cU;
410}
411static inline u32 fb_mmu_debug_rd_r(void)
412{
413 return 0x00100cccU;
414}
415static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void)
416{
417 return 0x0U;
418}
419static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void)
420{
421 return 0x2U;
422}
423static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void)
424{
425 return 0x3U;
426}
427static inline u32 fb_mmu_debug_rd_vol_false_f(void)
428{
429 return 0x0U;
430}
431static inline u32 fb_mmu_debug_rd_addr_f(u32 v)
432{
433 return (v & 0xfffffffU) << 4U;
434}
435static inline u32 fb_mmu_debug_rd_addr_alignment_v(void)
436{
437 return 0x0000000cU;
438}
/*
 * FB_MMU_DEBUG_CTRL register (offset 0x00100cc4).
 * debug enable flag lives in bit 16; enabled/disabled are the raw
 * (unshifted) field values.
 */
static inline u32 fb_mmu_debug_ctrl_r(void)
{
	return 0x00100cc4U;
}
static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r)
{
	return (r >> 16U) & 0x1U;
}
static inline u32 fb_mmu_debug_ctrl_debug_m(void)
{
	return 0x1U << 16U;
}
static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void)
{
	return 0x00000000U;
}
/*
 * FB_MMU_VPR_INFO register (offset 0x00100cd0).
 * fetch flag is bit 2; false/true are the raw field values.
 */
static inline u32 fb_mmu_vpr_info_r(void)
{
	return 0x00100cd0U;
}
static inline u32 fb_mmu_vpr_info_fetch_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 fb_mmu_vpr_info_fetch_false_v(void)
{
	return 0x00000000U;
}
static inline u32 fb_mmu_vpr_info_fetch_true_v(void)
{
	return 0x00000001U;
}
/*
 * L2 TLB ECC registers: status (0x00100e70), corrected/uncorrected error
 * counters (0x00100e74 / 0x00100e78) and error address (0x00100e7c).
 * Status bits: 0 = corrected SA-data error, 1 = uncorrected SA-data error,
 * 16/18 = corrected/uncorrected total-counter overflow, 30 = reset/clear.
 */
static inline u32 fb_mmu_l2tlb_ecc_status_r(void)
{
	return 0x00100e70U;
}
static inline u32 fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m(void)
{
	return 0x1U << 16U;
}
static inline u32 fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m(void)
{
	return 0x1U << 18U;
}
static inline u32 fb_mmu_l2tlb_ecc_status_reset_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_mmu_l2tlb_ecc_status_reset_clear_f(void)
{
	return 0x40000000U;
}
/* corrected error counter: 16-bit total in bits 15:0 */
static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_r(void)
{
	return 0x00100e74U;
}
static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_s(void)
{
	return 16U;
}
static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
/* uncorrected error counter: 16-bit total in bits 15:0 */
static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_r(void)
{
	return 0x00100e78U;
}
static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s(void)
{
	return 16U;
}
static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
/* error address: full 32-bit index */
static inline u32 fb_mmu_l2tlb_ecc_address_r(void)
{
	return 0x00100e7cU;
}
static inline u32 fb_mmu_l2tlb_ecc_address_index_s(void)
{
	return 32U;
}
static inline u32 fb_mmu_l2tlb_ecc_address_index_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_address_index_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 fb_mmu_l2tlb_ecc_address_index_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
/*
 * Hub TLB ECC registers: status (0x00100e84), corrected/uncorrected error
 * counters (0x00100e88 / 0x00100e8c) and error address (0x00100e90).
 * Same bit layout as the L2 TLB ECC group above: bits 0/1 = corrected/
 * uncorrected SA-data error, 16/18 = counter overflow, 30 = reset.
 */
static inline u32 fb_mmu_hubtlb_ecc_status_r(void)
{
	return 0x00100e84U;
}
static inline u32 fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m(void)
{
	return 0x1U << 16U;
}
static inline u32 fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m(void)
{
	return 0x1U << 18U;
}
static inline u32 fb_mmu_hubtlb_ecc_status_reset_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_mmu_hubtlb_ecc_status_reset_clear_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_r(void)
{
	return 0x00100e88U;
}
static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_s(void)
{
	return 16U;
}
static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_r(void)
{
	return 0x00100e8cU;
}
static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s(void)
{
	return 16U;
}
static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 fb_mmu_hubtlb_ecc_address_r(void)
{
	return 0x00100e90U;
}
static inline u32 fb_mmu_hubtlb_ecc_address_index_s(void)
{
	return 32U;
}
static inline u32 fb_mmu_hubtlb_ecc_address_index_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_address_index_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 fb_mmu_hubtlb_ecc_address_index_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
/*
 * Fill-unit ECC registers: status (0x00100e98), corrected/uncorrected error
 * counters (0x00100e9c / 0x00100ea0) and error address (0x00100ea4).
 * Status bits: 0/1 = corrected/uncorrected PTE data error,
 * 2/3 = corrected/uncorrected PDE0 data error, 16/18 = counter overflow,
 * 30 = reset/clear.
 */
static inline u32 fb_mmu_fillunit_ecc_status_r(void)
{
	return 0x00100e98U;
}
static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m(void)
{
	return 0x1U << 2U;
}
static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m(void)
{
	return 0x1U << 3U;
}
static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m(void)
{
	return 0x1U << 16U;
}
static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m(void)
{
	return 0x1U << 18U;
}
static inline u32 fb_mmu_fillunit_ecc_status_reset_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_mmu_fillunit_ecc_status_reset_clear_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_r(void)
{
	return 0x00100e9cU;
}
static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_s(void)
{
	return 16U;
}
static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_r(void)
{
	return 0x00100ea0U;
}
static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_s(void)
{
	return 16U;
}
static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_f(u32 v)
{
	return (v & 0xffffU) << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_m(void)
{
	return 0xffffU << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(u32 r)
{
	return (r >> 0U) & 0xffffU;
}
static inline u32 fb_mmu_fillunit_ecc_address_r(void)
{
	return 0x00100ea4U;
}
static inline u32 fb_mmu_fillunit_ecc_address_index_s(void)
{
	return 32U;
}
static inline u32 fb_mmu_fillunit_ecc_address_index_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_address_index_m(void)
{
	return 0xffffffffU << 0U;
}
static inline u32 fb_mmu_fillunit_ecc_address_index_v(u32 r)
{
	return (r >> 0U) & 0xffffffffU;
}
/* FB_NISO_FLUSH_SYSMEM_ADDR register offset (0x00100c10). */
static inline u32 fb_niso_flush_sysmem_addr_r(void)
{
	return 0x00100c10U;
}
/*
 * FB_NISO_INTR interrupt status register (offset 0x00100a20).
 * Each source has a _m mask and a _pending_f value (the mask itself).
 * Bits: 0 = hub access counter notify, 1 = hub access counter error,
 * 26 = MMU ECC uncorrected error, 27/28 = replayable fault notify/overflow,
 * 29/30 = non-replayable fault notify/overflow, 31 = other fault notify.
 */
static inline u32 fb_niso_intr_r(void)
{
	return 0x00100a20U;
}
static inline u32 fb_niso_intr_hub_access_counter_notify_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_niso_intr_hub_access_counter_notify_pending_f(void)
{
	return 0x1U;
}
static inline u32 fb_niso_intr_hub_access_counter_error_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_niso_intr_hub_access_counter_error_pending_f(void)
{
	return 0x2U;
}
static inline u32 fb_niso_intr_mmu_replayable_fault_notify_m(void)
{
	return 0x1U << 27U;
}
static inline u32 fb_niso_intr_mmu_replayable_fault_notify_pending_f(void)
{
	return 0x8000000U;
}
static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_m(void)
{
	return 0x1U << 28U;
}
static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_pending_f(void)
{
	return 0x10000000U;
}
static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_m(void)
{
	return 0x1U << 29U;
}
static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f(void)
{
	return 0x20000000U;
}
static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_m(void)
{
	return 0x1U << 30U;
}
static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_niso_intr_mmu_other_fault_notify_m(void)
{
	return 0x1U << 31U;
}
static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void)
{
	return 0x80000000U;
}
static inline u32 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m(void)
{
	return 0x1U << 26U;
}
static inline u32 fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f(void)
{
	return 0x4000000U;
}
/*
 * FB_NISO_INTR_EN(i) interrupt enable register array: 2 entries of 4 bytes
 * starting at 0x00100a24. Bit layout matches FB_NISO_INTR; each source has
 * a _f(v) setter and an _enabled_f pre-shifted value.
 */
static inline u32 fb_niso_intr_en_r(u32 i)
{
	return 0x00100a24U + i*4U;
}
static inline u32 fb_niso_intr_en__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_niso_intr_en_hub_access_counter_notify_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 fb_niso_intr_en_hub_access_counter_notify_enabled_f(void)
{
	return 0x1U;
}
static inline u32 fb_niso_intr_en_hub_access_counter_error_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 fb_niso_intr_en_hub_access_counter_error_enabled_f(void)
{
	return 0x2U;
}
static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_f(u32 v)
{
	return (v & 0x1U) << 27U;
}
static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_enabled_f(void)
{
	return 0x8000000U;
}
static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_f(u32 v)
{
	return (v & 0x1U) << 28U;
}
static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_enabled_f(void)
{
	return 0x10000000U;
}
static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_f(u32 v)
{
	return (v & 0x1U) << 29U;
}
static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_enabled_f(void)
{
	return 0x20000000U;
}
static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_enabled_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_niso_intr_en_mmu_other_fault_notify_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void)
{
	return 0x80000000U;
}
static inline u32 fb_niso_intr_en_mmu_ecc_uncorrected_error_notify_f(u32 v)
{
	return (v & 0x1U) << 26U;
}
static inline u32 fb_niso_intr_en_mmu_ecc_uncorrected_error_notify_enabled_f(void)
{
	return 0x4000000U;
}
/*
 * FB_NISO_INTR_EN_SET(i) write-1-to-set enable register array: 2 entries of
 * 4 bytes starting at 0x00100a2c. Bit layout matches FB_NISO_INTR.
 */
static inline u32 fb_niso_intr_en_set_r(u32 i)
{
	return 0x00100a2cU + i*4U;
}
static inline u32 fb_niso_intr_en_set__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_set_f(void)
{
	return 0x1U;
}
static inline u32 fb_niso_intr_en_set_hub_access_counter_error_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_niso_intr_en_set_hub_access_counter_error_set_f(void)
{
	return 0x2U;
}
static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_m(void)
{
	return 0x1U << 27U;
}
static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_set_f(void)
{
	return 0x8000000U;
}
static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m(void)
{
	return 0x1U << 28U;
}
static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_set_f(void)
{
	return 0x10000000U;
}
static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m(void)
{
	return 0x1U << 29U;
}
static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_set_f(void)
{
	return 0x20000000U;
}
static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m(void)
{
	return 0x1U << 30U;
}
static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_set_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_m(void)
{
	return 0x1U << 31U;
}
static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void)
{
	return 0x80000000U;
}
static inline u32 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m(void)
{
	return 0x1U << 26U;
}
static inline u32 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f(void)
{
	return 0x4000000U;
}
/*
 * FB_NISO_INTR_EN_CLR(i) write-1-to-clear enable register array: 2 entries
 * of 4 bytes starting at 0x00100a34. Bit layout matches FB_NISO_INTR.
 * NOTE(review): the generated names use the _set_f suffix even though this
 * is the clear register -- this mirrors the manuals, do not "fix" it.
 */
static inline u32 fb_niso_intr_en_clr_r(u32 i)
{
	return 0x00100a34U + i*4U;
}
static inline u32 fb_niso_intr_en_clr__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_set_f(void)
{
	return 0x1U;
}
static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_set_f(void)
{
	return 0x2U;
}
static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m(void)
{
	return 0x1U << 27U;
}
static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_set_f(void)
{
	return 0x8000000U;
}
static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m(void)
{
	return 0x1U << 28U;
}
static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_set_f(void)
{
	return 0x10000000U;
}
static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m(void)
{
	return 0x1U << 29U;
}
static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_set_f(void)
{
	return 0x20000000U;
}
static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m(void)
{
	return 0x1U << 30U;
}
static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_set_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_m(void)
{
	return 0x1U << 31U;
}
static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_set_f(void)
{
	return 0x80000000U;
}
static inline u32 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m(void)
{
	return 0x1U << 26U;
}
static inline u32 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f(void)
{
	return 0x4000000U;
}
/* register-array indices: 0 = non-replayable, 1 = replayable fault buffer */
static inline u32 fb_niso_intr_en_clr_mmu_non_replay_fault_buffer_v(void)
{
	return 0x00000000U;
}
static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void)
{
	return 0x00000001U;
}
/*
 * FB_MMU_FAULT_BUFFER_LO(i): low word of the fault buffer base address.
 * 2-entry array, 20-byte stride, starting at 0x00100e24.
 * Fields: bit 0 = addr_mode (0 virtual / 1 physical), bits 2:1 = physical
 * aperture, bit 3 = physical volatile flag, bits 31:12 = low address bits.
 */
static inline u32 fb_mmu_fault_buffer_lo_r(u32 i)
{
	return 0x00100e24U + i*20U;
}
static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_mode_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_mode_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_v(void)
{
	return 0x00000000U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_f(void)
{
	return 0x1U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_f(u32 v)
{
	return (v & 0x3U) << 1U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_v(u32 r)
{
	return (r >> 1U) & 0x3U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_f(void)
{
	return 0x4U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_v(void)
{
	return 0x00000003U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_f(void)
{
	return 0x6U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_vol_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 fb_mmu_fault_buffer_lo_phys_vol_v(u32 r)
{
	return (r >> 3U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r)
{
	return (r >> 12U) & 0xfffffU;
}
1115static inline u32 fb_mmu_fault_buffer_hi_r(u32 i)
1116{
1117 return 0x00100e28U + i*20U;
1118}
1119static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void)
1120{
1121 return 0x00000002U;
1122}
1123static inline u32 fb_mmu_fault_buffer_hi_addr_f(u32 v)
1124{
1125 return (v & 0xffffffffU) << 0U;
1126}
1127static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r)
1128{
1129 return (r >> 0U) & 0xffffffffU;
1130}
/*
 * FB_MMU_FAULT_BUFFER_GET(i): fault buffer read pointer. 2-entry array,
 * 20-byte stride, starting at 0x00100e2c. Fields: bits 19:0 = get pointer,
 * bit 30 = getptr-corrupted flag (write 1 to clear), bit 31 = overflow
 * flag (write 1 to clear).
 */
static inline u32 fb_mmu_fault_buffer_get_r(u32 i)
{
	return 0x00100e2cU + i*20U;
}
static inline u32 fb_mmu_fault_buffer_get__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_buffer_get_ptr_f(u32 v)
{
	return (v & 0xfffffU) << 0U;
}
static inline u32 fb_mmu_fault_buffer_get_ptr_m(void)
{
	return 0xfffffU << 0U;
}
static inline u32 fb_mmu_fault_buffer_get_ptr_v(u32 r)
{
	return (r >> 0U) & 0xfffffU;
}
static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_m(void)
{
	return 0x1U << 30U;
}
static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_mmu_fault_buffer_get_overflow_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 fb_mmu_fault_buffer_get_overflow_m(void)
{
	return 0x1U << 31U;
}
static inline u32 fb_mmu_fault_buffer_get_overflow_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void)
{
	return 0x80000000U;
}
/*
 * FB_MMU_FAULT_BUFFER_PUT(i): fault buffer write pointer. 2-entry array,
 * 20-byte stride, starting at 0x00100e30. Fields: bits 19:0 = put pointer,
 * bit 30 = getptr-corrupted status, bit 31 = overflow status.
 */
static inline u32 fb_mmu_fault_buffer_put_r(u32 i)
{
	return 0x00100e30U + i*20U;
}
static inline u32 fb_mmu_fault_buffer_put__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_buffer_put_ptr_f(u32 v)
{
	return (v & 0xfffffU) << 0U;
}
static inline u32 fb_mmu_fault_buffer_put_ptr_v(u32 r)
{
	return (r >> 0U) & 0xfffffU;
}
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_v(void)
{
	return 0x00000000U;
}
static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_buffer_put_overflow_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 fb_mmu_fault_buffer_put_overflow_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_put_overflow_yes_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void)
{
	return 0x80000000U;
}
/*
 * FB_MMU_FAULT_BUFFER_SIZE(i): fault buffer size/control. 2-entry array,
 * 20-byte stride, starting at 0x00100e34. Fields: bits 19:0 = entry count,
 * bit 29 = overflow interrupt enable, bit 30 = set-default,
 * bit 31 = buffer enable.
 */
static inline u32 fb_mmu_fault_buffer_size_r(u32 i)
{
	return 0x00100e34U + i*20U;
}
static inline u32 fb_mmu_fault_buffer_size__size_1_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_buffer_size_val_f(u32 v)
{
	return (v & 0xfffffU) << 0U;
}
static inline u32 fb_mmu_fault_buffer_size_val_v(u32 r)
{
	return (r >> 0U) & 0xfffffU;
}
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_f(u32 v)
{
	return (v & 0x1U) << 29U;
}
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_v(u32 r)
{
	return (r >> 29U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_f(void)
{
	return 0x20000000U;
}
static inline u32 fb_mmu_fault_buffer_size_set_default_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 fb_mmu_fault_buffer_size_set_default_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_size_set_default_yes_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_size_set_default_yes_f(void)
{
	return 0x40000000U;
}
static inline u32 fb_mmu_fault_buffer_size_enable_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
static inline u32 fb_mmu_fault_buffer_size_enable_m(void)
{
	return 0x1U << 31U;
}
static inline u32 fb_mmu_fault_buffer_size_enable_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 fb_mmu_fault_buffer_size_enable_true_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_buffer_size_enable_true_f(void)
{
	return 0x80000000U;
}
/*
 * FB_MMU_FAULT_ADDR_LO register (offset 0x00100e4c): low word of the
 * faulting address. Fields: bits 1:0 = physical aperture,
 * bits 31:12 = low address bits.
 */
static inline u32 fb_mmu_fault_addr_lo_r(void)
{
	return 0x00100e4cU;
}
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_f(u32 v)
{
	return (v & 0x3U) << 0U;
}
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_v(u32 r)
{
	return (r >> 0U) & 0x3U;
}
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_f(void)
{
	return 0x2U;
}
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_v(void)
{
	return 0x00000003U;
}
static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_f(void)
{
	return 0x3U;
}
static inline u32 fb_mmu_fault_addr_lo_addr_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 fb_mmu_fault_addr_lo_addr_v(u32 r)
{
	return (r >> 12U) & 0xfffffU;
}
1343static inline u32 fb_mmu_fault_addr_hi_r(void)
1344{
1345 return 0x00100e50U;
1346}
1347static inline u32 fb_mmu_fault_addr_hi_addr_f(u32 v)
1348{
1349 return (v & 0xffffffffU) << 0U;
1350}
1351static inline u32 fb_mmu_fault_addr_hi_addr_v(u32 r)
1352{
1353 return (r >> 0U) & 0xffffffffU;
1354}
/*
 * FB_MMU_FAULT_INST_LO register (offset 0x00100e54): low word of the
 * faulting instance block pointer. Fields: bits 8:0 = engine id,
 * bits 11:10 = aperture, bits 31:12 = low address bits.
 */
static inline u32 fb_mmu_fault_inst_lo_r(void)
{
	return 0x00100e54U;
}
static inline u32 fb_mmu_fault_inst_lo_engine_id_v(u32 r)
{
	return (r >> 0U) & 0x1ffU;
}
static inline u32 fb_mmu_fault_inst_lo_aperture_v(u32 r)
{
	return (r >> 10U) & 0x3U;
}
static inline u32 fb_mmu_fault_inst_lo_aperture_sys_coh_v(void)
{
	return 0x00000002U;
}
static inline u32 fb_mmu_fault_inst_lo_aperture_sys_nocoh_v(void)
{
	return 0x00000003U;
}
static inline u32 fb_mmu_fault_inst_lo_addr_f(u32 v)
{
	return (v & 0xfffffU) << 12U;
}
static inline u32 fb_mmu_fault_inst_lo_addr_v(u32 r)
{
	return (r >> 12U) & 0xfffffU;
}
1383static inline u32 fb_mmu_fault_inst_hi_r(void)
1384{
1385 return 0x00100e58U;
1386}
1387static inline u32 fb_mmu_fault_inst_hi_addr_v(u32 r)
1388{
1389 return (r >> 0U) & 0xffffffffU;
1390}
/*
 * FB_MMU_FAULT_INFO register (offset 0x00100e5c): decoded fault details.
 * Fields: bits 4:0 = fault type, bit 7 = replayable, bits 14:8 = client id,
 * bits 19:16 = access type, bit 20 = client type, bits 28:24 = GPC id,
 * bit 29 = protected mode, bit 30 = replayable-fault enable, bit 31 = valid.
 */
static inline u32 fb_mmu_fault_info_r(void)
{
	return 0x00100e5cU;
}
static inline u32 fb_mmu_fault_info_fault_type_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 fb_mmu_fault_info_replayable_fault_v(u32 r)
{
	return (r >> 7U) & 0x1U;
}
static inline u32 fb_mmu_fault_info_client_v(u32 r)
{
	return (r >> 8U) & 0x7fU;
}
static inline u32 fb_mmu_fault_info_access_type_v(u32 r)
{
	return (r >> 16U) & 0xfU;
}
static inline u32 fb_mmu_fault_info_client_type_v(u32 r)
{
	return (r >> 20U) & 0x1U;
}
static inline u32 fb_mmu_fault_info_gpc_id_v(u32 r)
{
	return (r >> 24U) & 0x1fU;
}
static inline u32 fb_mmu_fault_info_protected_mode_v(u32 r)
{
	return (r >> 29U) & 0x1U;
}
static inline u32 fb_mmu_fault_info_replayable_fault_en_v(u32 r)
{
	return (r >> 30U) & 0x1U;
}
static inline u32 fb_mmu_fault_info_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
/*
 * FB_MMU_FAULT_STATUS register (offset 0x00100e60).
 * Bits 7:0 are "dropped fault" flags per source (bar1/bar2/ifb/other, each
 * phys then virt); these are write-1-to-clear, so set_f and clear_f return
 * the same value. Bits 15:8 are fault-buffer state flags (replayable /
 * non-replayable x {pending, error, overflow, getptr-corrupted}); their
 * reset_f values are 0. Bit 30 is the busy flag.
 */
static inline u32 fb_mmu_fault_status_r(void)
{
	return 0x00100e60U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_m(void)
{
	return 0x1U << 0U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_f(void)
{
	return 0x1U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_f(void)
{
	return 0x1U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_m(void)
{
	return 0x1U << 1U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_f(void)
{
	return 0x2U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_f(void)
{
	return 0x2U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_m(void)
{
	return 0x1U << 2U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_f(void)
{
	return 0x4U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_f(void)
{
	return 0x4U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_m(void)
{
	return 0x1U << 3U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_f(void)
{
	return 0x8U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_f(void)
{
	return 0x8U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_m(void)
{
	return 0x1U << 4U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_f(void)
{
	return 0x10U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_f(void)
{
	return 0x10U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_m(void)
{
	return 0x1U << 5U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_f(void)
{
	return 0x20U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_f(void)
{
	return 0x20U;
}
static inline u32 fb_mmu_fault_status_dropped_other_phys_m(void)
{
	return 0x1U << 6U;
}
static inline u32 fb_mmu_fault_status_dropped_other_phys_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_other_phys_set_f(void)
{
	return 0x40U;
}
static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_f(void)
{
	return 0x40U;
}
static inline u32 fb_mmu_fault_status_dropped_other_virt_m(void)
{
	return 0x1U << 7U;
}
static inline u32 fb_mmu_fault_status_dropped_other_virt_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_other_virt_set_f(void)
{
	return 0x80U;
}
static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_f(void)
{
	return 0x80U;
}
static inline u32 fb_mmu_fault_status_replayable_m(void)
{
	return 0x1U << 8U;
}
static inline u32 fb_mmu_fault_status_replayable_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_replayable_set_f(void)
{
	return 0x100U;
}
static inline u32 fb_mmu_fault_status_replayable_reset_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_status_non_replayable_m(void)
{
	return 0x1U << 9U;
}
static inline u32 fb_mmu_fault_status_non_replayable_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_non_replayable_set_f(void)
{
	return 0x200U;
}
static inline u32 fb_mmu_fault_status_non_replayable_reset_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_status_replayable_error_m(void)
{
	return 0x1U << 10U;
}
static inline u32 fb_mmu_fault_status_replayable_error_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_replayable_error_set_f(void)
{
	return 0x400U;
}
static inline u32 fb_mmu_fault_status_replayable_error_reset_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_status_non_replayable_error_m(void)
{
	return 0x1U << 11U;
}
static inline u32 fb_mmu_fault_status_non_replayable_error_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_non_replayable_error_set_f(void)
{
	return 0x800U;
}
static inline u32 fb_mmu_fault_status_non_replayable_error_reset_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_status_replayable_overflow_m(void)
{
	return 0x1U << 12U;
}
static inline u32 fb_mmu_fault_status_replayable_overflow_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_replayable_overflow_set_f(void)
{
	return 0x1000U;
}
static inline u32 fb_mmu_fault_status_replayable_overflow_reset_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_status_non_replayable_overflow_m(void)
{
	return 0x1U << 13U;
}
static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_f(void)
{
	return 0x2000U;
}
static inline u32 fb_mmu_fault_status_non_replayable_overflow_reset_f(void)
{
	return 0x0U;
}
static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_m(void)
{
	return 0x1U << 14U;
}
static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_f(void)
{
	return 0x4000U;
}
static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_m(void)
{
	return 0x1U << 15U;
}
static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_v(void)
{
	return 0x00000001U;
}
static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_f(void)
{
	return 0x8000U;
}
static inline u32 fb_mmu_fault_status_busy_m(void)
{
	return 0x1U << 30U;
}
1719static inline u32 fb_mmu_fault_status_busy_true_v(void)
1720{
1721 return 0x00000001U;
1722}
1723static inline u32 fb_mmu_fault_status_busy_true_f(void)
1724{
1725 return 0x40000000U;
1726}
1727static inline u32 fb_mmu_fault_status_valid_m(void)
1728{
1729 return 0x1U << 31U;
1730}
1731static inline u32 fb_mmu_fault_status_valid_set_v(void)
1732{
1733 return 0x00000001U;
1734}
1735static inline u32 fb_mmu_fault_status_valid_set_f(void)
1736{
1737 return 0x80000000U;
1738}
1739static inline u32 fb_mmu_fault_status_valid_clear_v(void)
1740{
1741 return 0x00000001U;
1742}
1743static inline u32 fb_mmu_fault_status_valid_clear_f(void)
1744{
1745 return 0x80000000U;
1746}
1747static inline u32 fb_mmu_num_active_ltcs_r(void)
1748{
1749 return 0x00100ec0U;
1750}
1751static inline u32 fb_mmu_num_active_ltcs_count_f(u32 v)
1752{
1753 return (v & 0x1fU) << 0U;
1754}
1755static inline u32 fb_mmu_num_active_ltcs_count_v(u32 r)
1756{
1757 return (r >> 0U) & 0x1fU;
1758}
1759static inline u32 fb_mmu_cbc_base_r(void)
1760{
1761 return 0x00100ec4U;
1762}
1763static inline u32 fb_mmu_cbc_base_address_f(u32 v)
1764{
1765 return (v & 0x3ffffffU) << 0U;
1766}
1767static inline u32 fb_mmu_cbc_base_address_v(u32 r)
1768{
1769 return (r >> 0U) & 0x3ffffffU;
1770}
1771static inline u32 fb_mmu_cbc_base_address_alignment_shift_v(void)
1772{
1773 return 0x0000000bU;
1774}
1775static inline u32 fb_mmu_cbc_top_r(void)
1776{
1777 return 0x00100ec8U;
1778}
1779static inline u32 fb_mmu_cbc_top_size_f(u32 v)
1780{
1781 return (v & 0x7fffU) << 0U;
1782}
1783static inline u32 fb_mmu_cbc_top_size_v(u32 r)
1784{
1785 return (r >> 0U) & 0x7fffU;
1786}
1787static inline u32 fb_mmu_cbc_top_size_alignment_shift_v(void)
1788{
1789 return 0x0000000bU;
1790}
1791static inline u32 fb_mmu_cbc_max_r(void)
1792{
1793 return 0x00100eccU;
1794}
1795static inline u32 fb_mmu_cbc_max_comptagline_f(u32 v)
1796{
1797 return (v & 0xffffffU) << 0U;
1798}
1799static inline u32 fb_mmu_cbc_max_comptagline_v(u32 r)
1800{
1801 return (r >> 0U) & 0xffffffU;
1802}
1803static inline u32 fb_mmu_cbc_max_safe_f(u32 v)
1804{
1805 return (v & 0x1U) << 30U;
1806}
1807static inline u32 fb_mmu_cbc_max_safe_true_v(void)
1808{
1809 return 0x00000001U;
1810}
1811static inline u32 fb_mmu_cbc_max_safe_false_v(void)
1812{
1813 return 0x00000000U;
1814}
1815static inline u32 fb_mmu_cbc_max_unsafe_fault_f(u32 v)
1816{
1817 return (v & 0x1U) << 31U;
1818}
1819static inline u32 fb_mmu_cbc_max_unsafe_fault_enabled_v(void)
1820{
1821 return 0x00000000U;
1822}
1823static inline u32 fb_mmu_cbc_max_unsafe_fault_disabled_v(void)
1824{
1825 return 0x00000001U;
1826}
1827#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
new file mode 100644
index 00000000..59cc7a1d
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h
@@ -0,0 +1,687 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_fifo_gv11b_h_
57#define _hw_fifo_gv11b_h_
58
59static inline u32 fifo_bar1_base_r(void)
60{
61 return 0x00002254U;
62}
63static inline u32 fifo_bar1_base_ptr_f(u32 v)
64{
65 return (v & 0xfffffffU) << 0U;
66}
67static inline u32 fifo_bar1_base_ptr_align_shift_v(void)
68{
69 return 0x0000000cU;
70}
71static inline u32 fifo_bar1_base_valid_false_f(void)
72{
73 return 0x0U;
74}
75static inline u32 fifo_bar1_base_valid_true_f(void)
76{
77 return 0x10000000U;
78}
79static inline u32 fifo_userd_writeback_r(void)
80{
81 return 0x0000225cU;
82}
83static inline u32 fifo_userd_writeback_timer_f(u32 v)
84{
85 return (v & 0xffU) << 0U;
86}
87static inline u32 fifo_userd_writeback_timer_disabled_v(void)
88{
89 return 0x00000000U;
90}
91static inline u32 fifo_userd_writeback_timer_shorter_v(void)
92{
93 return 0x00000003U;
94}
95static inline u32 fifo_userd_writeback_timer_100us_v(void)
96{
97 return 0x00000064U;
98}
99static inline u32 fifo_userd_writeback_timescale_f(u32 v)
100{
101 return (v & 0xfU) << 12U;
102}
103static inline u32 fifo_userd_writeback_timescale_0_v(void)
104{
105 return 0x00000000U;
106}
107static inline u32 fifo_runlist_base_r(void)
108{
109 return 0x00002270U;
110}
111static inline u32 fifo_runlist_base_ptr_f(u32 v)
112{
113 return (v & 0xfffffffU) << 0U;
114}
115static inline u32 fifo_runlist_base_target_vid_mem_f(void)
116{
117 return 0x0U;
118}
119static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void)
120{
121 return 0x20000000U;
122}
123static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void)
124{
125 return 0x30000000U;
126}
127static inline u32 fifo_runlist_r(void)
128{
129 return 0x00002274U;
130}
131static inline u32 fifo_runlist_engine_f(u32 v)
132{
133 return (v & 0xfU) << 20U;
134}
135static inline u32 fifo_eng_runlist_base_r(u32 i)
136{
137 return 0x00002280U + i*8U;
138}
139static inline u32 fifo_eng_runlist_base__size_1_v(void)
140{
141 return 0x00000002U;
142}
143static inline u32 fifo_eng_runlist_r(u32 i)
144{
145 return 0x00002284U + i*8U;
146}
147static inline u32 fifo_eng_runlist__size_1_v(void)
148{
149 return 0x00000002U;
150}
151static inline u32 fifo_eng_runlist_length_f(u32 v)
152{
153 return (v & 0xffffU) << 0U;
154}
155static inline u32 fifo_eng_runlist_length_max_v(void)
156{
157 return 0x0000ffffU;
158}
159static inline u32 fifo_eng_runlist_pending_true_f(void)
160{
161 return 0x100000U;
162}
163static inline u32 fifo_pb_timeslice_r(u32 i)
164{
165 return 0x00002350U + i*4U;
166}
167static inline u32 fifo_pb_timeslice_timeout_16_f(void)
168{
169 return 0x10U;
170}
171static inline u32 fifo_pb_timeslice_timescale_0_f(void)
172{
173 return 0x0U;
174}
175static inline u32 fifo_pb_timeslice_enable_true_f(void)
176{
177 return 0x10000000U;
178}
179static inline u32 fifo_pbdma_map_r(u32 i)
180{
181 return 0x00002390U + i*4U;
182}
183static inline u32 fifo_intr_0_r(void)
184{
185 return 0x00002100U;
186}
187static inline u32 fifo_intr_0_bind_error_pending_f(void)
188{
189 return 0x1U;
190}
191static inline u32 fifo_intr_0_bind_error_reset_f(void)
192{
193 return 0x1U;
194}
195static inline u32 fifo_intr_0_sched_error_pending_f(void)
196{
197 return 0x100U;
198}
199static inline u32 fifo_intr_0_sched_error_reset_f(void)
200{
201 return 0x100U;
202}
203static inline u32 fifo_intr_0_chsw_error_pending_f(void)
204{
205 return 0x10000U;
206}
207static inline u32 fifo_intr_0_chsw_error_reset_f(void)
208{
209 return 0x10000U;
210}
211static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void)
212{
213 return 0x800000U;
214}
215static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void)
216{
217 return 0x800000U;
218}
219static inline u32 fifo_intr_0_lb_error_pending_f(void)
220{
221 return 0x1000000U;
222}
223static inline u32 fifo_intr_0_lb_error_reset_f(void)
224{
225 return 0x1000000U;
226}
227static inline u32 fifo_intr_0_pbdma_intr_pending_f(void)
228{
229 return 0x20000000U;
230}
231static inline u32 fifo_intr_0_runlist_event_pending_f(void)
232{
233 return 0x40000000U;
234}
235static inline u32 fifo_intr_0_channel_intr_pending_f(void)
236{
237 return 0x80000000U;
238}
239static inline u32 fifo_intr_0_ctxsw_timeout_pending_f(void)
240{
241 return 0x2U;
242}
243static inline u32 fifo_intr_en_0_r(void)
244{
245 return 0x00002140U;
246}
247static inline u32 fifo_intr_en_0_sched_error_f(u32 v)
248{
249 return (v & 0x1U) << 8U;
250}
251static inline u32 fifo_intr_en_0_sched_error_m(void)
252{
253 return 0x1U << 8U;
254}
255static inline u32 fifo_intr_en_0_ctxsw_timeout_pending_f(void)
256{
257 return 0x2U;
258}
259static inline u32 fifo_intr_en_1_r(void)
260{
261 return 0x00002528U;
262}
263static inline u32 fifo_intr_bind_error_r(void)
264{
265 return 0x0000252cU;
266}
267static inline u32 fifo_intr_sched_error_r(void)
268{
269 return 0x0000254cU;
270}
271static inline u32 fifo_intr_sched_error_code_f(u32 v)
272{
273 return (v & 0xffU) << 0U;
274}
275static inline u32 fifo_intr_chsw_error_r(void)
276{
277 return 0x0000256cU;
278}
279static inline u32 fifo_intr_ctxsw_timeout_r(void)
280{
281 return 0x00002a30U;
282}
283static inline u32 fifo_intr_ctxsw_timeout_engine_f(u32 v, u32 i)
284{
285 return (v & 0x1U) << (0U + i*1U);
286}
287static inline u32 fifo_intr_ctxsw_timeout_engine_v(u32 r, u32 i)
288{
289 return (r >> (0U + i*1U)) & 0x1U;
290}
291static inline u32 fifo_intr_ctxsw_timeout_engine__size_1_v(void)
292{
293 return 0x00000020U;
294}
295static inline u32 fifo_intr_ctxsw_timeout_engine_pending_v(void)
296{
297 return 0x00000001U;
298}
299static inline u32 fifo_intr_ctxsw_timeout_engine_pending_f(u32 i)
300{
301 return 0x1U << (0U + i*1U);
302}
303static inline u32 fifo_intr_ctxsw_timeout_info_r(u32 i)
304{
305 return 0x00003200U + i*4U;
306}
307static inline u32 fifo_intr_ctxsw_timeout_info__size_1_v(void)
308{
309 return 0x00000004U;
310}
311static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_v(u32 r)
312{
313 return (r >> 14U) & 0x3U;
314}
315static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v(void)
316{
317 return 0x00000001U;
318}
319static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v(void)
320{
321 return 0x00000002U;
322}
323static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v(void)
324{
325 return 0x00000003U;
326}
327static inline u32 fifo_intr_ctxsw_timeout_info_prev_tsgid_v(u32 r)
328{
329 return (r >> 0U) & 0x3fffU;
330}
331static inline u32 fifo_intr_ctxsw_timeout_info_next_tsgid_v(u32 r)
332{
333 return (r >> 16U) & 0x3fffU;
334}
335static inline u32 fifo_intr_ctxsw_timeout_info_status_v(u32 r)
336{
337 return (r >> 30U) & 0x3U;
338}
339static inline u32 fifo_intr_ctxsw_timeout_info_status_awaiting_ack_v(void)
340{
341 return 0x00000000U;
342}
343static inline u32 fifo_intr_ctxsw_timeout_info_status_eng_was_reset_v(void)
344{
345 return 0x00000001U;
346}
347static inline u32 fifo_intr_ctxsw_timeout_info_status_ack_received_v(void)
348{
349 return 0x00000002U;
350}
351static inline u32 fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v(void)
352{
353 return 0x00000003U;
354}
355static inline u32 fifo_intr_pbdma_id_r(void)
356{
357 return 0x000025a0U;
358}
359static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i)
360{
361 return (v & 0x1U) << (0U + i*1U);
362}
363static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i)
364{
365 return (r >> (0U + i*1U)) & 0x1U;
366}
367static inline u32 fifo_intr_pbdma_id_status__size_1_v(void)
368{
369 return 0x00000003U;
370}
371static inline u32 fifo_intr_runlist_r(void)
372{
373 return 0x00002a00U;
374}
375static inline u32 fifo_fb_timeout_r(void)
376{
377 return 0x00002a04U;
378}
379static inline u32 fifo_fb_timeout_period_m(void)
380{
381 return 0x3fffffffU << 0U;
382}
383static inline u32 fifo_fb_timeout_period_max_f(void)
384{
385 return 0x3fffffffU;
386}
387static inline u32 fifo_fb_timeout_period_init_f(void)
388{
389 return 0x3c00U;
390}
391static inline u32 fifo_fb_timeout_detection_m(void)
392{
393 return 0x1U << 31U;
394}
395static inline u32 fifo_fb_timeout_detection_enabled_f(void)
396{
397 return 0x80000000U;
398}
399static inline u32 fifo_fb_timeout_detection_disabled_f(void)
400{
401 return 0x0U;
402}
403static inline u32 fifo_sched_disable_r(void)
404{
405 return 0x00002630U;
406}
407static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i)
408{
409 return (v & 0x1U) << (0U + i*1U);
410}
411static inline u32 fifo_sched_disable_runlist_m(u32 i)
412{
413 return 0x1U << (0U + i*1U);
414}
415static inline u32 fifo_sched_disable_true_v(void)
416{
417 return 0x00000001U;
418}
419static inline u32 fifo_runlist_preempt_r(void)
420{
421 return 0x00002638U;
422}
423static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i)
424{
425 return (v & 0x1U) << (0U + i*1U);
426}
427static inline u32 fifo_runlist_preempt_runlist_m(u32 i)
428{
429 return 0x1U << (0U + i*1U);
430}
431static inline u32 fifo_runlist_preempt_runlist_pending_v(void)
432{
433 return 0x00000001U;
434}
435static inline u32 fifo_preempt_r(void)
436{
437 return 0x00002634U;
438}
439static inline u32 fifo_preempt_pending_true_f(void)
440{
441 return 0x100000U;
442}
443static inline u32 fifo_preempt_type_channel_f(void)
444{
445 return 0x0U;
446}
447static inline u32 fifo_preempt_type_tsg_f(void)
448{
449 return 0x1000000U;
450}
451static inline u32 fifo_preempt_chid_f(u32 v)
452{
453 return (v & 0xfffU) << 0U;
454}
455static inline u32 fifo_preempt_id_f(u32 v)
456{
457 return (v & 0xfffU) << 0U;
458}
459static inline u32 fifo_engine_status_r(u32 i)
460{
461 return 0x00002640U + i*8U;
462}
463static inline u32 fifo_engine_status__size_1_v(void)
464{
465 return 0x00000004U;
466}
467static inline u32 fifo_engine_status_id_v(u32 r)
468{
469 return (r >> 0U) & 0xfffU;
470}
471static inline u32 fifo_engine_status_id_type_v(u32 r)
472{
473 return (r >> 12U) & 0x1U;
474}
475static inline u32 fifo_engine_status_id_type_chid_v(void)
476{
477 return 0x00000000U;
478}
479static inline u32 fifo_engine_status_id_type_tsgid_v(void)
480{
481 return 0x00000001U;
482}
483static inline u32 fifo_engine_status_ctx_status_v(u32 r)
484{
485 return (r >> 13U) & 0x7U;
486}
487static inline u32 fifo_engine_status_ctx_status_valid_v(void)
488{
489 return 0x00000001U;
490}
491static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void)
492{
493 return 0x00000005U;
494}
495static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void)
496{
497 return 0x00000006U;
498}
499static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void)
500{
501 return 0x00000007U;
502}
503static inline u32 fifo_engine_status_next_id_v(u32 r)
504{
505 return (r >> 16U) & 0xfffU;
506}
507static inline u32 fifo_engine_status_next_id_type_v(u32 r)
508{
509 return (r >> 28U) & 0x1U;
510}
511static inline u32 fifo_engine_status_next_id_type_chid_v(void)
512{
513 return 0x00000000U;
514}
515static inline u32 fifo_engine_status_eng_reload_v(u32 r)
516{
517 return (r >> 29U) & 0x1U;
518}
519static inline u32 fifo_engine_status_faulted_v(u32 r)
520{
521 return (r >> 30U) & 0x1U;
522}
523static inline u32 fifo_engine_status_faulted_true_v(void)
524{
525 return 0x00000001U;
526}
527static inline u32 fifo_engine_status_engine_v(u32 r)
528{
529 return (r >> 31U) & 0x1U;
530}
531static inline u32 fifo_engine_status_engine_idle_v(void)
532{
533 return 0x00000000U;
534}
535static inline u32 fifo_engine_status_engine_busy_v(void)
536{
537 return 0x00000001U;
538}
539static inline u32 fifo_engine_status_ctxsw_v(u32 r)
540{
541 return (r >> 15U) & 0x1U;
542}
543static inline u32 fifo_engine_status_ctxsw_in_progress_v(void)
544{
545 return 0x00000001U;
546}
547static inline u32 fifo_engine_status_ctxsw_in_progress_f(void)
548{
549 return 0x8000U;
550}
551static inline u32 fifo_eng_ctxsw_timeout_r(void)
552{
553 return 0x00002a0cU;
554}
555static inline u32 fifo_eng_ctxsw_timeout_period_f(u32 v)
556{
557 return (v & 0x7fffffffU) << 0U;
558}
559static inline u32 fifo_eng_ctxsw_timeout_period_m(void)
560{
561 return 0x7fffffffU << 0U;
562}
563static inline u32 fifo_eng_ctxsw_timeout_period_v(u32 r)
564{
565 return (r >> 0U) & 0x7fffffffU;
566}
567static inline u32 fifo_eng_ctxsw_timeout_period_init_f(void)
568{
569 return 0x3fffffU;
570}
571static inline u32 fifo_eng_ctxsw_timeout_period_max_f(void)
572{
573 return 0x7fffffffU;
574}
575static inline u32 fifo_eng_ctxsw_timeout_detection_f(u32 v)
576{
577 return (v & 0x1U) << 31U;
578}
579static inline u32 fifo_eng_ctxsw_timeout_detection_m(void)
580{
581 return 0x1U << 31U;
582}
583static inline u32 fifo_eng_ctxsw_timeout_detection_enabled_f(void)
584{
585 return 0x80000000U;
586}
587static inline u32 fifo_eng_ctxsw_timeout_detection_disabled_f(void)
588{
589 return 0x0U;
590}
591static inline u32 fifo_pbdma_status_r(u32 i)
592{
593 return 0x00003080U + i*4U;
594}
595static inline u32 fifo_pbdma_status__size_1_v(void)
596{
597 return 0x00000003U;
598}
599static inline u32 fifo_pbdma_status_id_v(u32 r)
600{
601 return (r >> 0U) & 0xfffU;
602}
603static inline u32 fifo_pbdma_status_id_type_v(u32 r)
604{
605 return (r >> 12U) & 0x1U;
606}
607static inline u32 fifo_pbdma_status_id_type_chid_v(void)
608{
609 return 0x00000000U;
610}
611static inline u32 fifo_pbdma_status_id_type_tsgid_v(void)
612{
613 return 0x00000001U;
614}
615static inline u32 fifo_pbdma_status_chan_status_v(u32 r)
616{
617 return (r >> 13U) & 0x7U;
618}
619static inline u32 fifo_pbdma_status_chan_status_valid_v(void)
620{
621 return 0x00000001U;
622}
623static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void)
624{
625 return 0x00000005U;
626}
627static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void)
628{
629 return 0x00000006U;
630}
631static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void)
632{
633 return 0x00000007U;
634}
635static inline u32 fifo_pbdma_status_next_id_v(u32 r)
636{
637 return (r >> 16U) & 0xfffU;
638}
639static inline u32 fifo_pbdma_status_next_id_type_v(u32 r)
640{
641 return (r >> 28U) & 0x1U;
642}
643static inline u32 fifo_pbdma_status_next_id_type_chid_v(void)
644{
645 return 0x00000000U;
646}
647static inline u32 fifo_pbdma_status_chsw_v(u32 r)
648{
649 return (r >> 15U) & 0x1U;
650}
651static inline u32 fifo_pbdma_status_chsw_in_progress_v(void)
652{
653 return 0x00000001U;
654}
655static inline u32 fifo_cfg0_r(void)
656{
657 return 0x00002004U;
658}
659static inline u32 fifo_cfg0_num_pbdma_v(u32 r)
660{
661 return (r >> 0U) & 0xffU;
662}
663static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r)
664{
665 return (r >> 16U) & 0xffU;
666}
667static inline u32 fifo_fb_iface_r(void)
668{
669 return 0x000026f0U;
670}
671static inline u32 fifo_fb_iface_control_v(u32 r)
672{
673 return (r >> 0U) & 0x1U;
674}
675static inline u32 fifo_fb_iface_control_enable_f(void)
676{
677 return 0x1U;
678}
679static inline u32 fifo_fb_iface_status_v(u32 r)
680{
681 return (r >> 4U) & 0x1U;
682}
683static inline u32 fifo_fb_iface_status_enabled_f(void)
684{
685 return 0x10U;
686}
687#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h
new file mode 100644
index 00000000..45c01de0
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_flush_gv11b_h_
57#define _hw_flush_gv11b_h_
58
59static inline u32 flush_l2_system_invalidate_r(void)
60{
61 return 0x00070004U;
62}
63static inline u32 flush_l2_system_invalidate_pending_v(u32 r)
64{
65 return (r >> 0U) & 0x1U;
66}
67static inline u32 flush_l2_system_invalidate_pending_busy_v(void)
68{
69 return 0x00000001U;
70}
71static inline u32 flush_l2_system_invalidate_pending_busy_f(void)
72{
73 return 0x1U;
74}
75static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r)
76{
77 return (r >> 1U) & 0x1U;
78}
79static inline u32 flush_l2_system_invalidate_outstanding_true_v(void)
80{
81 return 0x00000001U;
82}
83static inline u32 flush_l2_flush_dirty_r(void)
84{
85 return 0x00070010U;
86}
87static inline u32 flush_l2_flush_dirty_pending_v(u32 r)
88{
89 return (r >> 0U) & 0x1U;
90}
91static inline u32 flush_l2_flush_dirty_pending_empty_v(void)
92{
93 return 0x00000000U;
94}
95static inline u32 flush_l2_flush_dirty_pending_empty_f(void)
96{
97 return 0x0U;
98}
99static inline u32 flush_l2_flush_dirty_pending_busy_v(void)
100{
101 return 0x00000001U;
102}
103static inline u32 flush_l2_flush_dirty_pending_busy_f(void)
104{
105 return 0x1U;
106}
107static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r)
108{
109 return (r >> 1U) & 0x1U;
110}
111static inline u32 flush_l2_flush_dirty_outstanding_false_v(void)
112{
113 return 0x00000000U;
114}
115static inline u32 flush_l2_flush_dirty_outstanding_false_f(void)
116{
117 return 0x0U;
118}
119static inline u32 flush_l2_flush_dirty_outstanding_true_v(void)
120{
121 return 0x00000001U;
122}
123static inline u32 flush_l2_clean_comptags_r(void)
124{
125 return 0x0007000cU;
126}
127static inline u32 flush_l2_clean_comptags_pending_v(u32 r)
128{
129 return (r >> 0U) & 0x1U;
130}
131static inline u32 flush_l2_clean_comptags_pending_empty_v(void)
132{
133 return 0x00000000U;
134}
135static inline u32 flush_l2_clean_comptags_pending_empty_f(void)
136{
137 return 0x0U;
138}
139static inline u32 flush_l2_clean_comptags_pending_busy_v(void)
140{
141 return 0x00000001U;
142}
143static inline u32 flush_l2_clean_comptags_pending_busy_f(void)
144{
145 return 0x1U;
146}
147static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r)
148{
149 return (r >> 1U) & 0x1U;
150}
151static inline u32 flush_l2_clean_comptags_outstanding_false_v(void)
152{
153 return 0x00000000U;
154}
155static inline u32 flush_l2_clean_comptags_outstanding_false_f(void)
156{
157 return 0x0U;
158}
159static inline u32 flush_l2_clean_comptags_outstanding_true_v(void)
160{
161 return 0x00000001U;
162}
163static inline u32 flush_fb_flush_r(void)
164{
165 return 0x00070000U;
166}
167static inline u32 flush_fb_flush_pending_v(u32 r)
168{
169 return (r >> 0U) & 0x1U;
170}
171static inline u32 flush_fb_flush_pending_busy_v(void)
172{
173 return 0x00000001U;
174}
175static inline u32 flush_fb_flush_pending_busy_f(void)
176{
177 return 0x1U;
178}
179static inline u32 flush_fb_flush_outstanding_v(u32 r)
180{
181 return (r >> 1U) & 0x1U;
182}
183static inline u32 flush_fb_flush_outstanding_true_v(void)
184{
185 return 0x00000001U;
186}
187#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h
new file mode 100644
index 00000000..f8d9b196
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_fuse_gv11b_h_
57#define _hw_fuse_gv11b_h_
58
59static inline u32 fuse_status_opt_tpc_gpc_r(u32 i)
60{
61 return 0x00021c38U + i*4U;
62}
63static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i)
64{
65 return 0x00021838U + i*4U;
66}
67static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void)
68{
69 return 0x00021944U;
70}
71static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v)
72{
73 return (v & 0xffU) << 0U;
74}
75static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void)
76{
77 return 0xffU << 0U;
78}
79static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r)
80{
81 return (r >> 0U) & 0xffU;
82}
83static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void)
84{
85 return 0x00021948U;
86}
87static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v)
88{
89 return (v & 0x1U) << 0U;
90}
91static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void)
92{
93 return 0x1U << 0U;
94}
95static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r)
96{
97 return (r >> 0U) & 0x1U;
98}
99static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void)
100{
101 return 0x1U;
102}
103static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void)
104{
105 return 0x0U;
106}
107static inline u32 fuse_status_opt_fbio_r(void)
108{
109 return 0x00021c14U;
110}
111static inline u32 fuse_status_opt_fbio_data_f(u32 v)
112{
113 return (v & 0xffffU) << 0U;
114}
115static inline u32 fuse_status_opt_fbio_data_m(void)
116{
117 return 0xffffU << 0U;
118}
119static inline u32 fuse_status_opt_fbio_data_v(u32 r)
120{
121 return (r >> 0U) & 0xffffU;
122}
123static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i)
124{
125 return 0x00021d70U + i*4U;
126}
127static inline u32 fuse_status_opt_fbp_r(void)
128{
129 return 0x00021d38U;
130}
131static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i)
132{
133 return (r >> (0U + i*1U)) & 0x1U;
134}
135static inline u32 fuse_opt_ecc_en_r(void)
136{
137 return 0x00021228U;
138}
139static inline u32 fuse_opt_feature_fuses_override_disable_r(void)
140{
141 return 0x000213f0U;
142}
143static inline u32 fuse_opt_sec_debug_en_r(void)
144{
145 return 0x00021218U;
146}
147static inline u32 fuse_opt_priv_sec_en_r(void)
148{
149 return 0x00021434U;
150}
151#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h
new file mode 100644
index 00000000..0a442b1f
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h
@@ -0,0 +1,1495 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_gmmu_gv11b_h_
57#define _hw_gmmu_gv11b_h_
58
59static inline u32 gmmu_new_pde_is_pte_w(void)
60{
61 return 0U;
62}
63static inline u32 gmmu_new_pde_is_pte_false_f(void)
64{
65 return 0x0U;
66}
67static inline u32 gmmu_new_pde_aperture_w(void)
68{
69 return 0U;
70}
71static inline u32 gmmu_new_pde_aperture_invalid_f(void)
72{
73 return 0x0U;
74}
75static inline u32 gmmu_new_pde_aperture_video_memory_f(void)
76{
77 return 0x2U;
78}
79static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void)
80{
81 return 0x4U;
82}
83static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void)
84{
85 return 0x6U;
86}
87static inline u32 gmmu_new_pde_address_sys_f(u32 v)
88{
89 return (v & 0xffffffU) << 8U;
90}
91static inline u32 gmmu_new_pde_address_sys_w(void)
92{
93 return 0U;
94}
95static inline u32 gmmu_new_pde_vol_w(void)
96{
97 return 0U;
98}
99static inline u32 gmmu_new_pde_vol_true_f(void)
100{
101 return 0x8U;
102}
103static inline u32 gmmu_new_pde_vol_false_f(void)
104{
105 return 0x0U;
106}
107static inline u32 gmmu_new_pde_address_shift_v(void)
108{
109 return 0x0000000cU;
110}
111static inline u32 gmmu_new_pde__size_v(void)
112{
113 return 0x00000008U;
114}
115static inline u32 gmmu_new_dual_pde_is_pte_w(void)
116{
117 return 0U;
118}
119static inline u32 gmmu_new_dual_pde_is_pte_false_f(void)
120{
121 return 0x0U;
122}
123static inline u32 gmmu_new_dual_pde_aperture_big_w(void)
124{
125 return 0U;
126}
127static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void)
128{
129 return 0x0U;
130}
131static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void)
132{
133 return 0x2U;
134}
135static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void)
136{
137 return 0x4U;
138}
139static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void)
140{
141 return 0x6U;
142}
143static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v)
144{
145 return (v & 0xfffffffU) << 4U;
146}
147static inline u32 gmmu_new_dual_pde_address_big_sys_w(void)
148{
149 return 0U;
150}
151static inline u32 gmmu_new_dual_pde_aperture_small_w(void)
152{
153 return 2U;
154}
155static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void)
156{
157 return 0x0U;
158}
159static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void)
160{
161 return 0x2U;
162}
163static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void)
164{
165 return 0x4U;
166}
167static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void)
168{
169 return 0x6U;
170}
171static inline u32 gmmu_new_dual_pde_vol_small_w(void)
172{
173 return 2U;
174}
175static inline u32 gmmu_new_dual_pde_vol_small_true_f(void)
176{
177 return 0x8U;
178}
179static inline u32 gmmu_new_dual_pde_vol_small_false_f(void)
180{
181 return 0x0U;
182}
183static inline u32 gmmu_new_dual_pde_vol_big_w(void)
184{
185 return 0U;
186}
187static inline u32 gmmu_new_dual_pde_vol_big_true_f(void)
188{
189 return 0x8U;
190}
191static inline u32 gmmu_new_dual_pde_vol_big_false_f(void)
192{
193 return 0x0U;
194}
195static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v)
196{
197 return (v & 0xffffffU) << 8U;
198}
199static inline u32 gmmu_new_dual_pde_address_small_sys_w(void)
200{
201 return 2U;
202}
203static inline u32 gmmu_new_dual_pde_address_shift_v(void)
204{
205 return 0x0000000cU;
206}
207static inline u32 gmmu_new_dual_pde_address_big_shift_v(void)
208{
209 return 0x00000008U;
210}
211static inline u32 gmmu_new_dual_pde__size_v(void)
212{
213 return 0x00000010U;
214}
215static inline u32 gmmu_new_pte__size_v(void)
216{
217 return 0x00000008U;
218}
219static inline u32 gmmu_new_pte_valid_w(void)
220{
221 return 0U;
222}
223static inline u32 gmmu_new_pte_valid_true_f(void)
224{
225 return 0x1U;
226}
227static inline u32 gmmu_new_pte_valid_false_f(void)
228{
229 return 0x0U;
230}
231static inline u32 gmmu_new_pte_privilege_w(void)
232{
233 return 0U;
234}
235static inline u32 gmmu_new_pte_privilege_true_f(void)
236{
237 return 0x20U;
238}
239static inline u32 gmmu_new_pte_privilege_false_f(void)
240{
241 return 0x0U;
242}
243static inline u32 gmmu_new_pte_address_sys_f(u32 v)
244{
245 return (v & 0xffffffU) << 8U;
246}
247static inline u32 gmmu_new_pte_address_sys_w(void)
248{
249 return 0U;
250}
251static inline u32 gmmu_new_pte_address_vid_f(u32 v)
252{
253 return (v & 0xffffffU) << 8U;
254}
255static inline u32 gmmu_new_pte_address_vid_w(void)
256{
257 return 0U;
258}
259static inline u32 gmmu_new_pte_vol_w(void)
260{
261 return 0U;
262}
263static inline u32 gmmu_new_pte_vol_true_f(void)
264{
265 return 0x8U;
266}
267static inline u32 gmmu_new_pte_vol_false_f(void)
268{
269 return 0x0U;
270}
271static inline u32 gmmu_new_pte_aperture_w(void)
272{
273 return 0U;
274}
275static inline u32 gmmu_new_pte_aperture_video_memory_f(void)
276{
277 return 0x0U;
278}
279static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void)
280{
281 return 0x4U;
282}
283static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void)
284{
285 return 0x6U;
286}
287static inline u32 gmmu_new_pte_read_only_w(void)
288{
289 return 0U;
290}
291static inline u32 gmmu_new_pte_read_only_true_f(void)
292{
293 return 0x40U;
294}
/* Place v into the 18-bit comptagline field (bits 21:4) of PTE word 1. */
static inline u32 gmmu_new_pte_comptagline_f(u32 v)
{
	return (v & 0x3ffffU) << 4U;
}
299static inline u32 gmmu_new_pte_comptagline_w(void)
300{
301 return 1U;
302}
303static inline u32 gmmu_new_pte_kind_f(u32 v)
304{
305 return (v & 0xffU) << 24U;
306}
307static inline u32 gmmu_new_pte_kind_w(void)
308{
309 return 1U;
310}
311static inline u32 gmmu_new_pte_address_shift_v(void)
312{
313 return 0x0000000cU;
314}
315static inline u32 gmmu_pte_kind_f(u32 v)
316{
317 return (v & 0xffU) << 4U;
318}
319static inline u32 gmmu_pte_kind_w(void)
320{
321 return 1U;
322}
323static inline u32 gmmu_pte_kind_invalid_v(void)
324{
325 return 0x000000ffU;
326}
327static inline u32 gmmu_pte_kind_pitch_v(void)
328{
329 return 0x00000000U;
330}
331static inline u32 gmmu_pte_kind_z16_v(void)
332{
333 return 0x00000001U;
334}
335static inline u32 gmmu_pte_kind_z16_2c_v(void)
336{
337 return 0x00000002U;
338}
339static inline u32 gmmu_pte_kind_z16_ms2_2c_v(void)
340{
341 return 0x00000003U;
342}
343static inline u32 gmmu_pte_kind_z16_ms4_2c_v(void)
344{
345 return 0x00000004U;
346}
347static inline u32 gmmu_pte_kind_z16_ms8_2c_v(void)
348{
349 return 0x00000005U;
350}
351static inline u32 gmmu_pte_kind_z16_ms16_2c_v(void)
352{
353 return 0x00000006U;
354}
355static inline u32 gmmu_pte_kind_z16_2z_v(void)
356{
357 return 0x00000007U;
358}
359static inline u32 gmmu_pte_kind_z16_ms2_2z_v(void)
360{
361 return 0x00000008U;
362}
363static inline u32 gmmu_pte_kind_z16_ms4_2z_v(void)
364{
365 return 0x00000009U;
366}
367static inline u32 gmmu_pte_kind_z16_ms8_2z_v(void)
368{
369 return 0x0000000aU;
370}
371static inline u32 gmmu_pte_kind_z16_ms16_2z_v(void)
372{
373 return 0x0000000bU;
374}
375static inline u32 gmmu_pte_kind_z16_2cz_v(void)
376{
377 return 0x00000036U;
378}
379static inline u32 gmmu_pte_kind_z16_ms2_2cz_v(void)
380{
381 return 0x00000037U;
382}
383static inline u32 gmmu_pte_kind_z16_ms4_2cz_v(void)
384{
385 return 0x00000038U;
386}
387static inline u32 gmmu_pte_kind_z16_ms8_2cz_v(void)
388{
389 return 0x00000039U;
390}
391static inline u32 gmmu_pte_kind_z16_ms16_2cz_v(void)
392{
393 return 0x0000005fU;
394}
395static inline u32 gmmu_pte_kind_s8z24_v(void)
396{
397 return 0x00000011U;
398}
399static inline u32 gmmu_pte_kind_s8z24_1z_v(void)
400{
401 return 0x00000012U;
402}
403static inline u32 gmmu_pte_kind_s8z24_ms2_1z_v(void)
404{
405 return 0x00000013U;
406}
407static inline u32 gmmu_pte_kind_s8z24_ms4_1z_v(void)
408{
409 return 0x00000014U;
410}
411static inline u32 gmmu_pte_kind_s8z24_ms8_1z_v(void)
412{
413 return 0x00000015U;
414}
415static inline u32 gmmu_pte_kind_s8z24_ms16_1z_v(void)
416{
417 return 0x00000016U;
418}
419static inline u32 gmmu_pte_kind_s8z24_2cz_v(void)
420{
421 return 0x00000017U;
422}
423static inline u32 gmmu_pte_kind_s8z24_ms2_2cz_v(void)
424{
425 return 0x00000018U;
426}
427static inline u32 gmmu_pte_kind_s8z24_ms4_2cz_v(void)
428{
429 return 0x00000019U;
430}
431static inline u32 gmmu_pte_kind_s8z24_ms8_2cz_v(void)
432{
433 return 0x0000001aU;
434}
435static inline u32 gmmu_pte_kind_s8z24_ms16_2cz_v(void)
436{
437 return 0x0000001bU;
438}
439static inline u32 gmmu_pte_kind_s8z24_2cs_v(void)
440{
441 return 0x0000001cU;
442}
443static inline u32 gmmu_pte_kind_s8z24_ms2_2cs_v(void)
444{
445 return 0x0000001dU;
446}
447static inline u32 gmmu_pte_kind_s8z24_ms4_2cs_v(void)
448{
449 return 0x0000001eU;
450}
451static inline u32 gmmu_pte_kind_s8z24_ms8_2cs_v(void)
452{
453 return 0x0000001fU;
454}
455static inline u32 gmmu_pte_kind_s8z24_ms16_2cs_v(void)
456{
457 return 0x00000020U;
458}
459static inline u32 gmmu_pte_kind_s8z24_4cszv_v(void)
460{
461 return 0x00000021U;
462}
463static inline u32 gmmu_pte_kind_s8z24_ms2_4cszv_v(void)
464{
465 return 0x00000022U;
466}
467static inline u32 gmmu_pte_kind_s8z24_ms4_4cszv_v(void)
468{
469 return 0x00000023U;
470}
471static inline u32 gmmu_pte_kind_s8z24_ms8_4cszv_v(void)
472{
473 return 0x00000024U;
474}
475static inline u32 gmmu_pte_kind_s8z24_ms16_4cszv_v(void)
476{
477 return 0x00000025U;
478}
479static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_v(void)
480{
481 return 0x00000026U;
482}
483static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_v(void)
484{
485 return 0x00000027U;
486}
487static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_v(void)
488{
489 return 0x00000028U;
490}
491static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_v(void)
492{
493 return 0x00000029U;
494}
495static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_1zv_v(void)
496{
497 return 0x0000002eU;
498}
499static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_1zv_v(void)
500{
501 return 0x0000002fU;
502}
503static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_1zv_v(void)
504{
505 return 0x00000030U;
506}
507static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_1zv_v(void)
508{
509 return 0x00000031U;
510}
511static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2cs_v(void)
512{
513 return 0x00000032U;
514}
515static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2cs_v(void)
516{
517 return 0x00000033U;
518}
519static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2cs_v(void)
520{
521 return 0x00000034U;
522}
523static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2cs_v(void)
524{
525 return 0x00000035U;
526}
527static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2czv_v(void)
528{
529 return 0x0000003aU;
530}
531static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2czv_v(void)
532{
533 return 0x0000003bU;
534}
535static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2czv_v(void)
536{
537 return 0x0000003cU;
538}
539static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2czv_v(void)
540{
541 return 0x0000003dU;
542}
543static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2zv_v(void)
544{
545 return 0x0000003eU;
546}
547static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2zv_v(void)
548{
549 return 0x0000003fU;
550}
551static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2zv_v(void)
552{
553 return 0x00000040U;
554}
555static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2zv_v(void)
556{
557 return 0x00000041U;
558}
559static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_4cszv_v(void)
560{
561 return 0x00000042U;
562}
563static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_4cszv_v(void)
564{
565 return 0x00000043U;
566}
567static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_4cszv_v(void)
568{
569 return 0x00000044U;
570}
571static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_4cszv_v(void)
572{
573 return 0x00000045U;
574}
575static inline u32 gmmu_pte_kind_z24s8_v(void)
576{
577 return 0x00000046U;
578}
579static inline u32 gmmu_pte_kind_z24s8_1z_v(void)
580{
581 return 0x00000047U;
582}
583static inline u32 gmmu_pte_kind_z24s8_ms2_1z_v(void)
584{
585 return 0x00000048U;
586}
587static inline u32 gmmu_pte_kind_z24s8_ms4_1z_v(void)
588{
589 return 0x00000049U;
590}
591static inline u32 gmmu_pte_kind_z24s8_ms8_1z_v(void)
592{
593 return 0x0000004aU;
594}
595static inline u32 gmmu_pte_kind_z24s8_ms16_1z_v(void)
596{
597 return 0x0000004bU;
598}
599static inline u32 gmmu_pte_kind_z24s8_2cs_v(void)
600{
601 return 0x0000004cU;
602}
603static inline u32 gmmu_pte_kind_z24s8_ms2_2cs_v(void)
604{
605 return 0x0000004dU;
606}
607static inline u32 gmmu_pte_kind_z24s8_ms4_2cs_v(void)
608{
609 return 0x0000004eU;
610}
611static inline u32 gmmu_pte_kind_z24s8_ms8_2cs_v(void)
612{
613 return 0x0000004fU;
614}
615static inline u32 gmmu_pte_kind_z24s8_ms16_2cs_v(void)
616{
617 return 0x00000050U;
618}
619static inline u32 gmmu_pte_kind_z24s8_2cz_v(void)
620{
621 return 0x00000051U;
622}
623static inline u32 gmmu_pte_kind_z24s8_ms2_2cz_v(void)
624{
625 return 0x00000052U;
626}
627static inline u32 gmmu_pte_kind_z24s8_ms4_2cz_v(void)
628{
629 return 0x00000053U;
630}
631static inline u32 gmmu_pte_kind_z24s8_ms8_2cz_v(void)
632{
633 return 0x00000054U;
634}
635static inline u32 gmmu_pte_kind_z24s8_ms16_2cz_v(void)
636{
637 return 0x00000055U;
638}
639static inline u32 gmmu_pte_kind_z24s8_4cszv_v(void)
640{
641 return 0x00000056U;
642}
643static inline u32 gmmu_pte_kind_z24s8_ms2_4cszv_v(void)
644{
645 return 0x00000057U;
646}
647static inline u32 gmmu_pte_kind_z24s8_ms4_4cszv_v(void)
648{
649 return 0x00000058U;
650}
651static inline u32 gmmu_pte_kind_z24s8_ms8_4cszv_v(void)
652{
653 return 0x00000059U;
654}
655static inline u32 gmmu_pte_kind_z24s8_ms16_4cszv_v(void)
656{
657 return 0x0000005aU;
658}
659static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_v(void)
660{
661 return 0x0000005bU;
662}
663static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_v(void)
664{
665 return 0x0000005cU;
666}
667static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_v(void)
668{
669 return 0x0000005dU;
670}
671static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_v(void)
672{
673 return 0x0000005eU;
674}
675static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_1zv_v(void)
676{
677 return 0x00000063U;
678}
679static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_1zv_v(void)
680{
681 return 0x00000064U;
682}
683static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_1zv_v(void)
684{
685 return 0x00000065U;
686}
687static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_1zv_v(void)
688{
689 return 0x00000066U;
690}
691static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2cs_v(void)
692{
693 return 0x00000067U;
694}
695static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2cs_v(void)
696{
697 return 0x00000068U;
698}
699static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2cs_v(void)
700{
701 return 0x00000069U;
702}
703static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2cs_v(void)
704{
705 return 0x0000006aU;
706}
707static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2czv_v(void)
708{
709 return 0x0000006fU;
710}
711static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2czv_v(void)
712{
713 return 0x00000070U;
714}
715static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2czv_v(void)
716{
717 return 0x00000071U;
718}
719static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2czv_v(void)
720{
721 return 0x00000072U;
722}
723static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2zv_v(void)
724{
725 return 0x00000073U;
726}
727static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2zv_v(void)
728{
729 return 0x00000074U;
730}
731static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2zv_v(void)
732{
733 return 0x00000075U;
734}
735static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2zv_v(void)
736{
737 return 0x00000076U;
738}
739static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_4cszv_v(void)
740{
741 return 0x00000077U;
742}
743static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_4cszv_v(void)
744{
745 return 0x00000078U;
746}
747static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_4cszv_v(void)
748{
749 return 0x00000079U;
750}
751static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_4cszv_v(void)
752{
753 return 0x0000007aU;
754}
755static inline u32 gmmu_pte_kind_zf32_v(void)
756{
757 return 0x0000007bU;
758}
759static inline u32 gmmu_pte_kind_zf32_1z_v(void)
760{
761 return 0x0000007cU;
762}
763static inline u32 gmmu_pte_kind_zf32_ms2_1z_v(void)
764{
765 return 0x0000007dU;
766}
767static inline u32 gmmu_pte_kind_zf32_ms4_1z_v(void)
768{
769 return 0x0000007eU;
770}
771static inline u32 gmmu_pte_kind_zf32_ms8_1z_v(void)
772{
773 return 0x0000007fU;
774}
775static inline u32 gmmu_pte_kind_zf32_ms16_1z_v(void)
776{
777 return 0x00000080U;
778}
779static inline u32 gmmu_pte_kind_zf32_2cs_v(void)
780{
781 return 0x00000081U;
782}
783static inline u32 gmmu_pte_kind_zf32_ms2_2cs_v(void)
784{
785 return 0x00000082U;
786}
787static inline u32 gmmu_pte_kind_zf32_ms4_2cs_v(void)
788{
789 return 0x00000083U;
790}
791static inline u32 gmmu_pte_kind_zf32_ms8_2cs_v(void)
792{
793 return 0x00000084U;
794}
795static inline u32 gmmu_pte_kind_zf32_ms16_2cs_v(void)
796{
797 return 0x00000085U;
798}
799static inline u32 gmmu_pte_kind_zf32_2cz_v(void)
800{
801 return 0x00000086U;
802}
803static inline u32 gmmu_pte_kind_zf32_ms2_2cz_v(void)
804{
805 return 0x00000087U;
806}
807static inline u32 gmmu_pte_kind_zf32_ms4_2cz_v(void)
808{
809 return 0x00000088U;
810}
811static inline u32 gmmu_pte_kind_zf32_ms8_2cz_v(void)
812{
813 return 0x00000089U;
814}
815static inline u32 gmmu_pte_kind_zf32_ms16_2cz_v(void)
816{
817 return 0x0000008aU;
818}
819static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_v(void)
820{
821 return 0x0000008bU;
822}
823static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_v(void)
824{
825 return 0x0000008cU;
826}
827static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_v(void)
828{
829 return 0x0000008dU;
830}
831static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_v(void)
832{
833 return 0x0000008eU;
834}
835static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1cs_v(void)
836{
837 return 0x0000008fU;
838}
839static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1cs_v(void)
840{
841 return 0x00000090U;
842}
843static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1cs_v(void)
844{
845 return 0x00000091U;
846}
847static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1cs_v(void)
848{
849 return 0x00000092U;
850}
851static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1zv_v(void)
852{
853 return 0x00000097U;
854}
855static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1zv_v(void)
856{
857 return 0x00000098U;
858}
859static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1zv_v(void)
860{
861 return 0x00000099U;
862}
863static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1zv_v(void)
864{
865 return 0x0000009aU;
866}
867static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1czv_v(void)
868{
869 return 0x0000009bU;
870}
871static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1czv_v(void)
872{
873 return 0x0000009cU;
874}
875static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1czv_v(void)
876{
877 return 0x0000009dU;
878}
879static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1czv_v(void)
880{
881 return 0x0000009eU;
882}
883static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cs_v(void)
884{
885 return 0x0000009fU;
886}
887static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cs_v(void)
888{
889 return 0x000000a0U;
890}
891static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cs_v(void)
892{
893 return 0x000000a1U;
894}
895static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cs_v(void)
896{
897 return 0x000000a2U;
898}
899static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cszv_v(void)
900{
901 return 0x000000a3U;
902}
903static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cszv_v(void)
904{
905 return 0x000000a4U;
906}
907static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cszv_v(void)
908{
909 return 0x000000a5U;
910}
911static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cszv_v(void)
912{
913 return 0x000000a6U;
914}
915static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_v(void)
916{
917 return 0x000000a7U;
918}
919static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_v(void)
920{
921 return 0x000000a8U;
922}
923static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_v(void)
924{
925 return 0x000000a9U;
926}
927static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_v(void)
928{
929 return 0x000000aaU;
930}
931static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1cs_v(void)
932{
933 return 0x000000abU;
934}
935static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1cs_v(void)
936{
937 return 0x000000acU;
938}
939static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1cs_v(void)
940{
941 return 0x000000adU;
942}
943static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1cs_v(void)
944{
945 return 0x000000aeU;
946}
947static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1zv_v(void)
948{
949 return 0x000000b3U;
950}
951static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1zv_v(void)
952{
953 return 0x000000b4U;
954}
955static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1zv_v(void)
956{
957 return 0x000000b5U;
958}
959static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1zv_v(void)
960{
961 return 0x000000b6U;
962}
963static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1czv_v(void)
964{
965 return 0x000000b7U;
966}
967static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1czv_v(void)
968{
969 return 0x000000b8U;
970}
971static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1czv_v(void)
972{
973 return 0x000000b9U;
974}
975static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1czv_v(void)
976{
977 return 0x000000baU;
978}
979static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cs_v(void)
980{
981 return 0x000000bbU;
982}
983static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cs_v(void)
984{
985 return 0x000000bcU;
986}
987static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cs_v(void)
988{
989 return 0x000000bdU;
990}
991static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cs_v(void)
992{
993 return 0x000000beU;
994}
995static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cszv_v(void)
996{
997 return 0x000000bfU;
998}
999static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cszv_v(void)
1000{
1001 return 0x000000c0U;
1002}
1003static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cszv_v(void)
1004{
1005 return 0x000000c1U;
1006}
1007static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cszv_v(void)
1008{
1009 return 0x000000c2U;
1010}
1011static inline u32 gmmu_pte_kind_zf32_x24s8_v(void)
1012{
1013 return 0x000000c3U;
1014}
1015static inline u32 gmmu_pte_kind_zf32_x24s8_1cs_v(void)
1016{
1017 return 0x000000c4U;
1018}
1019static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_1cs_v(void)
1020{
1021 return 0x000000c5U;
1022}
1023static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_1cs_v(void)
1024{
1025 return 0x000000c6U;
1026}
1027static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_1cs_v(void)
1028{
1029 return 0x000000c7U;
1030}
1031static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_1cs_v(void)
1032{
1033 return 0x000000c8U;
1034}
1035static inline u32 gmmu_pte_kind_zf32_x24s8_2cszv_v(void)
1036{
1037 return 0x000000ceU;
1038}
1039static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cszv_v(void)
1040{
1041 return 0x000000cfU;
1042}
1043static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cszv_v(void)
1044{
1045 return 0x000000d0U;
1046}
1047static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cszv_v(void)
1048{
1049 return 0x000000d1U;
1050}
1051static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cszv_v(void)
1052{
1053 return 0x000000d2U;
1054}
1055static inline u32 gmmu_pte_kind_zf32_x24s8_2cs_v(void)
1056{
1057 return 0x000000d3U;
1058}
1059static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cs_v(void)
1060{
1061 return 0x000000d4U;
1062}
1063static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cs_v(void)
1064{
1065 return 0x000000d5U;
1066}
1067static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cs_v(void)
1068{
1069 return 0x000000d6U;
1070}
1071static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cs_v(void)
1072{
1073 return 0x000000d7U;
1074}
1075static inline u32 gmmu_pte_kind_generic_16bx2_v(void)
1076{
1077 return 0x000000feU;
1078}
1079static inline u32 gmmu_pte_kind_c32_2c_v(void)
1080{
1081 return 0x000000d8U;
1082}
1083static inline u32 gmmu_pte_kind_c32_2cbr_v(void)
1084{
1085 return 0x000000d9U;
1086}
1087static inline u32 gmmu_pte_kind_c32_2cba_v(void)
1088{
1089 return 0x000000daU;
1090}
1091static inline u32 gmmu_pte_kind_c32_2cra_v(void)
1092{
1093 return 0x000000dbU;
1094}
1095static inline u32 gmmu_pte_kind_c32_2bra_v(void)
1096{
1097 return 0x000000dcU;
1098}
1099static inline u32 gmmu_pte_kind_c32_ms2_2c_v(void)
1100{
1101 return 0x000000ddU;
1102}
1103static inline u32 gmmu_pte_kind_c32_ms2_2cbr_v(void)
1104{
1105 return 0x000000deU;
1106}
1107static inline u32 gmmu_pte_kind_c32_ms2_4cbra_v(void)
1108{
1109 return 0x000000ccU;
1110}
1111static inline u32 gmmu_pte_kind_c32_ms4_2c_v(void)
1112{
1113 return 0x000000dfU;
1114}
1115static inline u32 gmmu_pte_kind_c32_ms4_2cbr_v(void)
1116{
1117 return 0x000000e0U;
1118}
1119static inline u32 gmmu_pte_kind_c32_ms4_2cba_v(void)
1120{
1121 return 0x000000e1U;
1122}
1123static inline u32 gmmu_pte_kind_c32_ms4_2cra_v(void)
1124{
1125 return 0x000000e2U;
1126}
1127static inline u32 gmmu_pte_kind_c32_ms4_2bra_v(void)
1128{
1129 return 0x000000e3U;
1130}
1131static inline u32 gmmu_pte_kind_c32_ms4_4cbra_v(void)
1132{
1133 return 0x0000002cU;
1134}
1135static inline u32 gmmu_pte_kind_c32_ms8_ms16_2c_v(void)
1136{
1137 return 0x000000e4U;
1138}
1139static inline u32 gmmu_pte_kind_c32_ms8_ms16_2cra_v(void)
1140{
1141 return 0x000000e5U;
1142}
1143static inline u32 gmmu_pte_kind_c64_2c_v(void)
1144{
1145 return 0x000000e6U;
1146}
1147static inline u32 gmmu_pte_kind_c64_2cbr_v(void)
1148{
1149 return 0x000000e7U;
1150}
1151static inline u32 gmmu_pte_kind_c64_2cba_v(void)
1152{
1153 return 0x000000e8U;
1154}
1155static inline u32 gmmu_pte_kind_c64_2cra_v(void)
1156{
1157 return 0x000000e9U;
1158}
1159static inline u32 gmmu_pte_kind_c64_2bra_v(void)
1160{
1161 return 0x000000eaU;
1162}
1163static inline u32 gmmu_pte_kind_c64_ms2_2c_v(void)
1164{
1165 return 0x000000ebU;
1166}
1167static inline u32 gmmu_pte_kind_c64_ms2_2cbr_v(void)
1168{
1169 return 0x000000ecU;
1170}
1171static inline u32 gmmu_pte_kind_c64_ms2_4cbra_v(void)
1172{
1173 return 0x000000cdU;
1174}
1175static inline u32 gmmu_pte_kind_c64_ms4_2c_v(void)
1176{
1177 return 0x000000edU;
1178}
1179static inline u32 gmmu_pte_kind_c64_ms4_2cbr_v(void)
1180{
1181 return 0x000000eeU;
1182}
1183static inline u32 gmmu_pte_kind_c64_ms4_2cba_v(void)
1184{
1185 return 0x000000efU;
1186}
1187static inline u32 gmmu_pte_kind_c64_ms4_2cra_v(void)
1188{
1189 return 0x000000f0U;
1190}
1191static inline u32 gmmu_pte_kind_c64_ms4_2bra_v(void)
1192{
1193 return 0x000000f1U;
1194}
1195static inline u32 gmmu_pte_kind_c64_ms4_4cbra_v(void)
1196{
1197 return 0x0000002dU;
1198}
1199static inline u32 gmmu_pte_kind_c64_ms8_ms16_2c_v(void)
1200{
1201 return 0x000000f2U;
1202}
1203static inline u32 gmmu_pte_kind_c64_ms8_ms16_2cra_v(void)
1204{
1205 return 0x000000f3U;
1206}
1207static inline u32 gmmu_pte_kind_c128_2c_v(void)
1208{
1209 return 0x000000f4U;
1210}
1211static inline u32 gmmu_pte_kind_c128_2cr_v(void)
1212{
1213 return 0x000000f5U;
1214}
1215static inline u32 gmmu_pte_kind_c128_ms2_2c_v(void)
1216{
1217 return 0x000000f6U;
1218}
1219static inline u32 gmmu_pte_kind_c128_ms2_2cr_v(void)
1220{
1221 return 0x000000f7U;
1222}
1223static inline u32 gmmu_pte_kind_c128_ms4_2c_v(void)
1224{
1225 return 0x000000f8U;
1226}
1227static inline u32 gmmu_pte_kind_c128_ms4_2cr_v(void)
1228{
1229 return 0x000000f9U;
1230}
1231static inline u32 gmmu_pte_kind_c128_ms8_ms16_2c_v(void)
1232{
1233 return 0x000000faU;
1234}
1235static inline u32 gmmu_pte_kind_c128_ms8_ms16_2cr_v(void)
1236{
1237 return 0x000000fbU;
1238}
1239static inline u32 gmmu_pte_kind_x8c24_v(void)
1240{
1241 return 0x000000fcU;
1242}
1243static inline u32 gmmu_pte_kind_pitch_no_swizzle_v(void)
1244{
1245 return 0x000000fdU;
1246}
1247static inline u32 gmmu_pte_kind_smsked_message_v(void)
1248{
1249 return 0x000000caU;
1250}
1251static inline u32 gmmu_pte_kind_smhost_message_v(void)
1252{
1253 return 0x000000cbU;
1254}
1255static inline u32 gmmu_pte_kind_s8_v(void)
1256{
1257 return 0x0000002aU;
1258}
1259static inline u32 gmmu_pte_kind_s8_2s_v(void)
1260{
1261 return 0x0000002bU;
1262}
1263static inline u32 gmmu_fault_client_type_gpc_v(void)
1264{
1265 return 0x00000000U;
1266}
1267static inline u32 gmmu_fault_client_type_hub_v(void)
1268{
1269 return 0x00000001U;
1270}
1271static inline u32 gmmu_fault_type_unbound_inst_block_v(void)
1272{
1273 return 0x00000004U;
1274}
1275static inline u32 gmmu_fault_type_pte_v(void)
1276{
1277 return 0x00000002U;
1278}
1279static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void)
1280{
1281 return 0x00000005U;
1282}
1283static inline u32 gmmu_fault_mmu_eng_id_physical_v(void)
1284{
1285 return 0x0000001fU;
1286}
1287static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void)
1288{
1289 return 0x0000000fU;
1290}
/* Constant value 0x20 (32) defined for the MMU fault buffer entry size. */
static inline u32 gmmu_fault_buf_size_v(void)
{
	return 0x00000020U;
}
1295static inline u32 gmmu_fault_buf_entry_inst_aperture_v(u32 r)
1296{
1297 return (r >> 8U) & 0x3U;
1298}
1299static inline u32 gmmu_fault_buf_entry_inst_aperture_w(void)
1300{
1301 return 0U;
1302}
1303static inline u32 gmmu_fault_buf_entry_inst_aperture_vid_mem_v(void)
1304{
1305 return 0x00000000U;
1306}
1307static inline u32 gmmu_fault_buf_entry_inst_aperture_sys_coh_v(void)
1308{
1309 return 0x00000002U;
1310}
1311static inline u32 gmmu_fault_buf_entry_inst_aperture_sys_nocoh_v(void)
1312{
1313 return 0x00000003U;
1314}
1315static inline u32 gmmu_fault_buf_entry_inst_lo_f(u32 v)
1316{
1317 return (v & 0xfffffU) << 12U;
1318}
1319static inline u32 gmmu_fault_buf_entry_inst_lo_v(u32 r)
1320{
1321 return (r >> 12U) & 0xfffffU;
1322}
1323static inline u32 gmmu_fault_buf_entry_inst_lo_w(void)
1324{
1325 return 0U;
1326}
1327static inline u32 gmmu_fault_buf_entry_inst_hi_v(u32 r)
1328{
1329 return (r >> 0U) & 0xffffffffU;
1330}
1331static inline u32 gmmu_fault_buf_entry_inst_hi_w(void)
1332{
1333 return 1U;
1334}
1335static inline u32 gmmu_fault_buf_entry_addr_phys_aperture_v(u32 r)
1336{
1337 return (r >> 0U) & 0x3U;
1338}
1339static inline u32 gmmu_fault_buf_entry_addr_phys_aperture_w(void)
1340{
1341 return 2U;
1342}
1343static inline u32 gmmu_fault_buf_entry_addr_lo_f(u32 v)
1344{
1345 return (v & 0xfffffU) << 12U;
1346}
1347static inline u32 gmmu_fault_buf_entry_addr_lo_v(u32 r)
1348{
1349 return (r >> 12U) & 0xfffffU;
1350}
1351static inline u32 gmmu_fault_buf_entry_addr_lo_w(void)
1352{
1353 return 2U;
1354}
1355static inline u32 gmmu_fault_buf_entry_addr_hi_v(u32 r)
1356{
1357 return (r >> 0U) & 0xffffffffU;
1358}
1359static inline u32 gmmu_fault_buf_entry_addr_hi_w(void)
1360{
1361 return 3U;
1362}
1363static inline u32 gmmu_fault_buf_entry_timestamp_lo_v(u32 r)
1364{
1365 return (r >> 0U) & 0xffffffffU;
1366}
1367static inline u32 gmmu_fault_buf_entry_timestamp_lo_w(void)
1368{
1369 return 4U;
1370}
1371static inline u32 gmmu_fault_buf_entry_timestamp_hi_v(u32 r)
1372{
1373 return (r >> 0U) & 0xffffffffU;
1374}
1375static inline u32 gmmu_fault_buf_entry_timestamp_hi_w(void)
1376{
1377 return 5U;
1378}
1379static inline u32 gmmu_fault_buf_entry_engine_id_v(u32 r)
1380{
1381 return (r >> 0U) & 0x1ffU;
1382}
1383static inline u32 gmmu_fault_buf_entry_engine_id_w(void)
1384{
1385 return 6U;
1386}
1387static inline u32 gmmu_fault_buf_entry_fault_type_v(u32 r)
1388{
1389 return (r >> 0U) & 0x1fU;
1390}
1391static inline u32 gmmu_fault_buf_entry_fault_type_w(void)
1392{
1393 return 7U;
1394}
1395static inline u32 gmmu_fault_buf_entry_replayable_fault_v(u32 r)
1396{
1397 return (r >> 7U) & 0x1U;
1398}
1399static inline u32 gmmu_fault_buf_entry_replayable_fault_w(void)
1400{
1401 return 7U;
1402}
1403static inline u32 gmmu_fault_buf_entry_replayable_fault_true_v(void)
1404{
1405 return 0x00000001U;
1406}
1407static inline u32 gmmu_fault_buf_entry_replayable_fault_true_f(void)
1408{
1409 return 0x80U;
1410}
1411static inline u32 gmmu_fault_buf_entry_client_v(u32 r)
1412{
1413 return (r >> 8U) & 0x7fU;
1414}
1415static inline u32 gmmu_fault_buf_entry_client_w(void)
1416{
1417 return 7U;
1418}
1419static inline u32 gmmu_fault_buf_entry_access_type_v(u32 r)
1420{
1421 return (r >> 16U) & 0xfU;
1422}
1423static inline u32 gmmu_fault_buf_entry_access_type_w(void)
1424{
1425 return 7U;
1426}
1427static inline u32 gmmu_fault_buf_entry_mmu_client_type_v(u32 r)
1428{
1429 return (r >> 20U) & 0x1U;
1430}
1431static inline u32 gmmu_fault_buf_entry_mmu_client_type_w(void)
1432{
1433 return 7U;
1434}
1435static inline u32 gmmu_fault_buf_entry_gpc_id_v(u32 r)
1436{
1437 return (r >> 24U) & 0x1fU;
1438}
1439static inline u32 gmmu_fault_buf_entry_gpc_id_w(void)
1440{
1441 return 7U;
1442}
1443static inline u32 gmmu_fault_buf_entry_protected_mode_v(u32 r)
1444{
1445 return (r >> 29U) & 0x1U;
1446}
1447static inline u32 gmmu_fault_buf_entry_protected_mode_w(void)
1448{
1449 return 7U;
1450}
1451static inline u32 gmmu_fault_buf_entry_protected_mode_true_v(void)
1452{
1453 return 0x00000001U;
1454}
1455static inline u32 gmmu_fault_buf_entry_protected_mode_true_f(void)
1456{
1457 return 0x20000000U;
1458}
1459static inline u32 gmmu_fault_buf_entry_replayable_fault_en_v(u32 r)
1460{
1461 return (r >> 30U) & 0x1U;
1462}
1463static inline u32 gmmu_fault_buf_entry_replayable_fault_en_w(void)
1464{
1465 return 7U;
1466}
1467static inline u32 gmmu_fault_buf_entry_replayable_fault_en_true_v(void)
1468{
1469 return 0x00000001U;
1470}
1471static inline u32 gmmu_fault_buf_entry_replayable_fault_en_true_f(void)
1472{
1473 return 0x40000000U;
1474}
1475static inline u32 gmmu_fault_buf_entry_valid_m(void)
1476{
1477 return 0x1U << 31U;
1478}
1479static inline u32 gmmu_fault_buf_entry_valid_v(u32 r)
1480{
1481 return (r >> 31U) & 0x1U;
1482}
1483static inline u32 gmmu_fault_buf_entry_valid_w(void)
1484{
1485 return 7U;
1486}
1487static inline u32 gmmu_fault_buf_entry_valid_true_v(void)
1488{
1489 return 0x00000001U;
1490}
1491static inline u32 gmmu_fault_buf_entry_valid_true_f(void)
1492{
1493 return 0x80000000U;
1494}
1495#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h
new file mode 100644
index 00000000..692b7ba3
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h
@@ -0,0 +1,4939 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_gr_gv11b_h_
57#define _hw_gr_gv11b_h_
58
59static inline u32 gr_intr_r(void)
60{
61 return 0x00400100U;
62}
63static inline u32 gr_intr_notify_pending_f(void)
64{
65 return 0x1U;
66}
67static inline u32 gr_intr_notify_reset_f(void)
68{
69 return 0x1U;
70}
71static inline u32 gr_intr_semaphore_pending_f(void)
72{
73 return 0x2U;
74}
75static inline u32 gr_intr_semaphore_reset_f(void)
76{
77 return 0x2U;
78}
79static inline u32 gr_intr_illegal_method_pending_f(void)
80{
81 return 0x10U;
82}
83static inline u32 gr_intr_illegal_method_reset_f(void)
84{
85 return 0x10U;
86}
87static inline u32 gr_intr_illegal_notify_pending_f(void)
88{
89 return 0x40U;
90}
91static inline u32 gr_intr_illegal_notify_reset_f(void)
92{
93 return 0x40U;
94}
95static inline u32 gr_intr_firmware_method_f(u32 v)
96{
97 return (v & 0x1U) << 8U;
98}
99static inline u32 gr_intr_firmware_method_pending_f(void)
100{
101 return 0x100U;
102}
103static inline u32 gr_intr_firmware_method_reset_f(void)
104{
105 return 0x100U;
106}
107static inline u32 gr_intr_illegal_class_pending_f(void)
108{
109 return 0x20U;
110}
111static inline u32 gr_intr_illegal_class_reset_f(void)
112{
113 return 0x20U;
114}
115static inline u32 gr_intr_fecs_error_pending_f(void)
116{
117 return 0x80000U;
118}
119static inline u32 gr_intr_fecs_error_reset_f(void)
120{
121 return 0x80000U;
122}
123static inline u32 gr_intr_class_error_pending_f(void)
124{
125 return 0x100000U;
126}
127static inline u32 gr_intr_class_error_reset_f(void)
128{
129 return 0x100000U;
130}
131static inline u32 gr_intr_exception_pending_f(void)
132{
133 return 0x200000U;
134}
135static inline u32 gr_intr_exception_reset_f(void)
136{
137 return 0x200000U;
138}
139static inline u32 gr_fecs_intr_r(void)
140{
141 return 0x00400144U;
142}
143static inline u32 gr_class_error_r(void)
144{
145 return 0x00400110U;
146}
147static inline u32 gr_class_error_code_v(u32 r)
148{
149 return (r >> 0U) & 0xffffU;
150}
151static inline u32 gr_intr_nonstall_r(void)
152{
153 return 0x00400120U;
154}
155static inline u32 gr_intr_nonstall_trap_pending_f(void)
156{
157 return 0x2U;
158}
159static inline u32 gr_intr_en_r(void)
160{
161 return 0x0040013cU;
162}
163static inline u32 gr_exception_r(void)
164{
165 return 0x00400108U;
166}
167static inline u32 gr_exception_fe_m(void)
168{
169 return 0x1U << 0U;
170}
171static inline u32 gr_exception_gpc_m(void)
172{
173 return 0x1U << 24U;
174}
175static inline u32 gr_exception_memfmt_m(void)
176{
177 return 0x1U << 1U;
178}
179static inline u32 gr_exception_ds_m(void)
180{
181 return 0x1U << 4U;
182}
183static inline u32 gr_exception_sked_m(void)
184{
185 return 0x1U << 8U;
186}
187static inline u32 gr_exception1_r(void)
188{
189 return 0x00400118U;
190}
191static inline u32 gr_exception1_gpc_0_pending_f(void)
192{
193 return 0x1U;
194}
195static inline u32 gr_exception2_r(void)
196{
197 return 0x0040011cU;
198}
199static inline u32 gr_exception_en_r(void)
200{
201 return 0x00400138U;
202}
203static inline u32 gr_exception_en_fe_m(void)
204{
205 return 0x1U << 0U;
206}
207static inline u32 gr_exception_en_fe_enabled_f(void)
208{
209 return 0x1U;
210}
211static inline u32 gr_exception_en_gpc_m(void)
212{
213 return 0x1U << 24U;
214}
215static inline u32 gr_exception_en_gpc_enabled_f(void)
216{
217 return 0x1000000U;
218}
219static inline u32 gr_exception_en_memfmt_m(void)
220{
221 return 0x1U << 1U;
222}
223static inline u32 gr_exception_en_memfmt_enabled_f(void)
224{
225 return 0x2U;
226}
227static inline u32 gr_exception_en_ds_m(void)
228{
229 return 0x1U << 4U;
230}
231static inline u32 gr_exception_en_ds_enabled_f(void)
232{
233 return 0x10U;
234}
235static inline u32 gr_exception1_en_r(void)
236{
237 return 0x00400130U;
238}
239static inline u32 gr_exception2_en_r(void)
240{
241 return 0x00400134U;
242}
243static inline u32 gr_gpfifo_ctl_r(void)
244{
245 return 0x00400500U;
246}
247static inline u32 gr_gpfifo_ctl_access_f(u32 v)
248{
249 return (v & 0x1U) << 0U;
250}
251static inline u32 gr_gpfifo_ctl_access_disabled_f(void)
252{
253 return 0x0U;
254}
255static inline u32 gr_gpfifo_ctl_access_enabled_f(void)
256{
257 return 0x1U;
258}
259static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v)
260{
261 return (v & 0x1U) << 16U;
262}
263static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void)
264{
265 return 0x00000001U;
266}
267static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void)
268{
269 return 0x10000U;
270}
271static inline u32 gr_gpfifo_status_r(void)
272{
273 return 0x00400504U;
274}
275static inline u32 gr_trapped_addr_r(void)
276{
277 return 0x00400704U;
278}
279static inline u32 gr_trapped_addr_mthd_v(u32 r)
280{
281 return (r >> 2U) & 0xfffU;
282}
283static inline u32 gr_trapped_addr_subch_v(u32 r)
284{
285 return (r >> 16U) & 0x7U;
286}
287static inline u32 gr_trapped_addr_mme_generated_v(u32 r)
288{
289 return (r >> 20U) & 0x1U;
290}
291static inline u32 gr_trapped_addr_datahigh_v(u32 r)
292{
293 return (r >> 24U) & 0x1U;
294}
295static inline u32 gr_trapped_addr_priv_v(u32 r)
296{
297 return (r >> 28U) & 0x1U;
298}
299static inline u32 gr_trapped_addr_status_v(u32 r)
300{
301 return (r >> 31U) & 0x1U;
302}
303static inline u32 gr_trapped_data_lo_r(void)
304{
305 return 0x00400708U;
306}
307static inline u32 gr_trapped_data_hi_r(void)
308{
309 return 0x0040070cU;
310}
311static inline u32 gr_trapped_data_mme_r(void)
312{
313 return 0x00400710U;
314}
315static inline u32 gr_trapped_data_mme_pc_v(u32 r)
316{
317 return (r >> 0U) & 0xfffU;
318}
319static inline u32 gr_status_r(void)
320{
321 return 0x00400700U;
322}
323static inline u32 gr_status_fe_method_upper_v(u32 r)
324{
325 return (r >> 1U) & 0x1U;
326}
327static inline u32 gr_status_fe_method_lower_v(u32 r)
328{
329 return (r >> 2U) & 0x1U;
330}
331static inline u32 gr_status_fe_method_lower_idle_v(void)
332{
333 return 0x00000000U;
334}
335static inline u32 gr_status_fe_gi_v(u32 r)
336{
337 return (r >> 21U) & 0x1U;
338}
339static inline u32 gr_status_mask_r(void)
340{
341 return 0x00400610U;
342}
343static inline u32 gr_status_1_r(void)
344{
345 return 0x00400604U;
346}
347static inline u32 gr_status_2_r(void)
348{
349 return 0x00400608U;
350}
351static inline u32 gr_engine_status_r(void)
352{
353 return 0x0040060cU;
354}
355static inline u32 gr_engine_status_value_busy_f(void)
356{
357 return 0x1U;
358}
359static inline u32 gr_pri_be0_becs_be_exception_r(void)
360{
361 return 0x00410204U;
362}
363static inline u32 gr_pri_be0_becs_be_exception_en_r(void)
364{
365 return 0x00410208U;
366}
367static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void)
368{
369 return 0x00502c90U;
370}
371static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void)
372{
373 return 0x00502c94U;
374}
375static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void)
376{
377 return 0x00504508U;
378}
379static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void)
380{
381 return 0x0050450cU;
382}
383static inline u32 gr_activity_0_r(void)
384{
385 return 0x00400380U;
386}
387static inline u32 gr_activity_1_r(void)
388{
389 return 0x00400384U;
390}
391static inline u32 gr_activity_2_r(void)
392{
393 return 0x00400388U;
394}
395static inline u32 gr_activity_4_r(void)
396{
397 return 0x00400390U;
398}
399static inline u32 gr_activity_4_gpc0_s(void)
400{
401 return 3U;
402}
403static inline u32 gr_activity_4_gpc0_f(u32 v)
404{
405 return (v & 0x7U) << 0U;
406}
407static inline u32 gr_activity_4_gpc0_m(void)
408{
409 return 0x7U << 0U;
410}
411static inline u32 gr_activity_4_gpc0_v(u32 r)
412{
413 return (r >> 0U) & 0x7U;
414}
415static inline u32 gr_activity_4_gpc0_empty_v(void)
416{
417 return 0x00000000U;
418}
419static inline u32 gr_activity_4_gpc0_preempted_v(void)
420{
421 return 0x00000004U;
422}
423static inline u32 gr_pri_gpc0_gcc_dbg_r(void)
424{
425 return 0x00501000U;
426}
427static inline u32 gr_pri_gpcs_gcc_dbg_r(void)
428{
429 return 0x00419000U;
430}
431static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void)
432{
433 return 0x1U << 1U;
434}
435static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void)
436{
437 return 0x0050433cU;
438}
439static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void)
440{
441 return 0x00419b3cU;
442}
443static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void)
444{
445 return 0x1U << 0U;
446}
447static inline u32 gr_pri_sked_activity_r(void)
448{
449 return 0x00407054U;
450}
451static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void)
452{
453 return 0x00502c80U;
454}
455static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void)
456{
457 return 0x00502c84U;
458}
459static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void)
460{
461 return 0x00502c88U;
462}
463static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void)
464{
465 return 0x00502c8cU;
466}
467static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void)
468{
469 return 0x00504500U;
470}
471static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void)
472{
473 return 0x00504d00U;
474}
475static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void)
476{
477 return 0x00501d00U;
478}
479static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void)
480{
481 return 0x0041ac80U;
482}
483static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void)
484{
485 return 0x0041ac84U;
486}
487static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void)
488{
489 return 0x0041ac88U;
490}
491static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void)
492{
493 return 0x0041ac8cU;
494}
495static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void)
496{
497 return 0x0041c500U;
498}
499static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void)
500{
501 return 0x0041cd00U;
502}
503static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void)
504{
505 return 0x00419d00U;
506}
507static inline u32 gr_pri_be0_becs_be_activity0_r(void)
508{
509 return 0x00410200U;
510}
511static inline u32 gr_pri_be1_becs_be_activity0_r(void)
512{
513 return 0x00410600U;
514}
515static inline u32 gr_pri_bes_becs_be_activity0_r(void)
516{
517 return 0x00408a00U;
518}
519static inline u32 gr_pri_ds_mpipe_status_r(void)
520{
521 return 0x00405858U;
522}
523static inline u32 gr_pri_fe_go_idle_info_r(void)
524{
525 return 0x00404194U;
526}
527static inline u32 gr_pri_fe_chip_def_info_r(void)
528{
529 return 0x00404030U;
530}
531static inline u32 gr_pri_fe_chip_def_info_max_veid_count_v(u32 r)
532{
533 return (r >> 0U) & 0xfffU;
534}
535static inline u32 gr_pri_fe_chip_def_info_max_veid_count_init_v(void)
536{
537 return 0x00000040U;
538}
539static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void)
540{
541 return 0x00504238U;
542}
543static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void)
544{
545 return 0x00504358U;
546}
547static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m(void)
548{
549 return 0x1U << 0U;
550}
551static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m(void)
552{
553 return 0x1U << 1U;
554}
555static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m(void)
556{
557 return 0x1U << 2U;
558}
559static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m(void)
560{
561 return 0x1U << 3U;
562}
563static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m(void)
564{
565 return 0x1U << 4U;
566}
567static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m(void)
568{
569 return 0x1U << 5U;
570}
571static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m(void)
572{
573 return 0x1U << 6U;
574}
575static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m(void)
576{
577 return 0x1U << 7U;
578}
579static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m(void)
580{
581 return 0x1U << 8U;
582}
583static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m(void)
584{
585 return 0x1U << 9U;
586}
587static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m(void)
588{
589 return 0x1U << 10U;
590}
591static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m(void)
592{
593 return 0x1U << 11U;
594}
595static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m(void)
596{
597 return 0x1U << 12U;
598}
599static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m(void)
600{
601 return 0x1U << 13U;
602}
603static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m(void)
604{
605 return 0x1U << 14U;
606}
607static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m(void)
608{
609 return 0x1U << 15U;
610}
611static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
612{
613 return (r >> 24U) & 0x1U;
614}
615static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
616{
617 return (r >> 26U) & 0x1U;
618}
619static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f(void)
620{
621 return 0x40000000U;
622}
623static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r(void)
624{
625 return 0x0050435cU;
626}
627static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s(void)
628{
629 return 16U;
630}
631static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(u32 r)
632{
633 return (r >> 0U) & 0xffffU;
634}
635static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r(void)
636{
637 return 0x00504360U;
638}
639static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s(void)
640{
641 return 16U;
642}
643static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(u32 r)
644{
645 return (r >> 0U) & 0xffffU;
646}
647static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r(void)
648{
649 return 0x0050436cU;
650}
651static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m(void)
652{
653 return 0x1U << 0U;
654}
655static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m(void)
656{
657 return 0x1U << 1U;
658}
659static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m(void)
660{
661 return 0x1U << 2U;
662}
663static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m(void)
664{
665 return 0x1U << 3U;
666}
667static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
668{
669 return (r >> 8U) & 0x1U;
670}
671static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
672{
673 return (r >> 10U) & 0x1U;
674}
675static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f(void)
676{
677 return 0x40000000U;
678}
679static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r(void)
680{
681 return 0x00504370U;
682}
683static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s(void)
684{
685 return 16U;
686}
687static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(u32 r)
688{
689 return (r >> 0U) & 0xffffU;
690}
691static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r(void)
692{
693 return 0x00504374U;
694}
695static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s(void)
696{
697 return 16U;
698}
699static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(u32 r)
700{
701 return (r >> 0U) & 0xffffU;
702}
703static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_r(void)
704{
705 return 0x0050464cU;
706}
707static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_data_m(void)
708{
709 return 0x1U << 0U;
710}
711static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_predecode_m(void)
712{
713 return 0x1U << 1U;
714}
715static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_data_m(void)
716{
717 return 0x1U << 2U;
718}
719static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_predecode_m(void)
720{
721 return 0x1U << 3U;
722}
723static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_data_m(void)
724{
725 return 0x1U << 4U;
726}
727static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_predecode_m(void)
728{
729 return 0x1U << 5U;
730}
731static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_data_m(void)
732{
733 return 0x1U << 6U;
734}
735static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_predecode_m(void)
736{
737 return 0x1U << 7U;
738}
739static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
740{
741 return (r >> 16U) & 0x1U;
742}
743static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
744{
745 return (r >> 18U) & 0x1U;
746}
747static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_reset_task_f(void)
748{
749 return 0x40000000U;
750}
751static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r(void)
752{
753 return 0x00504650U;
754}
755static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s(void)
756{
757 return 16U;
758}
759static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_v(u32 r)
760{
761 return (r >> 0U) & 0xffffU;
762}
763static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r(void)
764{
765 return 0x00504654U;
766}
767static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s(void)
768{
769 return 16U;
770}
771static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_v(u32 r)
772{
773 return (r >> 0U) & 0xffffU;
774}
775static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r(void)
776{
777 return 0x00504624U;
778}
779static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m(void)
780{
781 return 0x1U << 0U;
782}
783static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_1_m(void)
784{
785 return 0x1U << 1U;
786}
787static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m(void)
788{
789 return 0x1U << 2U;
790}
791static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_1_m(void)
792{
793 return 0x1U << 3U;
794}
795static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_pixrpf_m(void)
796{
797 return 0x1U << 4U;
798}
799static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_miss_fifo_m(void)
800{
801 return 0x1U << 5U;
802}
803static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_pixrpf_m(void)
804{
805 return 0x1U << 6U;
806}
807static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_miss_fifo_m(void)
808{
809 return 0x1U << 7U;
810}
811static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
812{
813 return (r >> 8U) & 0x1U;
814}
815static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
816{
817 return (r >> 10U) & 0x1U;
818}
819static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_reset_task_f(void)
820{
821 return 0x40000000U;
822}
823static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r(void)
824{
825 return 0x00504628U;
826}
827static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s(void)
828{
829 return 16U;
830}
831static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_v(u32 r)
832{
833 return (r >> 0U) & 0xffffU;
834}
835static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r(void)
836{
837 return 0x0050462cU;
838}
839static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s(void)
840{
841 return 16U;
842}
843static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_v(u32 r)
844{
845 return (r >> 0U) & 0xffffU;
846}
847static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r(void)
848{
849 return 0x00504638U;
850}
851static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m(void)
852{
853 return 0x1U << 0U;
854}
855static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m(void)
856{
857 return 0x1U << 1U;
858}
859static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m(void)
860{
861 return 0x1U << 2U;
862}
863static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m(void)
864{
865 return 0x1U << 3U;
866}
867static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m(void)
868{
869 return 0x1U << 4U;
870}
871static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m(void)
872{
873 return 0x1U << 5U;
874}
875static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m(void)
876{
877 return 0x1U << 6U;
878}
879static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m(void)
880{
881 return 0x1U << 7U;
882}
883static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
884{
885 return (r >> 16U) & 0x1U;
886}
887static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
888{
889 return (r >> 18U) & 0x1U;
890}
891static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f(void)
892{
893 return 0x40000000U;
894}
895static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r(void)
896{
897 return 0x0050463cU;
898}
899static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s(void)
900{
901 return 16U;
902}
903static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(u32 r)
904{
905 return (r >> 0U) & 0xffffU;
906}
907static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r(void)
908{
909 return 0x00504640U;
910}
911static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s(void)
912{
913 return 16U;
914}
915static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(u32 r)
916{
917 return (r >> 0U) & 0xffffU;
918}
919static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void)
920{
921 return 0x005042c4U;
922}
923static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void)
924{
925 return 0x0U;
926}
927static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void)
928{
929 return 0x1U;
930}
931static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void)
932{
933 return 0x2U;
934}
935static inline u32 gr_gpc0_tpc0_mpc_hww_esr_r(void)
936{
937 return 0x00504430U;
938}
939static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void)
940{
941 return 0x40000000U;
942}
943static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void)
944{
945 return 0x00504434U;
946}
947static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r)
948{
949 return (r >> 0U) & 0x3fU;
950}
951static inline u32 gr_pri_be0_crop_status1_r(void)
952{
953 return 0x00410134U;
954}
955static inline u32 gr_pri_bes_crop_status1_r(void)
956{
957 return 0x00408934U;
958}
959static inline u32 gr_pri_be0_zrop_status_r(void)
960{
961 return 0x00410048U;
962}
963static inline u32 gr_pri_be0_zrop_status2_r(void)
964{
965 return 0x0041004cU;
966}
967static inline u32 gr_pri_bes_zrop_status_r(void)
968{
969 return 0x00408848U;
970}
971static inline u32 gr_pri_bes_zrop_status2_r(void)
972{
973 return 0x0040884cU;
974}
975static inline u32 gr_pipe_bundle_address_r(void)
976{
977 return 0x00400200U;
978}
979static inline u32 gr_pipe_bundle_address_value_v(u32 r)
980{
981 return (r >> 0U) & 0xffffU;
982}
983static inline u32 gr_pipe_bundle_address_veid_f(u32 v)
984{
985 return (v & 0x3fU) << 20U;
986}
987static inline u32 gr_pipe_bundle_address_veid_w(void)
988{
989 return 0U;
990}
991static inline u32 gr_pipe_bundle_data_r(void)
992{
993 return 0x00400204U;
994}
995static inline u32 gr_pipe_bundle_config_r(void)
996{
997 return 0x00400208U;
998}
999static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void)
1000{
1001 return 0x0U;
1002}
1003static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void)
1004{
1005 return 0x80000000U;
1006}
/*
 * FE HWW ESR and SM HWW global ESR registers.
 * The *_report_f values are the per-condition enable bits for the report
 * mask; the matching *_m/_pending_f pairs select and test the same bit
 * positions in the global ESR itself.
 */
static inline u32 gr_fe_hww_esr_r(void) { return 0x00404000U; }
static inline u32 gr_fe_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_fe_hww_esr_en_enable_f(void) { return 0x80000000U; }
static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(void) { return 0x00419eacU; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r(void) { return 0x0050472cU; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) { return 0x4U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_int_report_f(void) { return 0x10U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_pause_report_f(void) { return 0x20U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_single_step_complete_report_f(void) { return 0x40U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_error_in_trap_report_f(void) { return 0x100U; }
static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_r(void) { return 0x00419eb4U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_r(void) { return 0x00504734U; }
/* bit 4: breakpoint interrupt */
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_m(void) { return 0x1U << 4U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f(void) { return 0x10U; }
/* bit 5: breakpoint pause */
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_m(void) { return 0x1U << 5U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(void) { return 0x20U; }
/* bit 6: single-step complete */
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_m(void) { return 0x1U << 6U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f(void) { return 0x40U; }
/* bit 2: multiple warp errors */
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_m(void) { return 0x1U << 2U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f(void) { return 0x4U; }
/* bit 8: error in trap handler */
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_m(void) { return 0x1U << 8U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_pending_f(void) { return 0x100U; }
/*
 * FE go-idle timeout, FE object/TPC tables, MME shadow RAM access and
 * MME/MEMFMT HWW ESR registers.
 */
static inline u32 gr_fe_go_idle_timeout_r(void) { return 0x00404154U; }
static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) { return 0x0U; }
static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) { return 0x1800U; }
/* Indexed register arrays: stride of 4 bytes per entry. */
static inline u32 gr_fe_object_table_r(u32 i) { return 0x00404200U + i*4U; }
static inline u32 gr_fe_object_table_nvclass_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 gr_fe_tpc_fs_r(u32 i) { return 0x0040a200U + i*4U; }
static inline u32 gr_pri_mme_shadow_raw_index_r(void) { return 0x00404488U; }
static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) { return 0x80000000U; }
static inline u32 gr_pri_mme_shadow_raw_data_r(void) { return 0x0040448cU; }
static inline u32 gr_mme_hww_esr_r(void) { return 0x00404490U; }
static inline u32 gr_mme_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_mme_hww_esr_en_enable_f(void) { return 0x80000000U; }
static inline u32 gr_memfmt_hww_esr_r(void) { return 0x00404600U; }
static inline u32 gr_memfmt_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_memfmt_hww_esr_en_enable_f(void) { return 0x80000000U; }
/*
 * FECS falcon microcontroller control: CPU start, DMA control, status
 * mailboxes, interrupt registers, context pointers and the ICD (debug
 * command) interface.
 */
static inline u32 gr_fecs_cpuctl_r(void) { return 0x00409100U; }
static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_fecs_cpuctl_alias_r(void) { return 0x00409130U; }
static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_fecs_dmactl_r(void) { return 0x0040910cU; }
static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) { return (v & 0x1U) << 0U; }
/* Scrubbing-in-progress bits: DMEM = bit 1, IMEM = bit 2. */
static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) { return 0x1U << 1U; }
static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) { return 0x1U << 2U; }
static inline u32 gr_fecs_os_r(void) { return 0x00409080U; }
static inline u32 gr_fecs_idlestate_r(void) { return 0x0040904cU; }
static inline u32 gr_fecs_mailbox0_r(void) { return 0x00409040U; }
static inline u32 gr_fecs_mailbox1_r(void) { return 0x00409044U; }
static inline u32 gr_fecs_irqstat_r(void) { return 0x00409008U; }
static inline u32 gr_fecs_irqmode_r(void) { return 0x0040900cU; }
static inline u32 gr_fecs_irqmask_r(void) { return 0x00409018U; }
static inline u32 gr_fecs_irqdest_r(void) { return 0x0040901cU; }
static inline u32 gr_fecs_curctx_r(void) { return 0x00409050U; }
static inline u32 gr_fecs_nxtctx_r(void) { return 0x00409054U; }
static inline u32 gr_fecs_engctl_r(void) { return 0x004090a4U; }
static inline u32 gr_fecs_debug1_r(void) { return 0x00409090U; }
static inline u32 gr_fecs_debuginfo_r(void) { return 0x00409094U; }
/* ICD command register: opcode in bits [3:0], index in bits [12:8]. */
static inline u32 gr_fecs_icd_cmd_r(void) { return 0x00409200U; }
static inline u32 gr_fecs_icd_cmd_opc_s(void) { return 4U; }
static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_fecs_icd_cmd_opc_m(void) { return 0xfU << 0U; }
static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) { return (r >> 0U) & 0xfU; }
static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) { return 0x8U; }
static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) { return 0xeU; }
static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) { return (v & 0x1fU) << 8U; }
static inline u32 gr_fecs_icd_rdata_r(void) { return 0x0040920cU; }
/*
 * FECS falcon IMEM/DMEM port windows (auto-increment capable), DMA
 * transfer registers and boot vector.
 * IMEM ports stride by 16 bytes, DMEM ports by 8 bytes.
 */
static inline u32 gr_fecs_imemc_r(u32 i) { return 0x00409180U + i*16U; }
static inline u32 gr_fecs_imemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_fecs_imemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
/* aincw = auto-increment on write, bit 24. */
static inline u32 gr_fecs_imemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_fecs_imemd_r(u32 i) { return 0x00409184U + i*16U; }
static inline u32 gr_fecs_imemt_r(u32 i) { return 0x00409188U + i*16U; }
static inline u32 gr_fecs_imemt_tag_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_fecs_dmemc_r(u32 i) { return 0x004091c0U + i*8U; }
static inline u32 gr_fecs_dmemc_offs_s(void) { return 6U; }
static inline u32 gr_fecs_dmemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_fecs_dmemc_offs_m(void) { return 0x3fU << 2U; }
static inline u32 gr_fecs_dmemc_offs_v(u32 r) { return (r >> 2U) & 0x3fU; }
static inline u32 gr_fecs_dmemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_fecs_dmemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_fecs_dmemd_r(u32 i) { return 0x004091c4U + i*8U; }
static inline u32 gr_fecs_dmatrfbase_r(void) { return 0x00409110U; }
static inline u32 gr_fecs_dmatrfmoffs_r(void) { return 0x00409114U; }
static inline u32 gr_fecs_dmatrffboffs_r(void) { return 0x0040911cU; }
static inline u32 gr_fecs_dmatrfcmd_r(void) { return 0x00409118U; }
static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_fecs_bootvec_r(void) { return 0x00409104U; }
static inline u32 gr_fecs_bootvec_vec_f(u32 v) { return (v & 0xffffffffU) << 0U; }
/*
 * FECS/GPCCS falcon hardware config and FECS current-context register.
 * current_ctx layout: pointer [27:0], target aperture [29:28], valid [31].
 */
static inline u32 gr_fecs_falcon_hwcfg_r(void) { return 0x00409108U; }
static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) { return 0x0041a108U; }
static inline u32 gr_fecs_falcon_rm_r(void) { return 0x00409084U; }
static inline u32 gr_fecs_current_ctx_r(void) { return 0x00409b00U; }
static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_current_ctx_target_s(void) { return 2U; }
static inline u32 gr_fecs_current_ctx_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_current_ctx_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_current_ctx_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) { return 0x0U; }
static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) { return 0x20000000U; }
static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) { return 0x30000000U; }
static inline u32 gr_fecs_current_ctx_valid_s(void) { return 1U; }
static inline u32 gr_fecs_current_ctx_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_fecs_current_ctx_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_fecs_current_ctx_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_fecs_current_ctx_valid_false_f(void) { return 0x0U; }
/*
 * FECS method interface: data register plus the push register and the
 * method addresses (opcodes) accepted by the FECS firmware.
 */
static inline u32 gr_fecs_method_data_r(void) { return 0x00409500U; }
static inline u32 gr_fecs_method_push_r(void) { return 0x00409504U; }
static inline u32 gr_fecs_method_push_adr_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) { return 0x00000003U; }
static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) { return 0x3U; }
static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) { return 0x00000010U; }
static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) { return 0x00000009U; }
static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) { return 0x00000015U; }
static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) { return 0x00000016U; }
static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) { return 0x00000025U; }
static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) { return 0x00000030U; }
static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) { return 0x00000031U; }
static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) { return 0x00000032U; }
static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) { return 0x00000038U; }
static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) { return 0x00000039U; }
static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) { return 0x21U; }
static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) { return 0x0000001aU; }
static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) { return 0x00000004U; }
static inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void) { return 0x0000003aU; }
/*
 * FECS host interrupt registers: status, clear and enable.
 * ctxsw interrupts occupy bits [15:0]; fault/umimp/watchdog/ECC bits
 * start at bit 16.
 */
static inline u32 gr_fecs_host_int_status_r(void) { return 0x00409c18U; }
static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) { return (v & 0x1U) << 16U; }
static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) { return (v & 0x1U) << 17U; }
static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) { return (v & 0x1U) << 18U; }
static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_fecs_host_int_status_ecc_corrected_f(u32 v) { return (v & 0x1U) << 21U; }
static inline u32 gr_fecs_host_int_status_ecc_corrected_m(void) { return 0x1U << 21U; }
static inline u32 gr_fecs_host_int_status_ecc_uncorrected_f(u32 v) { return (v & 0x1U) << 22U; }
static inline u32 gr_fecs_host_int_status_ecc_uncorrected_m(void) { return 0x1U << 22U; }
static inline u32 gr_fecs_host_int_clear_r(void) { return 0x00409c20U; }
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) { return 0x2U; }
static inline u32 gr_fecs_host_int_enable_r(void) { return 0x00409c24U; }
static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) { return 0x2U; }
static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) { return 0x10000U; }
static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) { return 0x20000U; }
static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) { return 0x40000U; }
static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) { return 0x80000U; }
/*
 * FECS context-switch reset control and the 16-entry ctxsw mailbox array
 * (with write-one-to-set / write-one-to-clear aliases).
 */
static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) { return 0x00409614U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) { return 0x10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) { return 0x20U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) { return 0x40U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) { return 0x100U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) { return 0x200U; }
/* BE context reset field: single bit at position 10. */
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) { return 1U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) { return (v & 0x1U) << 10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) { return 0x1U << 10U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) { return (r >> 10U) & 0x1U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) { return 0x0U; }
static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) { return 0x400U; }
static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) { return 0x0040960cU; }
static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) { return 0x00409800U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) { return 0x00000010U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) { return 0x00000001U; }
static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) { return 0x00000002U; }
static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) { return 0x004098c0U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) { return 0x00409840U + i*4U; }
static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
/*
 * FECS floorsweep status (available GPC/FBP counts), config, RC lanes
 * and ctxsw status 1 (arbiter-busy bit).
 */
static inline u32 gr_fecs_fs_r(void) { return 0x00409604U; }
/* num_available_gpcs: 5-bit field at [4:0]. */
static inline u32 gr_fecs_fs_num_available_gpcs_s(void) { return 5U; }
static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_fecs_fs_num_available_gpcs_m(void) { return 0x1fU << 0U; }
static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) { return (r >> 0U) & 0x1fU; }
/* num_available_fbps: 5-bit field at [20:16]. */
static inline u32 gr_fecs_fs_num_available_fbps_s(void) { return 5U; }
static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) { return (v & 0x1fU) << 16U; }
static inline u32 gr_fecs_fs_num_available_fbps_m(void) { return 0x1fU << 16U; }
static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) { return (r >> 16U) & 0x1fU; }
static inline u32 gr_fecs_cfg_r(void) { return 0x00409620U; }
static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_fecs_rc_lanes_r(void) { return 0x00409880U; }
static inline u32 gr_fecs_rc_lanes_num_chains_s(void) { return 6U; }
static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_fecs_rc_lanes_num_chains_m(void) { return 0x3fU << 0U; }
static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_fecs_ctxsw_status_1_r(void) { return 0x00409400U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) { return 1U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) { return (v & 0x1U) << 12U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) { return 0x1U << 12U; }
static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) { return (r >> 12U) & 0x1U; }
/*
 * FECS arbiter context registers (new_ctx, arb_ctx_ptr, arb_ctx_cmd),
 * ctxsw/idle status registers and the ECC feature-override register.
 * new_ctx/arb_ctx_ptr share the current_ctx layout: ptr [27:0],
 * target [29:28], valid [31].
 */
static inline u32 gr_fecs_arb_ctx_adr_r(void) { return 0x00409a24U; }
static inline u32 gr_fecs_new_ctx_r(void) { return 0x00409b04U; }
static inline u32 gr_fecs_new_ctx_ptr_s(void) { return 28U; }
static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_new_ctx_ptr_m(void) { return 0xfffffffU << 0U; }
static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_new_ctx_target_s(void) { return 2U; }
static inline u32 gr_fecs_new_ctx_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_new_ctx_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_new_ctx_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_new_ctx_valid_s(void) { return 1U; }
static inline u32 gr_fecs_new_ctx_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_fecs_new_ctx_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_fecs_new_ctx_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_fecs_arb_ctx_ptr_r(void) { return 0x00409a0cU; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) { return 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) { return 0xfffffffU << 0U; }
static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) { return (r >> 0U) & 0xfffffffU; }
static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) { return 2U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) { return (v & 0x3U) << 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) { return 0x3U << 28U; }
static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) { return (r >> 28U) & 0x3U; }
static inline u32 gr_fecs_arb_ctx_cmd_r(void) { return 0x00409a10U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) { return 5U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) { return 0x1fU << 0U; }
static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) { return 0x00409c00U; }
static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) { return 0x00502c04U; }
static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) { return 0x00502400U; }
static inline u32 gr_fecs_ctxsw_idlestate_r(void) { return 0x00409420U; }
/* ECC feature override: *_override_v = override-enable bit, plain *_v =
 * the overridden on/off value for that unit. */
static inline u32 gr_fecs_feature_override_ecc_r(void) { return 0x00409658U; }
static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r) { return (r >> 3U) & 0x1U; }
static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r) { return (r >> 15U) & 0x1U; }
static inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r) { return (r >> 12U) & 0x1U; }
static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) { return 0x00502420U; }
/*
 * RSTR2D GPC map and PD (pipe distributor) registers: per-GPC TPC counts,
 * alpha/beta distribution config and the GPC skip table.
 */
static inline u32 gr_rstr2d_gpc_map_r(u32 i) { return 0x0040780cU + i*4U; }
static inline u32 gr_rstr2d_map_table_cfg_r(void) { return 0x004078bcU; }
static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_pd_hww_esr_r(void) { return 0x00406018U; }
static inline u32 gr_pd_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_pd_hww_esr_en_enable_f(void) { return 0x80000000U; }
/* Eight 4-bit TPC counts per register, one nibble per GPC. */
static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) { return 0x00406028U + i*4U; }
static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) { return 0x00000004U; }
static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) { return (v & 0xfU) << 4U; }
static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) { return (v & 0xfU) << 12U; }
static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) { return (v & 0xfU) << 16U; }
static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) { return (v & 0xfU) << 20U; }
static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) { return (v & 0xfU) << 24U; }
static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) { return (v & 0xfU) << 28U; }
static inline u32 gr_pd_ab_dist_cfg0_r(void) { return 0x004064c0U; }
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) { return 0x80000000U; }
static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) { return 0x0U; }
static inline u32 gr_pd_ab_dist_cfg1_r(void) { return 0x004064c4U; }
static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) { return 0xffffU; }
static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) { return 0x00000080U; }
static inline u32 gr_pd_ab_dist_cfg2_r(void) { return 0x004064c8U; }
static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) { return (v & 0x1fffU) << 0U; }
static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) { return 0x00000380U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) { return (v & 0x1fffU) << 16U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) { return 0x00000302U; }
/* Skip table: four 8-bit GPC masks per 32-bit entry (GPC 4n..4n+3). */
static inline u32 gr_pd_dist_skip_table_r(u32 i) { return 0x004064d0U + i*4U; }
static inline u32 gr_pd_dist_skip_table__size_1_v(void) { return 0x00000008U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) { return (v & 0xffU) << 24U; }
/*
 * DS debug and ZBC (zero-bandwidth clear) registers: color component
 * values, color/depth formats, table index and table-load trigger.
 */
static inline u32 gr_ds_debug_r(void) { return 0x00405800U; }
static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) { return 0x0U; }
static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) { return 0x8000000U; }
static inline u32 gr_ds_zbc_color_r_r(void) { return 0x00405804U; }
static inline u32 gr_ds_zbc_color_r_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_g_r(void) { return 0x00405808U; }
static inline u32 gr_ds_zbc_color_g_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_b_r(void) { return 0x0040580cU; }
static inline u32 gr_ds_zbc_color_b_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_a_r(void) { return 0x00405810U; }
static inline u32 gr_ds_zbc_color_a_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_color_fmt_r(void) { return 0x00405814U; }
static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) { return (v & 0x7fU) << 0U; }
static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) { return 0x00000001U; }
static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) { return 0x00000002U; }
static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) { return 0x00000004U; }
static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) { return 0x00000028U; }
static inline u32 gr_ds_zbc_z_r(void) { return 0x00405818U; }
static inline u32 gr_ds_zbc_z_val_s(void) { return 32U; }
static inline u32 gr_ds_zbc_z_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_ds_zbc_z_val_m(void) { return 0xffffffffU << 0U; }
static inline u32 gr_ds_zbc_z_val_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
static inline u32 gr_ds_zbc_z_val__init_v(void) { return 0x00000000U; }
static inline u32 gr_ds_zbc_z_val__init_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_z_fmt_r(void) { return 0x0040581cU; }
static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) { return 0x00000001U; }
static inline u32 gr_ds_zbc_tbl_index_r(void) { return 0x00405820U; }
static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_ds_zbc_tbl_ld_r(void) { return 0x00405824U; }
static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) { return 0x1U; }
static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) { return 0x0U; }
static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) { return 0x4U; }
/*
 * DS TGA constraint logic (alpha/beta CB sizes), DS HWW ESR/ESR2 and the
 * SPH error report masks. sphN_err_report_f is bit N of the first mask
 * register for N = 0..23; sph24 and above continue in report_mask_2.
 */
static inline u32 gr_ds_tga_constraintlogic_beta_r(void) { return 0x00405830U; }
static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_ds_tga_constraintlogic_alpha_r(void) { return 0x0040585cU; }
static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_ds_hww_esr_r(void) { return 0x00405840U; }
static inline u32 gr_ds_hww_esr_reset_s(void) { return 1U; }
static inline u32 gr_ds_hww_esr_reset_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 gr_ds_hww_esr_reset_m(void) { return 0x1U << 30U; }
static inline u32 gr_ds_hww_esr_reset_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 gr_ds_hww_esr_reset_task_v(void) { return 0x00000001U; }
static inline u32 gr_ds_hww_esr_reset_task_f(void) { return 0x40000000U; }
static inline u32 gr_ds_hww_esr_en_enabled_f(void) { return 0x80000000U; }
static inline u32 gr_ds_hww_esr_2_r(void) { return 0x00405848U; }
static inline u32 gr_ds_hww_esr_2_reset_s(void) { return 1U; }
static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 gr_ds_hww_esr_2_reset_m(void) { return 0x1U << 30U; }
static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 gr_ds_hww_esr_2_reset_task_v(void) { return 0x00000001U; }
static inline u32 gr_ds_hww_esr_2_reset_task_f(void) { return 0x40000000U; }
static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) { return 0x80000000U; }
static inline u32 gr_ds_hww_report_mask_r(void) { return 0x00405844U; }
static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) { return 0x1U; }
static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) { return 0x2U; }
static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) { return 0x4U; }
static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) { return 0x8U; }
static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) { return 0x10U; }
static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) { return 0x20U; }
static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) { return 0x40U; }
static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) { return 0x80U; }
static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) { return 0x100U; }
static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) { return 0x200U; }
static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) { return 0x400U; }
static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) { return 0x800U; }
static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) { return 0x1000U; }
static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) { return 0x2000U; }
static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) { return 0x4000U; }
static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) { return 0x8000U; }
static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) { return 0x10000U; }
static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) { return 0x20000U; }
static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) { return 0x40000U; }
static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) { return 0x80000U; }
static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) { return 0x100000U; }
static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) { return 0x200000U; }
static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) { return 0x400000U; }
static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) { return 0x800000U; }
static inline u32 gr_ds_hww_report_mask_2_r(void) { return 0x0040584cU; }
2415static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void)
2416{
2417 return 0x1U;
2418}
2419static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i)
2420{
2421 return 0x00405870U + i*4U;
2422}
/*
 * GR SCC unit: bundle circular-buffer base/size, pagepool configuration,
 * RAM init trigger and HWW ESR control.
 */
static inline u32 gr_scc_bundle_cb_base_r(void) { return 0x00408004U; }
static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) { return 0x00000008U; }
static inline u32 gr_scc_bundle_cb_size_r(void) { return 0x00408008U; }
static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) { return (v & 0x7ffU) << 0U; }
static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) { return 0x00000030U; }
static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) { return 0x00000100U; }
static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) { return 0x00000000U; }
static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) { return 0x0U; }
static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_scc_pagepool_base_r(void) { return 0x0040800cU; }
static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) { return 0x00000008U; }
static inline u32 gr_scc_pagepool_r(void) { return 0x00408010U; }
static inline u32 gr_scc_pagepool_total_pages_f(u32 v) { return (v & 0x3ffU) << 0U; }
static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) { return 0x00000000U; }
static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) { return 0x00000200U; }
static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) { return 0x00000100U; }
static inline u32 gr_scc_pagepool_max_valid_pages_s(void) { return 10U; }
static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) { return (v & 0x3ffU) << 10U; }
static inline u32 gr_scc_pagepool_max_valid_pages_m(void) { return 0x3ffU << 10U; }
static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) { return (r >> 10U) & 0x3ffU; }
static inline u32 gr_scc_pagepool_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_scc_init_r(void) { return 0x0040802cU; }
static inline u32 gr_scc_init_ram_trigger_f(void) { return 0x1U; }
static inline u32 gr_scc_hww_esr_r(void) { return 0x00408030U; }
static inline u32 gr_scc_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_scc_hww_esr_en_enable_f(void) { return 0x80000000U; }
/* GR SKED HWW ESR status/enable, including the skedcheck18 L1-config check. */
static inline u32 gr_sked_hww_esr_r(void) { return 0x00407020U; }
static inline u32 gr_sked_hww_esr_reset_active_f(void) { return 0x40000000U; }
static inline u32 gr_sked_hww_esr_en_r(void) { return 0x00407024U; }
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(void) { return 0x1U << 25U; }
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f(void) { return 0x0U; }
static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f(void) { return 0x2000000U; }
/* GR CWD: floorsweep counts, GPC/TPC id mapping array and SM id table. */
static inline u32 gr_cwd_fs_r(void) { return 0x00405b00U; }
static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) { return 0x00405b60U + i*4U; }
static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) { return 4U; }
static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) { return 4U; }
static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) { return (v & 0xfU) << 4U; }
static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_cwd_sm_id_r(u32 i) { return 0x00405ba0U + i*4U; }
static inline u32 gr_cwd_sm_id__size_1_v(void) { return 0x00000010U; }
static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) { return (v & 0xffU) << 8U; }
/* GPC0 floorsweep/config status and GPCCS rc-lane configuration. */
static inline u32 gr_gpc0_fs_gpc_r(void) { return 0x00502608U; }
static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) { return (r >> 16U) & 0x1fU; }
static inline u32 gr_gpc0_cfg_r(void) { return 0x00502620U; }
static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_gpccs_rc_lanes_r(void) { return 0x00502880U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) { return 6U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) { return 0x3fU << 0U; }
static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_gpccs_rc_lane_size_r(void) { return 0x00502910U; }
static inline u32 gr_gpccs_rc_lane_size_v_s(void) { return 24U; }
static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) { return (v & 0xffffffU) << 0U; }
static inline u32 gr_gpccs_rc_lane_size_v_m(void) { return 0xffffffU << 0U; }
static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) { return (r >> 0U) & 0xffffffU; }
static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) { return 0x0U; }
/* GPC0 ZCULL setup (floorsweep, RAM layout, sizes) and GPM PD SM-id tables. */
static inline u32 gr_gpc0_zcull_fs_r(void) { return 0x00500910U; }
static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) { return (v & 0x1ffU) << 0U; }
static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) { return (v & 0xfU) << 16U; }
static inline u32 gr_gpc0_zcull_ram_addr_r(void) { return 0x00500914U; }
static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) { return (v & 0xfU) << 8U; }
static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) { return 0x00500918U; }
static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) { return (v & 0xffffffU) << 0U; }
static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) { return 0x00800000U; }
static inline u32 gr_gpc0_zcull_total_ram_size_r(void) { return 0x00500920U; }
static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) { return 0x00500a04U + i*32U; }
static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) { return 0x00000040U; }
static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) { return 0x00000010U; }
static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) { return 0x00500c10U + i*4U; }
static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) { return 0x00500c30U + i*4U; }
static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) { return (r >> 0U) & 0xffU; }
/*
 * GPC0 TPC0 PE/SM configuration and SM architecture identification, plus
 * GPC0 PPC0 circular-buffer (alpha/beta) sizes and offsets.
 */
static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) { return 0x00504088U; }
static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) { return 0x00504608U; }
static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 gr_gpc0_tpc0_sm_arch_r(void) { return 0x00504330U; }
static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) { return (r >> 8U) & 0xfffU; }
static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) { return (r >> 20U) & 0xfffU; }
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) { return 0x00503018U; }
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) { return 0x1U << 0U; }
static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) { return 0x1U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) { return 0x005030c0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) { return 0x3fffffU << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) { return 0x00000800U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) { return 0x00001100U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) { return 0x005030f4U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) { return 0x005030e4U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) { return 0xffffU << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) { return 0x00000800U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) { return 0x00000020U; }
static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) { return 0x005030f8U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) { return 0x005030f0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) { return 0x00000800U; }
/* GPCS TPCS TEX RM circular buffer: base address (cb_0) and size/valid (cb_1). */
static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) { return 0x00419e00U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) { return 0x00419e04U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) { return 21U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) { return (v & 0x1fffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) { return 0x1fffffU << 0U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) { return (r >> 0U) & 0x1fffffU; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) { return 0x80U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) { return 1U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) { return 0x80000000U; }
/*
 * GPCCS falcon microcontroller interface: address register (lsb/msb/ext
 * fields), CPU control, DMA control/scrub status, IMEM/DMEM access ports
 * and the ctxsw mailbox array.
 */
static inline u32 gr_gpccs_falcon_addr_r(void) { return 0x0041a0acU; }
static inline u32 gr_gpccs_falcon_addr_lsb_s(void) { return 6U; }
static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) { return (v & 0x3fU) << 0U; }
static inline u32 gr_gpccs_falcon_addr_lsb_m(void) { return 0x3fU << 0U; }
static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) { return (r >> 0U) & 0x3fU; }
static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) { return 0x0U; }
static inline u32 gr_gpccs_falcon_addr_msb_s(void) { return 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) { return (v & 0x3fU) << 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_m(void) { return 0x3fU << 6U; }
static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) { return (r >> 6U) & 0x3fU; }
static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) { return 0x0U; }
static inline u32 gr_gpccs_falcon_addr_ext_s(void) { return 12U; }
static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_gpccs_falcon_addr_ext_m(void) { return 0xfffU << 0U; }
static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) { return (r >> 0U) & 0xfffU; }
static inline u32 gr_gpccs_cpuctl_r(void) { return 0x0041a100U; }
static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_gpccs_dmactl_r(void) { return 0x0041a10cU; }
static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) { return 0x1U << 1U; }
static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) { return 0x1U << 2U; }
static inline u32 gr_gpccs_imemc_r(u32 i) { return 0x0041a180U + i*16U; }
static inline u32 gr_gpccs_imemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_gpccs_imemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_gpccs_imemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_gpccs_imemd_r(u32 i) { return 0x0041a184U + i*16U; }
static inline u32 gr_gpccs_imemt_r(u32 i) { return 0x0041a188U + i*16U; }
static inline u32 gr_gpccs_imemt__size_1_v(void) { return 0x00000004U; }
static inline u32 gr_gpccs_imemt_tag_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpccs_dmemc_r(u32 i) { return 0x0041a1c0U + i*8U; }
static inline u32 gr_gpccs_dmemc_offs_f(u32 v) { return (v & 0x3fU) << 2U; }
static inline u32 gr_gpccs_dmemc_blk_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 gr_gpccs_dmemd_r(u32 i) { return 0x0041a1c4U + i*8U; }
static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) { return 0x0041a800U + i*4U; }
static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) { return (v & 0xffffffffU) << 0U; }
/*
 * GPCS SWDX bundle circular buffer (base address, size, valid bit) and the
 * GPC0 SWDX RM spill buffer size/address.
 */
static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) { return 0x00418e24U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) { return 32U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) { return 0xffffffffU << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) { return 0x0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) { return 0x00418e28U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) { return 11U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) { return (v & 0x7ffU) << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) { return 0x7ffU << 0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) { return (r >> 0U) & 0x7ffU; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) { return 0x00000030U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) { return 0x30U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) { return 1U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) { return 0x1U << 31U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) { return (r >> 31U) & 0x1U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) { return 0x00000000U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) { return 0x0U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) { return 0x00000001U; }
static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) { return 0x005001dcU; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) { return 0x00000170U; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) { return 0x00000100U; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) { return 0x005001d8U; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) { return 0x00000008U; }
/*
 * SWDX/PPCS beta-CB CBES reserve controls, per-TC beta CB sizes, and the
 * SWDX DSS ZBC clear-value tables (color R/G/B/A, depth Z, stencil S)
 * with their format registers.
 */
static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) { return 0x004181e4U; }
static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) { return 0x00000100U; }
static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) { return 0x0041befcU; }
static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) { return (v & 0xfffU) << 0U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) { return 0x00418ea0U + i*4U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) { return (v & 0x3fffffU) << 0U; }
static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) { return 0x3fffffU << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) { return 0x00418010U + i*4U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) { return 0x0041804cU + i*4U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) { return 0x00418088U + i*4U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) { return 0x004180c4U + i*4U; }
static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) { return 0x00418100U; }
static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) { return 0x00418110U + i*4U; }
static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) { return 0x0041814cU; }
static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i) { return 0x0041815cU + i*4U; }
static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r(void) { return 0x00418198U; }
/*
 * GPCS SETUP attribute CB base, CRSTR screen-tile GPC map and table config,
 * ZCULL SM-in-GPC number map, GPM PD config, and GCC pagepool registers.
 */
static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) { return 0x00418810U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) { return 0x0000000cU; }
static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) { return 0x80000000U; }
static inline u32 gr_crstr_gpc_map_r(u32 i) { return 0x00418b08U + i*4U; }
static inline u32 gr_crstr_gpc_map_tile0_f(u32 v) { return (v & 0x1fU) << 0U; }
static inline u32 gr_crstr_gpc_map_tile1_f(u32 v) { return (v & 0x1fU) << 5U; }
static inline u32 gr_crstr_gpc_map_tile2_f(u32 v) { return (v & 0x1fU) << 10U; }
static inline u32 gr_crstr_gpc_map_tile3_f(u32 v) { return (v & 0x1fU) << 15U; }
static inline u32 gr_crstr_gpc_map_tile4_f(u32 v) { return (v & 0x1fU) << 20U; }
static inline u32 gr_crstr_gpc_map_tile5_f(u32 v) { return (v & 0x1fU) << 25U; }
static inline u32 gr_crstr_map_table_cfg_r(void) { return 0x00418bb8U; }
static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) { return (v & 0xffU) << 8U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i) { return 0x00418980U + i*4U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v) { return (v & 0x7U) << 0U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(u32 v) { return (v & 0x7U) << 4U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(u32 v) { return (v & 0x7U) << 8U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(u32 v) { return (v & 0x7U) << 12U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(u32 v) { return (v & 0x7U) << 16U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(u32 v) { return (v & 0x7U) << 20U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(u32 v) { return (v & 0x7U) << 24U; }
static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(u32 v) { return (v & 0x7U) << 28U; }
static inline u32 gr_gpcs_gpm_pd_cfg_r(void) { return 0x00418c6cU; }
static inline u32 gr_gpcs_gcc_pagepool_base_r(void) { return 0x00419004U; }
static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 gr_gpcs_gcc_pagepool_r(void) { return 0x00419008U; }
static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) { return (v & 0x3ffU) << 0U; }
/*
 * GPCS TPCS PE VAF/pin-CB and MPC VTG debug/CB registers, plus the SM HWW
 * warp ESR report-mask register and its per-error enable bits.
 */
static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) { return 0x0041980cU; }
static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) { return 0x10U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) { return 0x00419848U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) { return (v & 0x1U) << 28U; }
static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) { return 0x10000000U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) { return 0x00419c00U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) { return 0x0U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) { return 0x8U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) { return 0x00419c2cU; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) { return (v & 0xfffffffU) << 0U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) { return (v & 0x1U) << 28U; }
static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) { return 0x10000000U; }
static inline u32 gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(void) { return 0x00419ea8U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r(void) { return 0x00504728U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f(void) { return 0x2U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f(void) { return 0x4U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f(void) { return 0x10U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f(void) { return 0x20U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f(void) { return 0x40U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f(void) { return 0x100U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) { return 0x200U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) { return 0x800U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f(void) { return 0x2000U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f(void) { return 0x4000U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f(void) { return 0x8000U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) { return 0x10000U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) { return 0x40000U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f(void) { return 0x800000U; }
static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f(void) { return 0x400000U; }
/*
 * TPC and GPC exception enables (broadcast and GPC0/TPC0 unicast) and the
 * GPC0 GPCCS exception status register fields.
 */
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) { return 0x00419d0cU; }
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) { return 0x2U; }
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) { return 0x1U; }
static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void) { return 0x10U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) { return 0x0050450cU; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) { return (r >> 1U) & 0x1U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) { return 0x2U; }
static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void) { return 0x10U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) { return 0x0041ac94U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_gcc_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) { return (v & 0xffU) << 16U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_gpccs_f(u32 v) { return (v & 0x1U) << 14U; }
static inline u32 gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(u32 v) { return (v & 0x1U) << 15U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) { return 0x00502c90U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) { return (r >> 2U) & 0x1U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) { return (r >> 16U) & 0xffU; }
static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) { return 0x00000001U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_f(u32 v) { return (v & 0x1U) << 14U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_m(void) { return 0x1U << 14U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_pending_f(void) { return 0x4000U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_f(u32 v) { return (v & 0x1U) << 15U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_m(void) { return 0x1U << 15U; }
static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_pending_f(void) { return 0x8000U; }
3599static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_r(void)
3600{
3601 return 0x00501048U;
3602}
3603static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank0_m(void)
3604{
3605 return 0x1U << 0U;
3606}
3607static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank1_m(void)
3608{
3609 return 0x1U << 1U;
3610}
3611static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank0_m(void)
3612{
3613 return 0x1U << 4U;
3614}
3615static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank1_m(void)
3616{
3617 return 0x1U << 5U;
3618}
3619static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_total_counter_overflow_v(u32 r)
3620{
3621 return (r >> 8U) & 0x1U;
3622}
3623static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r)
3624{
3625 return (r >> 10U) & 0x1U;
3626}
3627static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_reset_task_f(void)
3628{
3629 return 0x40000000U;
3630}
3631static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r(void)
3632{
3633 return 0x0050104cU;
3634}
3635static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s(void)
3636{
3637 return 16U;
3638}
3639static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_v(u32 r)
3640{
3641 return (r >> 0U) & 0xffffU;
3642}
3643static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r(void)
3644{
3645 return 0x00501054U;
3646}
3647static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s(void)
3648{
3649 return 16U;
3650}
3651static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_v(u32 r)
3652{
3653 return (r >> 0U) & 0xffffU;
3654}
3655static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void)
3656{
3657 return 0x00504508U;
3658}
3659static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r)
3660{
3661 return (r >> 0U) & 0x1U;
3662}
3663static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void)
3664{
3665 return 0x00000001U;
3666}
3667static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r)
3668{
3669 return (r >> 1U) & 0x1U;
3670}
3671static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void)
3672{
3673 return 0x00000001U;
3674}
3675static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void)
3676{
3677 return 0x1U << 4U;
3678}
3679static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void)
3680{
3681 return 0x10U;
3682}
3683static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void)
3684{
3685 return 0x00504704U;
3686}
3687static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m(void)
3688{
3689 return 0x1U << 0U;
3690}
3691static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(u32 r)
3692{
3693 return (r >> 0U) & 0x1U;
3694}
3695static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v(void)
3696{
3697 return 0x00000001U;
3698}
3699static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f(void)
3700{
3701 return 0x1U;
3702}
3703static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_v(void)
3704{
3705 return 0x00000000U;
3706}
3707static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f(void)
3708{
3709 return 0x0U;
3710}
3711static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(void)
3712{
3713 return 0x1U << 31U;
3714}
3715static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f(void)
3716{
3717 return 0x80000000U;
3718}
3719static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f(void)
3720{
3721 return 0x0U;
3722}
3723static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(void)
3724{
3725 return 0x1U << 3U;
3726}
3727static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f(void)
3728{
3729 return 0x8U;
3730}
3731static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f(void)
3732{
3733 return 0x0U;
3734}
3735static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f(void)
3736{
3737 return 0x40000000U;
3738}
3739static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_0_r(void)
3740{
3741 return 0x00504708U;
3742}
3743static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_1_r(void)
3744{
3745 return 0x0050470cU;
3746}
3747static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r(void)
3748{
3749 return 0x00504710U;
3750}
3751static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r(void)
3752{
3753 return 0x00504714U;
3754}
3755static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r(void)
3756{
3757 return 0x00504718U;
3758}
3759static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r(void)
3760{
3761 return 0x0050471cU;
3762}
3763static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(void)
3764{
3765 return 0x00419e90U;
3766}
3767static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r(void)
3768{
3769 return 0x00419e94U;
3770}
3771static inline u32 gr_gpcs_tpcs_sms_dbgr_status0_r(void)
3772{
3773 return 0x00419e80U;
3774}
3775static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_r(void)
3776{
3777 return 0x00504700U;
3778}
3779static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_sm_in_trap_mode_v(u32 r)
3780{
3781 return (r >> 0U) & 0x1U;
3782}
3783static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(u32 r)
3784{
3785 return (r >> 4U) & 0x1U;
3786}
3787static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v(void)
3788{
3789 return 0x00000001U;
3790}
3791static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_r(void)
3792{
3793 return 0x00504730U;
3794}
3795static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(u32 r)
3796{
3797 return (r >> 0U) & 0xffffU;
3798}
3799static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v(void)
3800{
3801 return 0x00000000U;
3802}
3803static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_f(void)
3804{
3805 return 0x0U;
3806}
3807static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m(void)
3808{
3809 return 0xffU << 16U;
3810}
3811static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(void)
3812{
3813 return 0xfU << 24U;
3814}
3815static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f(void)
3816{
3817 return 0x0U;
3818}
3819static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r(void)
3820{
3821 return 0x0050460cU;
3822}
3823static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(u32 r)
3824{
3825 return (r >> 0U) & 0x1U;
3826}
3827static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(u32 r)
3828{
3829 return (r >> 1U) & 0x1U;
3830}
3831static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r(void)
3832{
3833 return 0x00504738U;
3834}
3835static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void)
3836{
3837 return 0x005043a0U;
3838}
3839static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void)
3840{
3841 return 0x00419ba0U;
3842}
3843static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void)
3844{
3845 return 0x1U << 4U;
3846}
3847static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v)
3848{
3849 return (v & 0x1U) << 4U;
3850}
3851static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void)
3852{
3853 return 0x005043b0U;
3854}
3855static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void)
3856{
3857 return 0x00419bb0U;
3858}
3859static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void)
3860{
3861 return 0x1U << 0U;
3862}
3863static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v)
3864{
3865 return (v & 0x1U) << 0U;
3866}
3867static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void)
3868{
3869 return 0x0041be08U;
3870}
3871static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void)
3872{
3873 return 0x4U;
3874}
3875static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i)
3876{
3877 return 0x0041bf00U + i*4U;
3878}
3879static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void)
3880{
3881 return 0x0041bfd0U;
3882}
3883static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v)
3884{
3885 return (v & 0xffU) << 0U;
3886}
3887static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v)
3888{
3889 return (v & 0xffU) << 8U;
3890}
3891static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v)
3892{
3893 return (v & 0x1fU) << 16U;
3894}
3895static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v)
3896{
3897 return (v & 0x7U) << 21U;
3898}
3899static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void)
3900{
3901 return 0x0041bfd4U;
3902}
3903static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v)
3904{
3905 return (v & 0xffffffU) << 0U;
3906}
3907static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i)
3908{
3909 return 0x0041bfb0U + i*4U;
3910}
3911static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void)
3912{
3913 return 0x00000005U;
3914}
3915static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(u32 v)
3916{
3917 return (v & 0xffU) << 0U;
3918}
3919static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(u32 v)
3920{
3921 return (v & 0xffU) << 8U;
3922}
3923static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(u32 v)
3924{
3925 return (v & 0xffU) << 16U;
3926}
3927static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(u32 v)
3928{
3929 return (v & 0xffU) << 24U;
3930}
3931static inline u32 gr_bes_zrop_settings_r(void)
3932{
3933 return 0x00408850U;
3934}
3935static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v)
3936{
3937 return (v & 0xfU) << 0U;
3938}
3939static inline u32 gr_be0_crop_debug3_r(void)
3940{
3941 return 0x00410108U;
3942}
3943static inline u32 gr_bes_crop_debug3_r(void)
3944{
3945 return 0x00408908U;
3946}
3947static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void)
3948{
3949 return 0x1U << 31U;
3950}
3951static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void)
3952{
3953 return 0x1U << 1U;
3954}
3955static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void)
3956{
3957 return 0x0U;
3958}
3959static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void)
3960{
3961 return 0x2U;
3962}
3963static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void)
3964{
3965 return 0x1U << 2U;
3966}
3967static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void)
3968{
3969 return 0x0U;
3970}
3971static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void)
3972{
3973 return 0x4U;
3974}
3975static inline u32 gr_bes_crop_settings_r(void)
3976{
3977 return 0x00408958U;
3978}
3979static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v)
3980{
3981 return (v & 0xfU) << 0U;
3982}
3983static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void)
3984{
3985 return 0x00000020U;
3986}
3987static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void)
3988{
3989 return 0x00000020U;
3990}
3991static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void)
3992{
3993 return 0x000000c0U;
3994}
3995static inline u32 gr_zcull_subregion_qty_v(void)
3996{
3997 return 0x00000010U;
3998}
3999static inline u32 gr_gpcs_tpcs_tex_in_dbg_r(void)
4000{
4001 return 0x00419a00U;
4002}
4003static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(u32 v)
4004{
4005 return (v & 0x1U) << 19U;
4006}
4007static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(void)
4008{
4009 return 0x1U << 19U;
4010}
4011static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_r(void)
4012{
4013 return 0x00419bf0U;
4014}
4015static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(u32 v)
4016{
4017 return (v & 0x1U) << 5U;
4018}
4019static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(void)
4020{
4021 return 0x1U << 5U;
4022}
4023static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(u32 v)
4024{
4025 return (v & 0x1U) << 10U;
4026}
4027static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(void)
4028{
4029 return 0x1U << 10U;
4030}
4031static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel0_r(void)
4032{
4033 return 0x00584200U;
4034}
4035static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel1_r(void)
4036{
4037 return 0x00584204U;
4038}
4039static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r(void)
4040{
4041 return 0x00584208U;
4042}
4043static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control1_r(void)
4044{
4045 return 0x00584210U;
4046}
4047static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control2_r(void)
4048{
4049 return 0x00584214U;
4050}
4051static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control3_r(void)
4052{
4053 return 0x00584218U;
4054}
4055static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control4_r(void)
4056{
4057 return 0x0058421cU;
4058}
4059static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r(void)
4060{
4061 return 0x0058420cU;
4062}
4063static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter0_control_r(void)
4064{
4065 return 0x00584220U;
4066}
4067static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter1_control_r(void)
4068{
4069 return 0x00584224U;
4070}
4071static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter2_control_r(void)
4072{
4073 return 0x00584228U;
4074}
4075static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter3_control_r(void)
4076{
4077 return 0x0058422cU;
4078}
4079static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter4_control_r(void)
4080{
4081 return 0x00584230U;
4082}
4083static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter5_control_r(void)
4084{
4085 return 0x00584234U;
4086}
4087static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter6_control_r(void)
4088{
4089 return 0x00584238U;
4090}
4091static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter7_control_r(void)
4092{
4093 return 0x0058423cU;
4094}
4095static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter_status_s0_r(void)
4096{
4097 return 0x00584600U;
4098}
4099static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter_status_s1_r(void)
4100{
4101 return 0x00584604U;
4102}
4103static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s0_r(void)
4104{
4105 return 0x00584624U;
4106}
4107static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s0_r(void)
4108{
4109 return 0x00584628U;
4110}
4111static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s0_r(void)
4112{
4113 return 0x0058462cU;
4114}
4115static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s0_r(void)
4116{
4117 return 0x00584630U;
4118}
4119static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s1_r(void)
4120{
4121 return 0x00584634U;
4122}
4123static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s1_r(void)
4124{
4125 return 0x00584638U;
4126}
4127static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s1_r(void)
4128{
4129 return 0x0058463cU;
4130}
4131static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s1_r(void)
4132{
4133 return 0x00584640U;
4134}
4135static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s2_r(void)
4136{
4137 return 0x00584644U;
4138}
4139static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s2_r(void)
4140{
4141 return 0x00584648U;
4142}
4143static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s2_r(void)
4144{
4145 return 0x0058464cU;
4146}
4147static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s2_r(void)
4148{
4149 return 0x00584650U;
4150}
4151static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s3_r(void)
4152{
4153 return 0x00584654U;
4154}
4155static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s3_r(void)
4156{
4157 return 0x00584658U;
4158}
4159static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s3_r(void)
4160{
4161 return 0x0058465cU;
4162}
4163static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s3_r(void)
4164{
4165 return 0x00584660U;
4166}
4167static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter4_r(void)
4168{
4169 return 0x00584614U;
4170}
4171static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter5_r(void)
4172{
4173 return 0x00584618U;
4174}
4175static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter6_r(void)
4176{
4177 return 0x0058461cU;
4178}
4179static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter7_r(void)
4180{
4181 return 0x00584620U;
4182}
4183static inline u32 gr_fe_pwr_mode_r(void)
4184{
4185 return 0x00404170U;
4186}
4187static inline u32 gr_fe_pwr_mode_mode_auto_f(void)
4188{
4189 return 0x0U;
4190}
4191static inline u32 gr_fe_pwr_mode_mode_force_on_f(void)
4192{
4193 return 0x2U;
4194}
4195static inline u32 gr_fe_pwr_mode_req_v(u32 r)
4196{
4197 return (r >> 4U) & 0x1U;
4198}
4199static inline u32 gr_fe_pwr_mode_req_send_f(void)
4200{
4201 return 0x10U;
4202}
4203static inline u32 gr_fe_pwr_mode_req_done_v(void)
4204{
4205 return 0x00000000U;
4206}
4207static inline u32 gr_gpcs_pri_mmu_ctrl_r(void)
4208{
4209 return 0x00418880U;
4210}
4211static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void)
4212{
4213 return 0x1U << 0U;
4214}
4215static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void)
4216{
4217 return 0x1U << 11U;
4218}
4219static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void)
4220{
4221 return 0x1U << 1U;
4222}
4223static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void)
4224{
4225 return 0x1U << 2U;
4226}
4227static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void)
4228{
4229 return 0x3U << 3U;
4230}
4231static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void)
4232{
4233 return 0x3U << 5U;
4234}
4235static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void)
4236{
4237 return 0x3U << 28U;
4238}
4239static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void)
4240{
4241 return 0x1U << 30U;
4242}
4243static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void)
4244{
4245 return 0x1U << 31U;
4246}
4247static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void)
4248{
4249 return 0x00418890U;
4250}
4251static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void)
4252{
4253 return 0x00418894U;
4254}
4255static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void)
4256{
4257 return 0x004188b0U;
4258}
4259static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r)
4260{
4261 return (r >> 16U) & 0x1U;
4262}
4263static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void)
4264{
4265 return 0x00000001U;
4266}
4267static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void)
4268{
4269 return 0x004188b4U;
4270}
4271static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void)
4272{
4273 return 0x004188b8U;
4274}
4275static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void)
4276{
4277 return 0x004188acU;
4278}
4279static inline u32 gr_gpcs_tpcs_sms_dbgr_control0_r(void)
4280{
4281 return 0x00419e84U;
4282}
4283static inline u32 gr_fe_gfxp_wfi_timeout_r(void)
4284{
4285 return 0x004041c0U;
4286}
4287static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v)
4288{
4289 return (v & 0xffffffffU) << 0U;
4290}
4291static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void)
4292{
4293 return 0x0U;
4294}
4295static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void)
4296{
4297 return 0x00419bd8U;
4298}
4299static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v)
4300{
4301 return (v & 0x7U) << 8U;
4302}
4303static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void)
4304{
4305 return 0x7U << 8U;
4306}
4307static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void)
4308{
4309 return 0x100U;
4310}
4311static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void)
4312{
4313 return 0x00419ba4U;
4314}
4315static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void)
4316{
4317 return 0x3U << 11U;
4318}
4319static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void)
4320{
4321 return 0x1000U;
4322}
4323static inline u32 gr_gpcs_tc_debug0_r(void)
4324{
4325 return 0x00418708U;
4326}
4327static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v)
4328{
4329 return (v & 0x1ffU) << 0U;
4330}
4331static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void)
4332{
4333 return 0x1ffU << 0U;
4334}
4335static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_r(void)
4336{
4337 return 0x00500324U;
4338}
4339static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_f(u32 v)
4340{
4341 return (v & 0x1U) << 0U;
4342}
4343static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m(void)
4344{
4345 return 0x1U << 0U;
4346}
4347static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_f(u32 v)
4348{
4349 return (v & 0x1U) << 1U;
4350}
4351static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m(void)
4352{
4353 return 0x1U << 1U;
4354}
4355static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_r(void)
4356{
4357 return 0x00500314U;
4358}
4359static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_f(u32 v)
4360{
4361 return (v & 0x1U) << 0U;
4362}
4363static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m(void)
4364{
4365 return 0x1U << 0U;
4366}
4367static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_f(u32 v)
4368{
4369 return (v & 0x1U) << 2U;
4370}
4371static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m(void)
4372{
4373 return 0x1U << 2U;
4374}
4375static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_f(u32 v)
4376{
4377 return (v & 0x1U) << 1U;
4378}
4379static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m(void)
4380{
4381 return 0x1U << 1U;
4382}
4383static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_f(u32 v)
4384{
4385 return (v & 0x1U) << 3U;
4386}
4387static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m(void)
4388{
4389 return 0x1U << 3U;
4390}
4391static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v)
4392{
4393 return (v & 0x1U) << 18U;
4394}
4395static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_m(void)
4396{
4397 return 0x1U << 18U;
4398}
4399static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_f(u32 v)
4400{
4401 return (v & 0x1U) << 16U;
4402}
4403static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_m(void)
4404{
4405 return 0x1U << 16U;
4406}
4407static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v)
4408{
4409 return (v & 0x1U) << 19U;
4410}
4411static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_unique_counter_overflow_m(void)
4412{
4413 return 0x1U << 19U;
4414}
4415static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_unique_counter_overflow_f(u32 v)
4416{
4417 return (v & 0x1U) << 17U;
4418}
4419static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_unique_counter_overflow_m(void)
4420{
4421 return 0x1U << 17U;
4422}
4423static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_reset_f(u32 v)
4424{
4425 return (v & 0x1U) << 30U;
4426}
4427static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_reset_task_f(void)
4428{
4429 return 0x40000000U;
4430}
4431static inline u32 gr_gpc0_mmu_l1tlb_ecc_address_r(void)
4432{
4433 return 0x00500320U;
4434}
4435static inline u32 gr_gpc0_mmu_l1tlb_ecc_address_index_f(u32 v)
4436{
4437 return (v & 0xffffffffU) << 0U;
4438}
4439static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r(void)
4440{
4441 return 0x00500318U;
4442}
4443static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_s(void)
4444{
4445 return 16U;
4446}
4447static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_f(u32 v)
4448{
4449 return (v & 0xffffU) << 0U;
4450}
4451static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_m(void)
4452{
4453 return 0xffffU << 0U;
4454}
4455static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_v(u32 r)
4456{
4457 return (r >> 0U) & 0xffffU;
4458}
4459static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_s(void)
4460{
4461 return 16U;
4462}
4463static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_f(u32 v)
4464{
4465 return (v & 0xffffU) << 16U;
4466}
4467static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_m(void)
4468{
4469 return 0xffffU << 16U;
4470}
4471static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_v(u32 r)
4472{
4473 return (r >> 16U) & 0xffffU;
4474}
4475static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r(void)
4476{
4477 return 0x0050031cU;
4478}
4479static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s(void)
4480{
4481 return 16U;
4482}
4483static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_f(u32 v)
4484{
4485 return (v & 0xffffU) << 0U;
4486}
4487static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_m(void)
4488{
4489 return 0xffffU << 0U;
4490}
4491static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_v(u32 r)
4492{
4493 return (r >> 0U) & 0xffffU;
4494}
4495static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_s(void)
4496{
4497 return 16U;
4498}
4499static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_f(u32 v)
4500{
4501 return (v & 0xffffU) << 16U;
4502}
4503static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_m(void)
4504{
4505 return 0xffffU << 16U;
4506}
4507static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_v(u32 r)
4508{
4509 return (r >> 16U) & 0xffffU;
4510}
4511static inline u32 gr_gpc0_gpccs_hww_esr_r(void)
4512{
4513 return 0x00502c98U;
4514}
4515static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_f(u32 v)
4516{
4517 return (v & 0x1U) << 0U;
4518}
4519static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_m(void)
4520{
4521 return 0x1U << 0U;
4522}
4523static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_pending_f(void)
4524{
4525 return 0x1U;
4526}
4527static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_f(u32 v)
4528{
4529 return (v & 0x1U) << 1U;
4530}
4531static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m(void)
4532{
4533 return 0x1U << 1U;
4534}
4535static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_pending_f(void)
4536{
4537 return 0x2U;
4538}
4539static inline u32 gr_gpc0_gpccs_falcon_ecc_status_r(void)
4540{
4541 return 0x00502678U;
4542}
4543static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_f(u32 v)
4544{
4545 return (v & 0x1U) << 0U;
4546}
4547static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m(void)
4548{
4549 return 0x1U << 0U;
4550}
4551static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_pending_f(void)
4552{
4553 return 0x1U;
4554}
4555static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_f(u32 v)
4556{
4557 return (v & 0x1U) << 1U;
4558}
4559static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m(void)
4560{
4561 return 0x1U << 1U;
4562}
4563static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_pending_f(void)
4564{
4565 return 0x2U;
4566}
4567static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_f(u32 v)
4568{
4569 return (v & 0x1U) << 4U;
4570}
4571static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m(void)
4572{
4573 return 0x1U << 4U;
4574}
4575static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_pending_f(void)
4576{
4577 return 0x10U;
4578}
4579static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_f(u32 v)
4580{
4581 return (v & 0x1U) << 5U;
4582}
4583static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m(void)
4584{
4585 return 0x1U << 5U;
4586}
4587static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_pending_f(void)
4588{
4589 return 0x20U;
4590}
4591static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v)
4592{
4593 return (v & 0x1U) << 10U;
4594}
4595static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void)
4596{
4597 return 0x1U << 10U;
4598}
4599static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_pending_f(void)
4600{
4601 return 0x400U;
4602}
4603static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v)
4604{
4605 return (v & 0x1U) << 8U;
4606}
4607static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_m(void)
4608{
4609 return 0x1U << 8U;
4610}
4611static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_pending_f(void)
4612{
4613 return 0x100U;
4614}
4615static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v)
4616{
4617 return (v & 0x1U) << 11U;
4618}
4619static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_m(void)
4620{
4621 return 0x1U << 11U;
4622}
4623static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_pending_f(void)
4624{
4625 return 0x800U;
4626}
4627static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_f(u32 v)
4628{
4629 return (v & 0x1U) << 9U;
4630}
4631static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_m(void)
4632{
4633 return 0x1U << 9U;
4634}
4635static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_pending_f(void)
4636{
4637 return 0x200U;
4638}
4639static inline u32 gr_gpc0_gpccs_falcon_ecc_status_reset_f(u32 v)
4640{
4641 return (v & 0x1U) << 31U;
4642}
4643static inline u32 gr_gpc0_gpccs_falcon_ecc_status_reset_task_f(void)
4644{
4645 return 0x80000000U;
4646}
4647static inline u32 gr_gpc0_gpccs_falcon_ecc_address_r(void)
4648{
4649 return 0x00502684U;
4650}
4651static inline u32 gr_gpc0_gpccs_falcon_ecc_address_index_f(u32 v)
4652{
4653 return (v & 0x7fffffU) << 0U;
4654}
4655static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_s(void)
4656{
4657 return 20U;
4658}
4659static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_f(u32 v)
4660{
4661 return (v & 0xfffffU) << 0U;
4662}
4663static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_m(void)
4664{
4665 return 0xfffffU << 0U;
4666}
4667static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_v(u32 r)
4668{
4669 return (r >> 0U) & 0xfffffU;
4670}
/*
 * GPC0 GPCCS falcon ECC error counters.
 * CORRECTED counter at 0x0050267c, UNCORRECTED counter at 0x00502680.
 * Each packs TOTAL in bits 15:0 and UNIQUE_TOTAL in bits 31:16.
 * (NOTE: no _total_s() accessor is generated for the uncorrected TOTAL
 * field; this matches the generated header as-is.)
 */
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r(void) { return 0x0050267cU; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_s(void) { return 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_m(void) { return 0xffffU << 0U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_s(void) { return 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_m(void) { return 0xffffU << 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_v(u32 r) { return (r >> 16U) & 0xffffU; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r(void) { return 0x00502680U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_m(void) { return 0xffffU << 0U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_s(void) { return 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_m(void) { return 0xffffU << 16U; }
static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r) { return (r >> 16U) & 0xffffU; }
/*
 * FECS falcon ECC status register (0x00409678).
 * Pending-error bits: corrected imem=bit0, corrected dmem=bit1,
 * uncorrected imem=bit4, uncorrected dmem=bit5; counter-overflow bits:
 * corrected total=bit8, corrected unique=bit9, uncorrected total=bit10,
 * uncorrected unique=bit11; reset task=bit31.
 */
static inline u32 gr_fecs_falcon_ecc_status_r(void) { return 0x00409678U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_m(void) { return 0x1U << 0U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_pending_f(void) { return 0x1U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_m(void) { return 0x1U << 1U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_pending_f(void) { return 0x2U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_m(void) { return 0x1U << 4U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_pending_f(void) { return 0x10U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m(void) { return 0x1U << 5U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_pending_f(void) { return 0x20U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) { return (v & 0x1U) << 10U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void) { return 0x1U << 10U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_pending_f(void) { return 0x400U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v) { return (v & 0x1U) << 8U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_m(void) { return 0x1U << 8U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_pending_f(void) { return 0x100U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) { return (v & 0x1U) << 11U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_m(void) { return 0x1U << 11U; }
static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_pending_f(void) { return 0x800U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) { return (v & 0x1U) << 9U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_m(void) { return 0x1U << 9U; }
static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_pending_f(void) { return 0x200U; }
static inline u32 gr_fecs_falcon_ecc_status_reset_f(u32 v) { return (v & 0x1U) << 31U; }
static inline u32 gr_fecs_falcon_ecc_status_reset_task_f(void) { return 0x80000000U; }
/*
 * FECS falcon ECC address register (0x00409684).
 * INDEX occupies bits 22:0; ROW_ADDRESS occupies bits 19:0.
 */
static inline u32 gr_fecs_falcon_ecc_address_r(void) { return 0x00409684U; }
static inline u32 gr_fecs_falcon_ecc_address_index_f(u32 v) { return (v & 0x7fffffU) << 0U; }
static inline u32 gr_fecs_falcon_ecc_address_row_address_s(void) { return 20U; }
static inline u32 gr_fecs_falcon_ecc_address_row_address_f(u32 v) { return (v & 0xfffffU) << 0U; }
static inline u32 gr_fecs_falcon_ecc_address_row_address_m(void) { return 0xfffffU << 0U; }
static inline u32 gr_fecs_falcon_ecc_address_row_address_v(u32 r) { return (r >> 0U) & 0xfffffU; }
4871static inline u32 gr_fecs_falcon_ecc_corrected_err_count_r(void)
4872{
4873 return 0x0040967cU;
4874}
4875static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_s(void)
4876{
4877 return 16U;
4878}
4879static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_f(u32 v)
4880{
4881 return (v & 0xffffU) << 0U;
4882}
4883static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_m(void)
4884{
4885 return 0xffffU << 0U;
4886}
4887static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_v(u32 r)
4888{
4889 return (r >> 0U) & 0xffffU;
4890}
4891static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_s(void)
4892{
4893 return 16U;
4894}
4895static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_f(u32 v)
4896{
4897 return (v & 0xffffU) << 16U;
4898}
4899static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_m(void)
4900{
4901 return 0xffffU << 16U;
4902}
4903static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_v(u32 r)
4904{
4905 return (r >> 16U) & 0xffffU;
4906}
4907static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_r(void)
4908{
4909 return 0x00409680U;
4910}
4911static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_f(u32 v)
4912{
4913 return (v & 0xffffU) << 0U;
4914}
4915static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_m(void)
4916{
4917 return 0xffffU << 0U;
4918}
4919static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_v(u32 r)
4920{
4921 return (r >> 0U) & 0xffffU;
4922}
4923static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_s(void)
4924{
4925 return 16U;
4926}
4927static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v)
4928{
4929 return (v & 0xffffU) << 16U;
4930}
4931static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_m(void)
4932{
4933 return 0xffffU << 16U;
4934}
4935static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r)
4936{
4937 return (r >> 16U) & 0xffffU;
4938}
4939#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h
new file mode 100644
index 00000000..769bcf0c
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h
@@ -0,0 +1,803 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Function naming determines intended use:
 *
 *     <x>_r(void) : Returns the offset for register <x>.
 *
 *     <x>_o(void) : Returns the offset for element <x>.
 *
 *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
 *
 *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
 *
 *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
 *         and masked to place it at field <y> of register <x>.  This value
 *         can be |'d with others to produce a full register value for
 *         register <x>.
 *
 *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
 *         value can be ~'d and then &'d to clear the value of field <y> for
 *         register <x>.
 *
 *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
 *         to place it at field <y> of register <x>.  This value can be |'d
 *         with others to produce a full register value for <x>.
 *
 *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
 *         <x> value 'r' after being shifted to place its LSB at bit 0.
 *         This value is suitable for direct comparison with other unshifted
 *         values appropriate for use in field <y> of register <x>.
 *
 *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
 *         field <y> of register <x>.  This value is suitable for direct
 *         comparison with unshifted values appropriate for use in field <y>
 *         of register <x>.
 */
56#ifndef _hw_ltc_gv11b_h_
57#define _hw_ltc_gv11b_h_
58
59static inline u32 ltc_pltcg_base_v(void)
60{
61 return 0x00140000U;
62}
63static inline u32 ltc_pltcg_extent_v(void)
64{
65 return 0x0017ffffU;
66}
67static inline u32 ltc_ltc0_ltss_v(void)
68{
69 return 0x00140200U;
70}
71static inline u32 ltc_ltc0_lts0_v(void)
72{
73 return 0x00140400U;
74}
75static inline u32 ltc_ltcs_ltss_v(void)
76{
77 return 0x0017e200U;
78}
79static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void)
80{
81 return 0x0014046cU;
82}
83static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void)
84{
85 return 0x00140518U;
86}
87static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void)
88{
89 return 0x0017e318U;
90}
91static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void)
92{
93 return 0x1U << 15U;
94}
/*
 * TSTG cfg1 (active ways bits 15:0, active sets bits 17:16) and CBC
 * control-1 registers (clean=bit0, invalidate=bit1, clear=bit2).
 */
static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) { return 0x00140494U; }
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) { return (r >> 16U) & 0x3U; }
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) { return 0x00000000U; }
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) { return 0x00000002U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) { return 0x0017e26cU; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) { return 0x1U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) { return 0x2U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) { return (r >> 2U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) { return 0x4U; }
static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) { return 0x0014046cU; }
/*
 * CBC clear bounds (ctrl2 lower / ctrl3 upper, 18-bit fields) and CBC
 * backing-store base (26-bit address, 2KB-aligned per alignment shift 11).
 */
static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) { return 0x0017e270U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) { return (v & 0x3ffffU) << 0U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) { return 0x0017e274U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) { return (v & 0x3ffffU) << 0U; }
static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) { return 0x0003ffffU; }
static inline u32 ltc_ltcs_ltss_cbc_base_r(void) { return 0x0017e278U; }
static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) { return 0x0000000bU; }
static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) { return (r >> 0U) & 0x3ffffffU; }
/*
 * CBC active-LTC count (bits 4:0; nvlink-peer-through-l2=bit24,
 * serialize=bit25) and CBC parameter registers.
 */
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) { return 0x0017e27cU; }
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r) { return (r >> 0U) & 0x1fU; }
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v) { return (v & 0x1U) << 24U; }
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r) { return (r >> 24U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v) { return (v & 0x1U) << 25U; }
static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r) { return (r >> 25U) & 0x1U; }
static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) { return 0x0017e000U; }
static inline u32 ltc_ltcs_ltss_cbc_param_r(void) { return 0x0017e280U; }
static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) { return (r >> 24U) & 0xfU; }
static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) { return (r >> 28U) & 0xfU; }
static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) { return 0x0017e3f4U; }
static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) { return (r >> 0U) & 0xffffU; }
/*
 * TSTG set management and DSTG zero-bandwidth-clear (ZBC) table registers:
 * color clear values are a 4-entry array; depth is a 32-bit field, stencil
 * an 8-bit field.
 */
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) { return 0x0017e2acU; }
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) { return (v & 0x1fU) << 16U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) { return 0x0017e338U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) { return (v & 0xfU) << 0U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) { return 0x0017e33cU + i*4U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) { return 0x00000004U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) { return 0x0017e34cU; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) { return 32U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) { return (v & 0xffffffffU) << 0U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) { return 0xffffffffU << 0U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) { return (r >> 0U) & 0xffffffffU; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void) { return 0x0017e204U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void) { return 8U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v) { return (v & 0xffU) << 0U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void) { return 0xffU << 0U; }
static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r) { return (r >> 0U) & 0xffU; }
/*
 * TSTG set management 2 (L2 bypass enable = bit 28) and G_ELPG flush
 * registers (flush pending = bit 0) for the broadcast aperture and for
 * LTC0/LTC1 individually.
 */
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) { return 0x0017e2b0U; }
static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) { return 0x10000000U; }
static inline u32 ltc_ltcs_ltss_g_elpg_r(void) { return 0x0017e214U; }
static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltc0_ltss_g_elpg_r(void) { return 0x00140214U; }
static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltc1_ltss_g_elpg_r(void) { return 0x00142214U; }
static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) { return 0x1U; }
/*
 * LTC interrupt registers: intr (ECC SEC=bit8, DED=bit9; enables at
 * bits 24/25; evicted_cb=bit20, illegal_compstat_access=bit30) and intr3
 * (ECC corrected=bit7, uncorrected=bit8).
 */
static inline u32 ltc_ltcs_ltss_intr_r(void) { return 0x0017e20cU; }
static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) { return 0x100U; }
static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) { return 0x200U; }
static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) { return 0x1U << 20U; }
static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) { return 0x1U << 30U; }
static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) { return 0x1000000U; }
static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) { return 0x2000000U; }
static inline u32 ltc_ltc0_lts0_intr_r(void) { return 0x0014040cU; }
static inline u32 ltc_ltcs_ltss_intr3_r(void) { return 0x0017e388U; }
static inline u32 ltc_ltcs_ltss_intr3_ecc_corrected_m(void) { return 0x1U << 7U; }
static inline u32 ltc_ltcs_ltss_intr3_ecc_uncorrected_m(void) { return 0x1U << 8U; }
static inline u32 ltc_ltc0_lts0_intr3_r(void) { return 0x00140588U; }
/*
 * L2 cache ECC status register (0x001404f0).
 * Error bits: uncorrected rstg=bit0, corrected rstg=bit1, uncorrected
 * tstg=bit2, corrected tstg=bit3, uncorrected dstg=bit4, corrected
 * dstg=bit5; counter-overflow bits: corrected total=bit16, corrected
 * unique=bit17, uncorrected total=bit18, uncorrected unique=bit19;
 * reset task=bit30.  ECC error address register follows at 0x001404fc.
 */
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_r(void) { return 0x001404f0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_f(u32 v) { return (v & 0x1U) << 1U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m(void) { return 0x1U << 1U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_f(u32 v) { return (v & 0x1U) << 3U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m(void) { return 0x1U << 3U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_f(u32 v) { return (v & 0x1U) << 5U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m(void) { return 0x1U << 5U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_f(u32 v) { return (v & 0x1U) << 0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m(void) { return 0x1U << 0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_f(u32 v) { return (v & 0x1U) << 2U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m(void) { return 0x1U << 2U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_f(u32 v) { return (v & 0x1U) << 4U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m(void) { return 0x1U << 4U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) { return (v & 0x1U) << 18U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m(void) { return 0x1U << 18U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_f(u32 v) { return (v & 0x1U) << 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m(void) { return 0x1U << 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) { return (v & 0x1U) << 19U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_unique_counter_overflow_m(void) { return 0x1U << 19U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) { return (v & 0x1U) << 17U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_unique_counter_overflow_m(void) { return 0x1U << 17U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_reset_f(u32 v) { return (v & 0x1U) << 30U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f(void) { return 0x40000000U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_address_r(void) { return 0x001404fcU; }
/*
 * L2 cache ECC error counters: CORRECTED at 0x001404f4, UNCORRECTED at
 * 0x001404f8.  Each packs TOTAL in bits 15:0 and UNIQUE_TOTAL in
 * bits 31:16.
 */
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(void) { return 0x001404f4U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s(void) { return 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_m(void) { return 0xffffU << 0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_s(void) { return 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_m(void) { return 0xffffU << 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_v(u32 r) { return (r >> 16U) & 0xffffU; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(void) { return 0x001404f8U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s(void) { return 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_f(u32 v) { return (v & 0xffffU) << 0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_m(void) { return 0xffffU << 0U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(u32 r) { return (r >> 0U) & 0xffffU; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_s(void) { return 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_f(u32 v) { return (v & 0xffffU) << 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_m(void) { return 0xffffU << 16U; }
static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_v(u32 r) { return (r >> 16U) & 0xffffU; }
/*
 * DSTG ECC report register (0x0014051c): SEC count in bits 7:0, DED count
 * in bits 23:16.
 */
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) { return 0x0014051cU; }
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) { return 0xffU << 0U; }
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r) { return (r >> 0U) & 0xffU; }
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) { return 0xffU << 16U; }
static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) { return (r >> 16U) & 0xffU; }
/*
 * Broadcast TSTG cache-management registers.
 * cmgmt0 (0x0017e2a0) drives invalidate: pending=bit0, max-cycles in
 * bits 11:8, evict last/normal/first class at bits 28/29/30.
 * cmgmt1 (0x0017e2a4) drives clean: pending=bit0, max-cycles in bits 11:8,
 * wait-for-fb-to-pull=bit16, evict last/normal/first class at bits
 * 28/29/30.
 */
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) { return 0x0017e2a0U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) { return (r >> 8U) & 0xfU; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) { return 0x00000003U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) { return 0x300U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) { return (r >> 28U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) { return 0x10000000U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) { return (r >> 29U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) { return 0x20000000U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) { return 0x40000000U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) { return 0x0017e2a4U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) { return (r >> 8U) & 0xfU; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) { return 0x00000003U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) { return 0x300U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) { return (r >> 16U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) { return 0x10000U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) { return (r >> 28U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) { return 0x10000000U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) { return (r >> 29U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) { return 0x20000000U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) { return (r >> 30U) & 0x1U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) { return 0x00000001U; }
static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) { return 0x40000000U; }
/*
 * Per-LTC TSTG cache-management registers (LTC0 and LTC1 unicast
 * apertures): cmgmt0 invalidate and cmgmt1 clean, pending bit at bit 0.
 */
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) { return 0x001402a0U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) { return 0x001402a4U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) { return 0x001422a0U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) { return 0x1U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) { return 0x001422a4U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) { return (r >> 0U) & 0x1U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) { return 0x00000001U; }
static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) { return 0x1U; }
791static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void)
792{
793 return 0x0014058cU;
794}
795static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r)
796{
797 return (r >> 0U) & 0xffffU;
798}
799static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r)
800{
801 return (r >> 16U) & 0x1fU;
802}
803#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h
new file mode 100644
index 00000000..bff73076
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_mc_gv11b_h_
57#define _hw_mc_gv11b_h_
58
59static inline u32 mc_boot_0_r(void)
60{
61 return 0x00000000U;
62}
63static inline u32 mc_boot_0_architecture_v(u32 r)
64{
65 return (r >> 24U) & 0x1fU;
66}
67static inline u32 mc_boot_0_implementation_v(u32 r)
68{
69 return (r >> 20U) & 0xfU;
70}
71static inline u32 mc_boot_0_major_revision_v(u32 r)
72{
73 return (r >> 4U) & 0xfU;
74}
75static inline u32 mc_boot_0_minor_revision_v(u32 r)
76{
77 return (r >> 0U) & 0xfU;
78}
79static inline u32 mc_intr_r(u32 i)
80{
81 return 0x00000100U + i*4U;
82}
83static inline u32 mc_intr_pfifo_pending_f(void)
84{
85 return 0x100U;
86}
87static inline u32 mc_intr_hub_pending_f(void)
88{
89 return 0x200U;
90}
91static inline u32 mc_intr_pgraph_pending_f(void)
92{
93 return 0x1000U;
94}
95static inline u32 mc_intr_pmu_pending_f(void)
96{
97 return 0x1000000U;
98}
99static inline u32 mc_intr_ltc_pending_f(void)
100{
101 return 0x2000000U;
102}
103static inline u32 mc_intr_priv_ring_pending_f(void)
104{
105 return 0x40000000U;
106}
107static inline u32 mc_intr_pbus_pending_f(void)
108{
109 return 0x10000000U;
110}
111static inline u32 mc_intr_en_r(u32 i)
112{
113 return 0x00000140U + i*4U;
114}
115static inline u32 mc_intr_en_set_r(u32 i)
116{
117 return 0x00000160U + i*4U;
118}
119static inline u32 mc_intr_en_clear_r(u32 i)
120{
121 return 0x00000180U + i*4U;
122}
123static inline u32 mc_enable_r(void)
124{
125 return 0x00000200U;
126}
127static inline u32 mc_enable_xbar_enabled_f(void)
128{
129 return 0x4U;
130}
131static inline u32 mc_enable_l2_enabled_f(void)
132{
133 return 0x8U;
134}
135static inline u32 mc_enable_pmedia_s(void)
136{
137 return 1U;
138}
139static inline u32 mc_enable_pmedia_f(u32 v)
140{
141 return (v & 0x1U) << 4U;
142}
143static inline u32 mc_enable_pmedia_m(void)
144{
145 return 0x1U << 4U;
146}
147static inline u32 mc_enable_pmedia_v(u32 r)
148{
149 return (r >> 4U) & 0x1U;
150}
151static inline u32 mc_enable_ce0_m(void)
152{
153 return 0x1U << 6U;
154}
155static inline u32 mc_enable_pfifo_enabled_f(void)
156{
157 return 0x100U;
158}
159static inline u32 mc_enable_pgraph_enabled_f(void)
160{
161 return 0x1000U;
162}
163static inline u32 mc_enable_pwr_v(u32 r)
164{
165 return (r >> 13U) & 0x1U;
166}
167static inline u32 mc_enable_pwr_disabled_v(void)
168{
169 return 0x00000000U;
170}
171static inline u32 mc_enable_pwr_enabled_f(void)
172{
173 return 0x2000U;
174}
175static inline u32 mc_enable_pfb_enabled_f(void)
176{
177 return 0x100000U;
178}
179static inline u32 mc_enable_ce2_m(void)
180{
181 return 0x1U << 21U;
182}
183static inline u32 mc_enable_ce2_enabled_f(void)
184{
185 return 0x200000U;
186}
187static inline u32 mc_enable_blg_enabled_f(void)
188{
189 return 0x8000000U;
190}
191static inline u32 mc_enable_perfmon_enabled_f(void)
192{
193 return 0x10000000U;
194}
195static inline u32 mc_enable_hub_enabled_f(void)
196{
197 return 0x20000000U;
198}
199static inline u32 mc_intr_ltc_r(void)
200{
201 return 0x000001c0U;
202}
203static inline u32 mc_enable_pb_r(void)
204{
205 return 0x00000204U;
206}
207static inline u32 mc_enable_pb_0_s(void)
208{
209 return 1U;
210}
211static inline u32 mc_enable_pb_0_f(u32 v)
212{
213 return (v & 0x1U) << 0U;
214}
215static inline u32 mc_enable_pb_0_m(void)
216{
217 return 0x1U << 0U;
218}
219static inline u32 mc_enable_pb_0_v(u32 r)
220{
221 return (r >> 0U) & 0x1U;
222}
223static inline u32 mc_enable_pb_0_enabled_v(void)
224{
225 return 0x00000001U;
226}
227static inline u32 mc_enable_pb_sel_f(u32 v, u32 i)
228{
229 return (v & 0x1U) << (0U + i*1U);
230}
231static inline u32 mc_elpg_enable_r(void)
232{
233 return 0x0000020cU;
234}
235static inline u32 mc_elpg_enable_xbar_enabled_f(void)
236{
237 return 0x4U;
238}
239static inline u32 mc_elpg_enable_pfb_enabled_f(void)
240{
241 return 0x100000U;
242}
243static inline u32 mc_elpg_enable_hub_enabled_f(void)
244{
245 return 0x20000000U;
246}
247static inline u32 mc_elpg_enable_l2_enabled_f(void)
248{
249 return 0x8U;
250}
251#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h
new file mode 100644
index 00000000..9b9017ee
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h
@@ -0,0 +1,659 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pbdma_gv11b_h_
57#define _hw_pbdma_gv11b_h_
58
59static inline u32 pbdma_gp_entry1_r(void)
60{
61 return 0x10000004U;
62}
63static inline u32 pbdma_gp_entry1_get_hi_v(u32 r)
64{
65 return (r >> 0U) & 0xffU;
66}
67static inline u32 pbdma_gp_entry1_length_f(u32 v)
68{
69 return (v & 0x1fffffU) << 10U;
70}
71static inline u32 pbdma_gp_entry1_length_v(u32 r)
72{
73 return (r >> 10U) & 0x1fffffU;
74}
75static inline u32 pbdma_gp_base_r(u32 i)
76{
77 return 0x00040048U + i*8192U;
78}
79static inline u32 pbdma_gp_base__size_1_v(void)
80{
81 return 0x00000003U;
82}
83static inline u32 pbdma_gp_base_offset_f(u32 v)
84{
85 return (v & 0x1fffffffU) << 3U;
86}
87static inline u32 pbdma_gp_base_rsvd_s(void)
88{
89 return 3U;
90}
91static inline u32 pbdma_gp_base_hi_r(u32 i)
92{
93 return 0x0004004cU + i*8192U;
94}
95static inline u32 pbdma_gp_base_hi_offset_f(u32 v)
96{
97 return (v & 0xffU) << 0U;
98}
99static inline u32 pbdma_gp_base_hi_limit2_f(u32 v)
100{
101 return (v & 0x1fU) << 16U;
102}
103static inline u32 pbdma_gp_fetch_r(u32 i)
104{
105 return 0x00040050U + i*8192U;
106}
107static inline u32 pbdma_gp_get_r(u32 i)
108{
109 return 0x00040014U + i*8192U;
110}
111static inline u32 pbdma_gp_put_r(u32 i)
112{
113 return 0x00040000U + i*8192U;
114}
115static inline u32 pbdma_pb_fetch_r(u32 i)
116{
117 return 0x00040054U + i*8192U;
118}
119static inline u32 pbdma_pb_fetch_hi_r(u32 i)
120{
121 return 0x00040058U + i*8192U;
122}
123static inline u32 pbdma_get_r(u32 i)
124{
125 return 0x00040018U + i*8192U;
126}
127static inline u32 pbdma_get_hi_r(u32 i)
128{
129 return 0x0004001cU + i*8192U;
130}
131static inline u32 pbdma_put_r(u32 i)
132{
133 return 0x0004005cU + i*8192U;
134}
135static inline u32 pbdma_put_hi_r(u32 i)
136{
137 return 0x00040060U + i*8192U;
138}
139static inline u32 pbdma_pb_header_r(u32 i)
140{
141 return 0x00040084U + i*8192U;
142}
143static inline u32 pbdma_pb_header_priv_user_f(void)
144{
145 return 0x0U;
146}
147static inline u32 pbdma_pb_header_method_zero_f(void)
148{
149 return 0x0U;
150}
151static inline u32 pbdma_pb_header_subchannel_zero_f(void)
152{
153 return 0x0U;
154}
155static inline u32 pbdma_pb_header_level_main_f(void)
156{
157 return 0x0U;
158}
159static inline u32 pbdma_pb_header_first_true_f(void)
160{
161 return 0x400000U;
162}
163static inline u32 pbdma_pb_header_type_inc_f(void)
164{
165 return 0x20000000U;
166}
167static inline u32 pbdma_pb_header_type_non_inc_f(void)
168{
169 return 0x60000000U;
170}
171static inline u32 pbdma_hdr_shadow_r(u32 i)
172{
173 return 0x00040118U + i*8192U;
174}
175static inline u32 pbdma_gp_shadow_0_r(u32 i)
176{
177 return 0x00040110U + i*8192U;
178}
179static inline u32 pbdma_gp_shadow_1_r(u32 i)
180{
181 return 0x00040114U + i*8192U;
182}
183static inline u32 pbdma_subdevice_r(u32 i)
184{
185 return 0x00040094U + i*8192U;
186}
187static inline u32 pbdma_subdevice_id_f(u32 v)
188{
189 return (v & 0xfffU) << 0U;
190}
191static inline u32 pbdma_subdevice_status_active_f(void)
192{
193 return 0x10000000U;
194}
195static inline u32 pbdma_subdevice_channel_dma_enable_f(void)
196{
197 return 0x20000000U;
198}
199static inline u32 pbdma_method0_r(u32 i)
200{
201 return 0x000400c0U + i*8192U;
202}
203static inline u32 pbdma_method0_fifo_size_v(void)
204{
205 return 0x00000004U;
206}
207static inline u32 pbdma_method0_addr_f(u32 v)
208{
209 return (v & 0xfffU) << 2U;
210}
211static inline u32 pbdma_method0_addr_v(u32 r)
212{
213 return (r >> 2U) & 0xfffU;
214}
215static inline u32 pbdma_method0_subch_v(u32 r)
216{
217 return (r >> 16U) & 0x7U;
218}
219static inline u32 pbdma_method0_first_true_f(void)
220{
221 return 0x400000U;
222}
223static inline u32 pbdma_method0_valid_true_f(void)
224{
225 return 0x80000000U;
226}
227static inline u32 pbdma_method1_r(u32 i)
228{
229 return 0x000400c8U + i*8192U;
230}
231static inline u32 pbdma_method2_r(u32 i)
232{
233 return 0x000400d0U + i*8192U;
234}
235static inline u32 pbdma_method3_r(u32 i)
236{
237 return 0x000400d8U + i*8192U;
238}
239static inline u32 pbdma_data0_r(u32 i)
240{
241 return 0x000400c4U + i*8192U;
242}
243static inline u32 pbdma_acquire_r(u32 i)
244{
245 return 0x00040030U + i*8192U;
246}
247static inline u32 pbdma_acquire_retry_man_2_f(void)
248{
249 return 0x2U;
250}
251static inline u32 pbdma_acquire_retry_exp_2_f(void)
252{
253 return 0x100U;
254}
255static inline u32 pbdma_acquire_timeout_exp_f(u32 v)
256{
257 return (v & 0xfU) << 11U;
258}
259static inline u32 pbdma_acquire_timeout_exp_max_v(void)
260{
261 return 0x0000000fU;
262}
263static inline u32 pbdma_acquire_timeout_exp_max_f(void)
264{
265 return 0x7800U;
266}
267static inline u32 pbdma_acquire_timeout_man_f(u32 v)
268{
269 return (v & 0xffffU) << 15U;
270}
271static inline u32 pbdma_acquire_timeout_man_max_v(void)
272{
273 return 0x0000ffffU;
274}
275static inline u32 pbdma_acquire_timeout_man_max_f(void)
276{
277 return 0x7fff8000U;
278}
279static inline u32 pbdma_acquire_timeout_en_enable_f(void)
280{
281 return 0x80000000U;
282}
283static inline u32 pbdma_acquire_timeout_en_disable_f(void)
284{
285 return 0x0U;
286}
287static inline u32 pbdma_status_r(u32 i)
288{
289 return 0x00040100U + i*8192U;
290}
291static inline u32 pbdma_channel_r(u32 i)
292{
293 return 0x00040120U + i*8192U;
294}
295static inline u32 pbdma_signature_r(u32 i)
296{
297 return 0x00040010U + i*8192U;
298}
299static inline u32 pbdma_signature_hw_valid_f(void)
300{
301 return 0xfaceU;
302}
303static inline u32 pbdma_signature_sw_zero_f(void)
304{
305 return 0x0U;
306}
307static inline u32 pbdma_userd_r(u32 i)
308{
309 return 0x00040008U + i*8192U;
310}
311static inline u32 pbdma_userd_target_vid_mem_f(void)
312{
313 return 0x0U;
314}
315static inline u32 pbdma_userd_target_sys_mem_coh_f(void)
316{
317 return 0x2U;
318}
319static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void)
320{
321 return 0x3U;
322}
323static inline u32 pbdma_userd_addr_f(u32 v)
324{
325 return (v & 0x7fffffU) << 9U;
326}
327static inline u32 pbdma_config_r(u32 i)
328{
329 return 0x000400f4U + i*8192U;
330}
331static inline u32 pbdma_config_l2_evict_first_f(void)
332{
333 return 0x0U;
334}
335static inline u32 pbdma_config_l2_evict_normal_f(void)
336{
337 return 0x1U;
338}
339static inline u32 pbdma_config_l2_evict_last_f(void)
340{
341 return 0x2U;
342}
343static inline u32 pbdma_config_ce_split_enable_f(void)
344{
345 return 0x0U;
346}
347static inline u32 pbdma_config_ce_split_disable_f(void)
348{
349 return 0x10U;
350}
351static inline u32 pbdma_config_auth_level_non_privileged_f(void)
352{
353 return 0x0U;
354}
355static inline u32 pbdma_config_auth_level_privileged_f(void)
356{
357 return 0x100U;
358}
359static inline u32 pbdma_config_userd_writeback_disable_f(void)
360{
361 return 0x0U;
362}
363static inline u32 pbdma_config_userd_writeback_enable_f(void)
364{
365 return 0x1000U;
366}
367static inline u32 pbdma_userd_hi_r(u32 i)
368{
369 return 0x0004000cU + i*8192U;
370}
371static inline u32 pbdma_userd_hi_addr_f(u32 v)
372{
373 return (v & 0xffU) << 0U;
374}
375static inline u32 pbdma_hce_ctrl_r(u32 i)
376{
377 return 0x000400e4U + i*8192U;
378}
379static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void)
380{
381 return 0x20U;
382}
383static inline u32 pbdma_intr_0_r(u32 i)
384{
385 return 0x00040108U + i*8192U;
386}
387static inline u32 pbdma_intr_0_memreq_v(u32 r)
388{
389 return (r >> 0U) & 0x1U;
390}
391static inline u32 pbdma_intr_0_memreq_pending_f(void)
392{
393 return 0x1U;
394}
395static inline u32 pbdma_intr_0_memack_timeout_pending_f(void)
396{
397 return 0x2U;
398}
399static inline u32 pbdma_intr_0_memack_extra_pending_f(void)
400{
401 return 0x4U;
402}
403static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void)
404{
405 return 0x8U;
406}
407static inline u32 pbdma_intr_0_memdat_extra_pending_f(void)
408{
409 return 0x10U;
410}
411static inline u32 pbdma_intr_0_memflush_pending_f(void)
412{
413 return 0x20U;
414}
415static inline u32 pbdma_intr_0_memop_pending_f(void)
416{
417 return 0x40U;
418}
419static inline u32 pbdma_intr_0_lbconnect_pending_f(void)
420{
421 return 0x80U;
422}
423static inline u32 pbdma_intr_0_lbreq_pending_f(void)
424{
425 return 0x100U;
426}
427static inline u32 pbdma_intr_0_lback_timeout_pending_f(void)
428{
429 return 0x200U;
430}
431static inline u32 pbdma_intr_0_lback_extra_pending_f(void)
432{
433 return 0x400U;
434}
435static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void)
436{
437 return 0x800U;
438}
439static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void)
440{
441 return 0x1000U;
442}
443static inline u32 pbdma_intr_0_gpfifo_pending_f(void)
444{
445 return 0x2000U;
446}
447static inline u32 pbdma_intr_0_gpptr_pending_f(void)
448{
449 return 0x4000U;
450}
451static inline u32 pbdma_intr_0_gpentry_pending_f(void)
452{
453 return 0x8000U;
454}
455static inline u32 pbdma_intr_0_gpcrc_pending_f(void)
456{
457 return 0x10000U;
458}
459static inline u32 pbdma_intr_0_pbptr_pending_f(void)
460{
461 return 0x20000U;
462}
463static inline u32 pbdma_intr_0_pbentry_pending_f(void)
464{
465 return 0x40000U;
466}
467static inline u32 pbdma_intr_0_pbcrc_pending_f(void)
468{
469 return 0x80000U;
470}
471static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void)
472{
473 return 0x100000U;
474}
475static inline u32 pbdma_intr_0_method_pending_f(void)
476{
477 return 0x200000U;
478}
479static inline u32 pbdma_intr_0_methodcrc_pending_f(void)
480{
481 return 0x400000U;
482}
483static inline u32 pbdma_intr_0_device_pending_f(void)
484{
485 return 0x800000U;
486}
487static inline u32 pbdma_intr_0_eng_reset_pending_f(void)
488{
489 return 0x1000000U;
490}
491static inline u32 pbdma_intr_0_semaphore_pending_f(void)
492{
493 return 0x2000000U;
494}
495static inline u32 pbdma_intr_0_acquire_pending_f(void)
496{
497 return 0x4000000U;
498}
499static inline u32 pbdma_intr_0_pri_pending_f(void)
500{
501 return 0x8000000U;
502}
503static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void)
504{
505 return 0x20000000U;
506}
507static inline u32 pbdma_intr_0_pbseg_pending_f(void)
508{
509 return 0x40000000U;
510}
511static inline u32 pbdma_intr_0_signature_pending_f(void)
512{
513 return 0x80000000U;
514}
515static inline u32 pbdma_intr_1_r(u32 i)
516{
517 return 0x00040148U + i*8192U;
518}
519static inline u32 pbdma_intr_1_ctxnotvalid_m(void)
520{
521 return 0x1U << 31U;
522}
523static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void)
524{
525 return 0x80000000U;
526}
527static inline u32 pbdma_intr_en_0_r(u32 i)
528{
529 return 0x0004010cU + i*8192U;
530}
531static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void)
532{
533 return 0x100U;
534}
535static inline u32 pbdma_intr_en_1_r(u32 i)
536{
537 return 0x0004014cU + i*8192U;
538}
539static inline u32 pbdma_intr_stall_r(u32 i)
540{
541 return 0x0004013cU + i*8192U;
542}
543static inline u32 pbdma_intr_stall_lbreq_enabled_f(void)
544{
545 return 0x100U;
546}
547static inline u32 pbdma_intr_stall_1_r(u32 i)
548{
549 return 0x00040140U + i*8192U;
550}
551static inline u32 pbdma_udma_nop_r(void)
552{
553 return 0x00000008U;
554}
555static inline u32 pbdma_runlist_timeslice_r(u32 i)
556{
557 return 0x000400f8U + i*8192U;
558}
559static inline u32 pbdma_runlist_timeslice_timeout_128_f(void)
560{
561 return 0x80U;
562}
563static inline u32 pbdma_runlist_timeslice_timescale_3_f(void)
564{
565 return 0x3000U;
566}
567static inline u32 pbdma_runlist_timeslice_enable_true_f(void)
568{
569 return 0x10000000U;
570}
571static inline u32 pbdma_target_r(u32 i)
572{
573 return 0x000400acU + i*8192U;
574}
575static inline u32 pbdma_target_engine_sw_f(void)
576{
577 return 0x1fU;
578}
579static inline u32 pbdma_target_eng_ctx_valid_true_f(void)
580{
581 return 0x10000U;
582}
583static inline u32 pbdma_target_eng_ctx_valid_false_f(void)
584{
585 return 0x0U;
586}
587static inline u32 pbdma_target_ce_ctx_valid_true_f(void)
588{
589 return 0x20000U;
590}
591static inline u32 pbdma_target_ce_ctx_valid_false_f(void)
592{
593 return 0x0U;
594}
595static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void)
596{
597 return 0x0U;
598}
599static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void)
600{
601 return 0x1000000U;
602}
603static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void)
604{
605 return 0x2000000U;
606}
607static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void)
608{
609 return 0x3000000U;
610}
611static inline u32 pbdma_target_should_send_tsg_event_true_f(void)
612{
613 return 0x20000000U;
614}
615static inline u32 pbdma_target_should_send_tsg_event_false_f(void)
616{
617 return 0x0U;
618}
619static inline u32 pbdma_target_needs_host_tsg_event_true_f(void)
620{
621 return 0x80000000U;
622}
623static inline u32 pbdma_target_needs_host_tsg_event_false_f(void)
624{
625 return 0x0U;
626}
627static inline u32 pbdma_set_channel_info_r(u32 i)
628{
629 return 0x000400fcU + i*8192U;
630}
631static inline u32 pbdma_set_channel_info_scg_type_graphics_compute0_f(void)
632{
633 return 0x0U;
634}
635static inline u32 pbdma_set_channel_info_scg_type_compute1_f(void)
636{
637 return 0x1U;
638}
639static inline u32 pbdma_set_channel_info_veid_f(u32 v)
640{
641 return (v & 0x3fU) << 8U;
642}
643static inline u32 pbdma_timeout_r(u32 i)
644{
645 return 0x0004012cU + i*8192U;
646}
647static inline u32 pbdma_timeout_period_m(void)
648{
649 return 0xffffffffU << 0U;
650}
651static inline u32 pbdma_timeout_period_max_f(void)
652{
653 return 0xffffffffU;
654}
655static inline u32 pbdma_timeout_period_init_f(void)
656{
657 return 0x10000U;
658}
659#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h
new file mode 100644
index 00000000..788a6ab6
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h
@@ -0,0 +1,211 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_perf_gv11b_h_
57#define _hw_perf_gv11b_h_
58
59static inline u32 perf_pmasys_control_r(void)
60{
61 return 0x0024a000U;
62}
63static inline u32 perf_pmasys_control_membuf_status_v(u32 r)
64{
65 return (r >> 4U) & 0x1U;
66}
67static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void)
68{
69 return 0x00000001U;
70}
71static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void)
72{
73 return 0x10U;
74}
75static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v)
76{
77 return (v & 0x1U) << 5U;
78}
79static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r)
80{
81 return (r >> 5U) & 0x1U;
82}
83static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void)
84{
85 return 0x00000001U;
86}
87static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void)
88{
89 return 0x20U;
90}
91static inline u32 perf_pmasys_mem_block_r(void)
92{
93 return 0x0024a070U;
94}
95static inline u32 perf_pmasys_mem_block_base_f(u32 v)
96{
97 return (v & 0xfffffffU) << 0U;
98}
99static inline u32 perf_pmasys_mem_block_target_f(u32 v)
100{
101 return (v & 0x3U) << 28U;
102}
103static inline u32 perf_pmasys_mem_block_target_v(u32 r)
104{
105 return (r >> 28U) & 0x3U;
106}
107static inline u32 perf_pmasys_mem_block_target_lfb_v(void)
108{
109 return 0x00000000U;
110}
111static inline u32 perf_pmasys_mem_block_target_lfb_f(void)
112{
113 return 0x0U;
114}
115static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void)
116{
117 return 0x00000002U;
118}
119static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void)
120{
121 return 0x20000000U;
122}
123static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void)
124{
125 return 0x00000003U;
126}
127static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void)
128{
129 return 0x30000000U;
130}
131static inline u32 perf_pmasys_mem_block_valid_f(u32 v)
132{
133 return (v & 0x1U) << 31U;
134}
135static inline u32 perf_pmasys_mem_block_valid_v(u32 r)
136{
137 return (r >> 31U) & 0x1U;
138}
139static inline u32 perf_pmasys_mem_block_valid_true_v(void)
140{
141 return 0x00000001U;
142}
143static inline u32 perf_pmasys_mem_block_valid_true_f(void)
144{
145 return 0x80000000U;
146}
147static inline u32 perf_pmasys_mem_block_valid_false_v(void)
148{
149 return 0x00000000U;
150}
151static inline u32 perf_pmasys_mem_block_valid_false_f(void)
152{
153 return 0x0U;
154}
155static inline u32 perf_pmasys_outbase_r(void)
156{
157 return 0x0024a074U;
158}
159static inline u32 perf_pmasys_outbase_ptr_f(u32 v)
160{
161 return (v & 0x7ffffffU) << 5U;
162}
163static inline u32 perf_pmasys_outbaseupper_r(void)
164{
165 return 0x0024a078U;
166}
167static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v)
168{
169 return (v & 0xffU) << 0U;
170}
171static inline u32 perf_pmasys_outsize_r(void)
172{
173 return 0x0024a07cU;
174}
175static inline u32 perf_pmasys_outsize_numbytes_f(u32 v)
176{
177 return (v & 0x7ffffffU) << 5U;
178}
179static inline u32 perf_pmasys_mem_bytes_r(void)
180{
181 return 0x0024a084U;
182}
183static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v)
184{
185 return (v & 0xfffffffU) << 4U;
186}
187static inline u32 perf_pmasys_mem_bump_r(void)
188{
189 return 0x0024a088U;
190}
191static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v)
192{
193 return (v & 0xfffffffU) << 4U;
194}
195static inline u32 perf_pmasys_enginestatus_r(void)
196{
197 return 0x0024a0a4U;
198}
199static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v)
200{
201 return (v & 0x1U) << 4U;
202}
203static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void)
204{
205 return 0x00000001U;
206}
207static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void)
208{
209 return 0x10U;
210}
211#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h
new file mode 100644
index 00000000..456d6316
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pram_gv11b_h_
57#define _hw_pram_gv11b_h_
58
59static inline u32 pram_data032_r(u32 i)
60{
61 return 0x00700000U + i*4U;
62}
63#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h
new file mode 100644
index 00000000..a653681d
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pri_ringmaster_gv11b_h_
57#define _hw_pri_ringmaster_gv11b_h_
58
/*
 * PRI ring master register accessors (auto-generated for gv11b).
 * All values are BAR0 byte offsets in the 0x00120xxx ringmaster range;
 * see the naming-convention comment at the top of this header for the
 * _r/_f/_m/_v suffix semantics.
 */

/* command register and its 6-bit cmd field (bits 5:0) */
static inline u32 pri_ringmaster_command_r(void)
{
	return 0x0012004cU;
}
static inline u32 pri_ringmaster_command_cmd_m(void)
{
	return 0x3fU << 0U;
}
static inline u32 pri_ringmaster_command_cmd_v(u32 r)
{
	return (r >> 0U) & 0x3fU;
}
static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void)
{
	return 0x00000000U;
}
static inline u32 pri_ringmaster_command_cmd_start_ring_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void)
{
	return 0x2U;
}
static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void)
{
	return 0x3U;
}
static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void)
{
	return 0x0U;
}
/* data operand register for the command above */
static inline u32 pri_ringmaster_command_data_r(void)
{
	return 0x00120048U;
}
/* ring start results: bit 0 reports connectivity, 1 == pass */
static inline u32 pri_ringmaster_start_results_r(void)
{
	return 0x00120050U;
}
static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void)
{
	return 0x00000001U;
}
/* interrupt status registers; status0 fault bits at 0, 1, 2 and 8 */
static inline u32 pri_ringmaster_intr_status0_r(void)
{
	return 0x00120058U;
}
static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r)
{
	return (r >> 1U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r)
{
	return (r >> 2U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r)
{
	return (r >> 8U) & 0x1U;
}
static inline u32 pri_ringmaster_intr_status1_r(void)
{
	return 0x0012005cU;
}
/* global control: ring reset assert (bit 0 set) / deassert (clear) */
static inline u32 pri_ringmaster_global_ctl_r(void)
{
	return 0x00120060U;
}
static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void)
{
	return 0x1U;
}
static inline u32 pri_ringmaster_global_ctl_ring_reset_deasserted_f(void)
{
	return 0x0U;
}
/* unit enumeration registers: 5-bit counts of FBPs, GPCs and LTCs */
static inline u32 pri_ringmaster_enum_fbp_r(void)
{
	return 0x00120074U;
}
static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 pri_ringmaster_enum_gpc_r(void)
{
	return 0x00120078U;
}
static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
static inline u32 pri_ringmaster_enum_ltc_r(void)
{
	return 0x0012006cU;
}
static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r)
{
	return (r >> 0U) & 0x1fU;
}
167#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h
new file mode 100644
index 00000000..47da22c0
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pri_ringstation_gpc_gv11b_h_
57#define _hw_pri_ringstation_gpc_gv11b_h_
58
/*
 * PRI ring station (GPC) register accessors (auto-generated for gv11b).
 * Offsets are BAR0 byte offsets in the 0x00128xxx GPC ring station range.
 */

/* per-master config registers, 4 bytes apart starting at 0x00128300 */
static inline u32 pri_ringstation_gpc_master_config_r(u32 i)
{
	return 0x00128300U + i*4U;
}
/* GPC0 priv error reporting: address, write data, info and error code */
static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void)
{
	return 0x00128120U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void)
{
	return 0x00128124U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void)
{
	return 0x00128128U;
}
static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void)
{
	return 0x0012812cU;
}
79#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h
new file mode 100644
index 00000000..622b6d7b
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pri_ringstation_sys_gv11b_h_
57#define _hw_pri_ringstation_sys_gv11b_h_
58
/*
 * PRI ring station (SYS) register accessors (auto-generated for gv11b).
 * Offsets are BAR0 byte offsets in the 0x00122xxx SYS ring station range.
 */

/* per-master config registers, 4 bytes apart starting at 0x00122300 */
static inline u32 pri_ringstation_sys_master_config_r(u32 i)
{
	return 0x00122300U + i*4U;
}
/* decode config: 3-bit ring field at bits 2:0 */
static inline u32 pri_ringstation_sys_decode_config_r(void)
{
	return 0x00122204U;
}
static inline u32 pri_ringstation_sys_decode_config_ring_m(void)
{
	return 0x7U << 0U;
}
static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void)
{
	return 0x1U;
}
/* priv error reporting: address, write data, info and error code */
static inline u32 pri_ringstation_sys_priv_error_adr_r(void)
{
	return 0x00122120U;
}
static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void)
{
	return 0x00122124U;
}
static inline u32 pri_ringstation_sys_priv_error_info_r(void)
{
	return 0x00122128U;
}
static inline u32 pri_ringstation_sys_priv_error_code_r(void)
{
	return 0x0012212cU;
}
91#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h
new file mode 100644
index 00000000..808fe316
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_proj_gv11b_h_
57#define _hw_proj_gv11b_h_
58
/*
 * Chip project ("litter") constants for gv11b (auto-generated).
 * _base_v/_stride_v values are BAR0 byte offsets/strides used to address
 * per-unit register instances; _num_*_v values are unit counts for this
 * chip configuration.
 */

/* GPC register space: per-GPC base/stride and broadcast (shared) base */
static inline u32 proj_gpc_base_v(void)
{
	return 0x00500000U;
}
static inline u32 proj_gpc_shared_base_v(void)
{
	return 0x00418000U;
}
static inline u32 proj_gpc_stride_v(void)
{
	return 0x00008000U;
}
/* L2 cache (LTC/LTS) and FBPA strides */
static inline u32 proj_ltc_stride_v(void)
{
	return 0x00002000U;
}
static inline u32 proj_lts_stride_v(void)
{
	return 0x00000200U;
}
static inline u32 proj_fbpa_stride_v(void)
{
	return 0x00004000U;
}
/* PPC-within-GPC addressing */
static inline u32 proj_ppc_in_gpc_base_v(void)
{
	return 0x00003000U;
}
static inline u32 proj_ppc_in_gpc_shared_base_v(void)
{
	return 0x00003e00U;
}
static inline u32 proj_ppc_in_gpc_stride_v(void)
{
	return 0x00000200U;
}
/* ROP addressing */
static inline u32 proj_rop_base_v(void)
{
	return 0x00410000U;
}
static inline u32 proj_rop_shared_base_v(void)
{
	return 0x00408800U;
}
static inline u32 proj_rop_stride_v(void)
{
	return 0x00000400U;
}
/* TPC-within-GPC addressing */
static inline u32 proj_tpc_in_gpc_base_v(void)
{
	return 0x00004000U;
}
static inline u32 proj_tpc_in_gpc_stride_v(void)
{
	return 0x00000800U;
}
static inline u32 proj_tpc_in_gpc_shared_base_v(void)
{
	return 0x00001800U;
}
/* SMPC addressing (per-instance, shared/broadcast and unique bases) */
static inline u32 proj_smpc_base_v(void)
{
	return 0x00000200U;
}
static inline u32 proj_smpc_shared_base_v(void)
{
	return 0x00000300U;
}
static inline u32 proj_smpc_unique_base_v(void)
{
	return 0x00000600U;
}
static inline u32 proj_smpc_stride_v(void)
{
	return 0x00000100U;
}
/* host-visible engine/PBDMA counts for this chip */
static inline u32 proj_host_num_engines_v(void)
{
	return 0x00000004U;
}
static inline u32 proj_host_num_pbdma_v(void)
{
	return 0x00000003U;
}
/* per-chip ("litter") unit counts */
static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void)
{
	return 0x00000004U;
}
static inline u32 proj_scal_litter_num_fbps_v(void)
{
	return 0x00000001U;
}
static inline u32 proj_scal_litter_num_fbpas_v(void)
{
	return 0x00000001U;
}
static inline u32 proj_scal_litter_num_gpcs_v(void)
{
	return 0x00000001U;
}
static inline u32 proj_scal_litter_num_pes_per_gpc_v(void)
{
	return 0x00000002U;
}
static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void)
{
	return 0x00000002U;
}
static inline u32 proj_scal_litter_num_zcull_banks_v(void)
{
	return 0x00000004U;
}
static inline u32 proj_scal_litter_num_sm_per_tpc_v(void)
{
	return 0x00000002U;
}
/* architectural maximums (upper bounds, not this chip's actual counts) */
static inline u32 proj_scal_max_gpcs_v(void)
{
	return 0x00000020U;
}
static inline u32 proj_scal_max_tpc_per_gpc_v(void)
{
	return 0x00000008U;
}
static inline u32 proj_sm_stride_v(void)
{
	return 0x00000080U;
}
187#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
new file mode 100644
index 00000000..eba6d806
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
@@ -0,0 +1,951 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_pwr_gv11b_h_
57#define _hw_pwr_gv11b_h_
58
/*
 * PMU (PWR) falcon register accessors (auto-generated for gv11b).
 * Offsets are BAR0 byte offsets, mostly in the 0x0010axxx PWR falcon
 * range (one MMU register sits at 0x00100ce4 in the FB range).
 */

/* interrupt set/clear/status registers */
static inline u32 pwr_falcon_irqsset_r(void)
{
	return 0x0010a000U;
}
static inline u32 pwr_falcon_irqsset_swgen0_set_f(void)
{
	return 0x40U;
}
static inline u32 pwr_falcon_irqsclr_r(void)
{
	return 0x0010a004U;
}
static inline u32 pwr_falcon_irqstat_r(void)
{
	return 0x0010a008U;
}
static inline u32 pwr_falcon_irqstat_halt_true_f(void)
{
	return 0x10U;
}
static inline u32 pwr_falcon_irqstat_exterr_true_f(void)
{
	return 0x20U;
}
static inline u32 pwr_falcon_irqstat_swgen0_true_f(void)
{
	return 0x40U;
}
static inline u32 pwr_falcon_irqstat_ext_second_true_f(void)
{
	return 0x800U;
}
static inline u32 pwr_falcon_irqmode_r(void)
{
	return 0x0010a00cU;
}
/* interrupt mask set: one bit per source at 0..7, ext sources at 8..15 */
static inline u32 pwr_falcon_irqmset_r(void)
{
	return 0x0010a010U;
}
static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqmset_mthd_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqmset_halt_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqmset_exterr_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v)
{
	return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v)
{
	return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqmset_ext_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v)
{
	return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v)
{
	return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v)
{
	return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v)
{
	return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqmset_ext_miscio_f(u32 v)
{
	return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v)
{
	return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqmset_ext_rsvd8_f(u32 v)
{
	return (v & 0x1U) << 15U;
}
/* interrupt mask clear: same bit layout as irqmset */
static inline u32 pwr_falcon_irqmclr_r(void)
{
	return 0x0010a014U;
}
static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqmclr_halt_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v)
{
	return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v)
{
	return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqmclr_ext_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v)
{
	return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v)
{
	return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v)
{
	return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v)
{
	return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v)
{
	return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v)
{
	return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqmclr_ext_rsvd8_f(u32 v)
{
	return (v & 0x1U) << 15U;
}
static inline u32 pwr_falcon_irqmask_r(void)
{
	return 0x0010a018U;
}
/*
 * interrupt destination routing: host routing bits in 15:0 mirror the
 * irqmset layout; target routing bits occupy the same sources at 31:16.
 */
static inline u32 pwr_falcon_irqdest_r(void)
{
	return 0x0010a01cU;
}
static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v)
{
	return (v & 0x1U) << 0U;
}
static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v)
{
	return (v & 0x1U) << 2U;
}
static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v)
{
	return (v & 0x1U) << 3U;
}
static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v)
{
	return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v)
{
	return (v & 0x1U) << 7U;
}
static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v)
{
	return (v & 0x1U) << 8U;
}
static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v)
{
	return (v & 0x1U) << 9U;
}
static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v)
{
	return (v & 0x1U) << 11U;
}
static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v)
{
	return (v & 0x1U) << 12U;
}
static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v)
{
	return (v & 0x1U) << 13U;
}
static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v)
{
	return (v & 0x1U) << 14U;
}
static inline u32 pwr_falcon_irqdest_host_ext_rsvd8_f(u32 v)
{
	return (v & 0x1U) << 15U;
}
static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v)
{
	return (v & 0x1U) << 16U;
}
static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v)
{
	return (v & 0x1U) << 17U;
}
static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v)
{
	return (v & 0x1U) << 18U;
}
static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v)
{
	return (v & 0x1U) << 19U;
}
static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v)
{
	return (v & 0x1U) << 20U;
}
static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v)
{
	return (v & 0x1U) << 21U;
}
static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v)
{
	return (v & 0x1U) << 22U;
}
static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v)
{
	return (v & 0x1U) << 23U;
}
static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v)
{
	return (v & 0xffU) << 24U;
}
static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v)
{
	return (v & 0x1U) << 25U;
}
static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v)
{
	return (v & 0x1U) << 27U;
}
static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v)
{
	return (v & 0x1U) << 28U;
}
static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v)
{
	return (v & 0x1U) << 29U;
}
static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v)
{
	return (v & 0x1U) << 30U;
}
static inline u32 pwr_falcon_irqdest_target_ext_rsvd8_f(u32 v)
{
	return (v & 0x1U) << 31U;
}
/* context and mailbox registers */
static inline u32 pwr_falcon_curctx_r(void)
{
	return 0x0010a050U;
}
static inline u32 pwr_falcon_nxtctx_r(void)
{
	return 0x0010a054U;
}
static inline u32 pwr_falcon_mailbox0_r(void)
{
	return 0x0010a040U;
}
static inline u32 pwr_falcon_mailbox1_r(void)
{
	return 0x0010a044U;
}
/* interface enable and idle-state registers */
static inline u32 pwr_falcon_itfen_r(void)
{
	return 0x0010a048U;
}
static inline u32 pwr_falcon_itfen_ctxen_enable_f(void)
{
	return 0x1U;
}
static inline u32 pwr_falcon_idlestate_r(void)
{
	return 0x0010a04cU;
}
static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r)
{
	return (r >> 0U) & 0x1U;
}
static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r)
{
	return (r >> 1U) & 0x7fffU;
}
static inline u32 pwr_falcon_os_r(void)
{
	return 0x0010a080U;
}
static inline u32 pwr_falcon_engctl_r(void)
{
	return 0x0010a0a4U;
}
/* CPU control: startcpu (bit 1), halt interrupt (bit 4), alias (bit 6) */
static inline u32 pwr_falcon_cpuctl_r(void)
{
	return 0x0010a100U;
}
static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_m(void)
{
	return 0x1U << 4U;
}
static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r)
{
	return (r >> 4U) & 0x1U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v)
{
	return (v & 0x1U) << 6U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void)
{
	return 0x1U << 6U;
}
static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r)
{
	return (r >> 6U) & 0x1U;
}
static inline u32 pwr_falcon_cpuctl_alias_r(void)
{
	return 0x0010a130U;
}
static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v)
{
	return (v & 0x1U) << 1U;
}
/* SCP control status: debug_mode at bit 20 */
static inline u32 pwr_pmu_scpctl_stat_r(void)
{
	return 0x0010ac08U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v)
{
	return (v & 0x1U) << 20U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void)
{
	return 0x1U << 20U;
}
static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r)
{
	return (r >> 20U) & 0x1U;
}
/* IMEM access ports: control/data/tag, 16 bytes apart per port index */
static inline u32 pwr_falcon_imemc_r(u32 i)
{
	return 0x0010a180U + i*16U;
}
static inline u32 pwr_falcon_imemc_offs_f(u32 v)
{
	return (v & 0x3fU) << 2U;
}
static inline u32 pwr_falcon_imemc_blk_f(u32 v)
{
	return (v & 0xffU) << 8U;
}
static inline u32 pwr_falcon_imemc_aincw_f(u32 v)
{
	return (v & 0x1U) << 24U;
}
static inline u32 pwr_falcon_imemd_r(u32 i)
{
	return 0x0010a184U + i*16U;
}
static inline u32 pwr_falcon_imemt_r(u32 i)
{
	return 0x0010a188U + i*16U;
}
static inline u32 pwr_falcon_sctl_r(void)
{
	return 0x0010a240U;
}
/* NOTE: this register lives in the FB/MMU range, not the falcon range */
static inline u32 pwr_falcon_mmu_phys_sec_r(void)
{
	return 0x00100ce4U;
}
static inline u32 pwr_falcon_bootvec_r(void)
{
	return 0x0010a104U;
}
static inline u32 pwr_falcon_bootvec_vec_f(u32 v)
{
	return (v & 0xffffffffU) << 0U;
}
/* DMA control: DMEM/IMEM scrubbing status at bits 1 and 2 */
static inline u32 pwr_falcon_dmactl_r(void)
{
	return 0x0010a10cU;
}
static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void)
{
	return 0x1U << 1U;
}
static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void)
{
	return 0x1U << 2U;
}
/* hardware config: 9-bit IMEM/DMEM sizes at bits 8:0 and 17:9 */
static inline u32 pwr_falcon_hwcfg_r(void)
{
	return 0x0010a108U;
}
static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r)
{
	return (r >> 0U) & 0x1ffU;
}
static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r)
{
	return (r >> 9U) & 0x1ffU;
}
/* DMA transfer registers: base, offsets and command fields */
static inline u32 pwr_falcon_dmatrfbase_r(void)
{
	return 0x0010a110U;
}
static inline u32 pwr_falcon_dmatrfbase1_r(void)
{
	return 0x0010a128U;
}
static inline u32 pwr_falcon_dmatrfmoffs_r(void)
{
	return 0x0010a114U;
}
static inline u32 pwr_falcon_dmatrfcmd_r(void)
{
	return 0x0010a118U;
}
static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v)
{
	return (v & 0x1U) << 4U;
}
static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v)
{
	return (v & 0x1U) << 5U;
}
static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v)
{
	return (v & 0x7U) << 8U;
}
static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v)
{
	return (v & 0x7U) << 12U;
}
static inline u32 pwr_falcon_dmatrffboffs_r(void)
{
	return 0x0010a11cU;
}
/* external error reporting: address and status (valid flag at bit 31) */
static inline u32 pwr_falcon_exterraddr_r(void)
{
	return 0x0010a168U;
}
static inline u32 pwr_falcon_exterrstat_r(void)
{
	return 0x0010a16cU;
}
static inline u32 pwr_falcon_exterrstat_valid_m(void)
{
	return 0x1U << 31U;
}
static inline u32 pwr_falcon_exterrstat_valid_v(u32 r)
{
	return (r >> 31U) & 0x1U;
}
static inline u32 pwr_falcon_exterrstat_valid_true_v(void)
{
	return 0x00000001U;
}
/* ICD (in-circuit debug) command register */
static inline u32 pwr_pmu_falcon_icd_cmd_r(void)
{
	return 0x0010a200U;
}
591static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void)
592{
593 return 4U;
594}
595static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v)
596{
597 return (v & 0xfU) << 0U;
598}
599static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void)
600{
601 return 0xfU << 0U;
602}
603static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r)
604{
605 return (r >> 0U) & 0xfU;
606}
607static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void)
608{
609 return 0x8U;
610}
611static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void)
612{
613 return 0xeU;
614}
615static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v)
616{
617 return (v & 0x1fU) << 8U;
618}
619static inline u32 pwr_pmu_falcon_icd_rdata_r(void)
620{
621 return 0x0010a20cU;
622}
623static inline u32 pwr_falcon_dmemc_r(u32 i)
624{
625 return 0x0010a1c0U + i*8U;
626}
627static inline u32 pwr_falcon_dmemc_offs_f(u32 v)
628{
629 return (v & 0x3fU) << 2U;
630}
631static inline u32 pwr_falcon_dmemc_offs_m(void)
632{
633 return 0x3fU << 2U;
634}
635static inline u32 pwr_falcon_dmemc_blk_f(u32 v)
636{
637 return (v & 0xffU) << 8U;
638}
639static inline u32 pwr_falcon_dmemc_blk_m(void)
640{
641 return 0xffU << 8U;
642}
643static inline u32 pwr_falcon_dmemc_aincw_f(u32 v)
644{
645 return (v & 0x1U) << 24U;
646}
647static inline u32 pwr_falcon_dmemc_aincr_f(u32 v)
648{
649 return (v & 0x1U) << 25U;
650}
651static inline u32 pwr_falcon_dmemd_r(u32 i)
652{
653 return 0x0010a1c4U + i*8U;
654}
655static inline u32 pwr_pmu_new_instblk_r(void)
656{
657 return 0x0010a480U;
658}
659static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v)
660{
661 return (v & 0xfffffffU) << 0U;
662}
663static inline u32 pwr_pmu_new_instblk_target_fb_f(void)
664{
665 return 0x0U;
666}
667static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void)
668{
669 return 0x20000000U;
670}
671static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void)
672{
673 return 0x30000000U;
674}
675static inline u32 pwr_pmu_new_instblk_valid_f(u32 v)
676{
677 return (v & 0x1U) << 30U;
678}
679static inline u32 pwr_pmu_mutex_id_r(void)
680{
681 return 0x0010a488U;
682}
683static inline u32 pwr_pmu_mutex_id_value_v(u32 r)
684{
685 return (r >> 0U) & 0xffU;
686}
687static inline u32 pwr_pmu_mutex_id_value_init_v(void)
688{
689 return 0x00000000U;
690}
691static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void)
692{
693 return 0x000000ffU;
694}
695static inline u32 pwr_pmu_mutex_id_release_r(void)
696{
697 return 0x0010a48cU;
698}
699static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v)
700{
701 return (v & 0xffU) << 0U;
702}
703static inline u32 pwr_pmu_mutex_id_release_value_m(void)
704{
705 return 0xffU << 0U;
706}
707static inline u32 pwr_pmu_mutex_id_release_value_init_v(void)
708{
709 return 0x00000000U;
710}
711static inline u32 pwr_pmu_mutex_id_release_value_init_f(void)
712{
713 return 0x0U;
714}
715static inline u32 pwr_pmu_mutex_r(u32 i)
716{
717 return 0x0010a580U + i*4U;
718}
719static inline u32 pwr_pmu_mutex__size_1_v(void)
720{
721 return 0x00000010U;
722}
723static inline u32 pwr_pmu_mutex_value_f(u32 v)
724{
725 return (v & 0xffU) << 0U;
726}
727static inline u32 pwr_pmu_mutex_value_v(u32 r)
728{
729 return (r >> 0U) & 0xffU;
730}
731static inline u32 pwr_pmu_mutex_value_initial_lock_f(void)
732{
733 return 0x0U;
734}
735static inline u32 pwr_pmu_queue_head_r(u32 i)
736{
737 return 0x0010a800U + i*4U;
738}
739static inline u32 pwr_pmu_queue_head__size_1_v(void)
740{
741 return 0x00000008U;
742}
743static inline u32 pwr_pmu_queue_head_address_f(u32 v)
744{
745 return (v & 0xffffffffU) << 0U;
746}
747static inline u32 pwr_pmu_queue_head_address_v(u32 r)
748{
749 return (r >> 0U) & 0xffffffffU;
750}
751static inline u32 pwr_pmu_queue_tail_r(u32 i)
752{
753 return 0x0010a820U + i*4U;
754}
755static inline u32 pwr_pmu_queue_tail__size_1_v(void)
756{
757 return 0x00000008U;
758}
759static inline u32 pwr_pmu_queue_tail_address_f(u32 v)
760{
761 return (v & 0xffffffffU) << 0U;
762}
763static inline u32 pwr_pmu_queue_tail_address_v(u32 r)
764{
765 return (r >> 0U) & 0xffffffffU;
766}
767static inline u32 pwr_pmu_msgq_head_r(void)
768{
769 return 0x0010a4c8U;
770}
771static inline u32 pwr_pmu_msgq_head_val_f(u32 v)
772{
773 return (v & 0xffffffffU) << 0U;
774}
775static inline u32 pwr_pmu_msgq_head_val_v(u32 r)
776{
777 return (r >> 0U) & 0xffffffffU;
778}
779static inline u32 pwr_pmu_msgq_tail_r(void)
780{
781 return 0x0010a4ccU;
782}
783static inline u32 pwr_pmu_msgq_tail_val_f(u32 v)
784{
785 return (v & 0xffffffffU) << 0U;
786}
787static inline u32 pwr_pmu_msgq_tail_val_v(u32 r)
788{
789 return (r >> 0U) & 0xffffffffU;
790}
791static inline u32 pwr_pmu_idle_mask_r(u32 i)
792{
793 return 0x0010a504U + i*16U;
794}
795static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void)
796{
797 return 0x1U;
798}
799static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void)
800{
801 return 0x200000U;
802}
803static inline u32 pwr_pmu_idle_count_r(u32 i)
804{
805 return 0x0010a508U + i*16U;
806}
807static inline u32 pwr_pmu_idle_count_value_f(u32 v)
808{
809 return (v & 0x7fffffffU) << 0U;
810}
811static inline u32 pwr_pmu_idle_count_value_v(u32 r)
812{
813 return (r >> 0U) & 0x7fffffffU;
814}
815static inline u32 pwr_pmu_idle_count_reset_f(u32 v)
816{
817 return (v & 0x1U) << 31U;
818}
819static inline u32 pwr_pmu_idle_ctrl_r(u32 i)
820{
821 return 0x0010a50cU + i*16U;
822}
823static inline u32 pwr_pmu_idle_ctrl_value_m(void)
824{
825 return 0x3U << 0U;
826}
827static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void)
828{
829 return 0x2U;
830}
831static inline u32 pwr_pmu_idle_ctrl_value_always_f(void)
832{
833 return 0x3U;
834}
835static inline u32 pwr_pmu_idle_ctrl_filter_m(void)
836{
837 return 0x1U << 2U;
838}
839static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void)
840{
841 return 0x0U;
842}
843static inline u32 pwr_pmu_idle_mask_supp_r(u32 i)
844{
845 return 0x0010a9f0U + i*8U;
846}
847static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i)
848{
849 return 0x0010a9f4U + i*8U;
850}
851static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i)
852{
853 return 0x0010aa30U + i*8U;
854}
855static inline u32 pwr_pmu_debug_r(u32 i)
856{
857 return 0x0010a5c0U + i*4U;
858}
859static inline u32 pwr_pmu_debug__size_1_v(void)
860{
861 return 0x00000004U;
862}
863static inline u32 pwr_pmu_mailbox_r(u32 i)
864{
865 return 0x0010a450U + i*4U;
866}
867static inline u32 pwr_pmu_mailbox__size_1_v(void)
868{
869 return 0x0000000cU;
870}
871static inline u32 pwr_pmu_bar0_addr_r(void)
872{
873 return 0x0010a7a0U;
874}
875static inline u32 pwr_pmu_bar0_data_r(void)
876{
877 return 0x0010a7a4U;
878}
879static inline u32 pwr_pmu_bar0_ctl_r(void)
880{
881 return 0x0010a7acU;
882}
883static inline u32 pwr_pmu_bar0_timeout_r(void)
884{
885 return 0x0010a7a8U;
886}
887static inline u32 pwr_pmu_bar0_fecs_error_r(void)
888{
889 return 0x0010a988U;
890}
891static inline u32 pwr_pmu_bar0_error_status_r(void)
892{
893 return 0x0010a7b0U;
894}
895static inline u32 pwr_pmu_pg_idlefilth_r(u32 i)
896{
897 return 0x0010a6c0U + i*4U;
898}
899static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i)
900{
901 return 0x0010a6e8U + i*4U;
902}
903static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i)
904{
905 return 0x0010a710U + i*4U;
906}
907static inline u32 pwr_pmu_pg_intren_r(u32 i)
908{
909 return 0x0010a760U + i*4U;
910}
911static inline u32 pwr_fbif_transcfg_r(u32 i)
912{
913 return 0x0010ae00U + i*4U;
914}
915static inline u32 pwr_fbif_transcfg_target_local_fb_f(void)
916{
917 return 0x0U;
918}
919static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void)
920{
921 return 0x1U;
922}
923static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void)
924{
925 return 0x2U;
926}
927static inline u32 pwr_fbif_transcfg_mem_type_s(void)
928{
929 return 1U;
930}
931static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v)
932{
933 return (v & 0x1U) << 2U;
934}
935static inline u32 pwr_fbif_transcfg_mem_type_m(void)
936{
937 return 0x1U << 2U;
938}
939static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r)
940{
941 return (r >> 2U) & 0x1U;
942}
943static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void)
944{
945 return 0x0U;
946}
947static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void)
948{
949 return 0x4U;
950}
951#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h
new file mode 100644
index 00000000..1191e580
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h
@@ -0,0 +1,775 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_ram_gv11b_h_
57#define _hw_ram_gv11b_h_
58
59static inline u32 ram_in_ramfc_s(void)
60{
61 return 4096U;
62}
63static inline u32 ram_in_ramfc_w(void)
64{
65 return 0U;
66}
67static inline u32 ram_in_page_dir_base_target_f(u32 v)
68{
69 return (v & 0x3U) << 0U;
70}
71static inline u32 ram_in_page_dir_base_target_w(void)
72{
73 return 128U;
74}
75static inline u32 ram_in_page_dir_base_target_vid_mem_f(void)
76{
77 return 0x0U;
78}
79static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void)
80{
81 return 0x2U;
82}
83static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void)
84{
85 return 0x3U;
86}
87static inline u32 ram_in_page_dir_base_vol_w(void)
88{
89 return 128U;
90}
91static inline u32 ram_in_page_dir_base_vol_true_f(void)
92{
93 return 0x4U;
94}
95static inline u32 ram_in_page_dir_base_vol_false_f(void)
96{
97 return 0x0U;
98}
99static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v)
100{
101 return (v & 0x1U) << 4U;
102}
103static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void)
104{
105 return 0x1U << 4U;
106}
107static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void)
108{
109 return 128U;
110}
111static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void)
112{
113 return 0x10U;
114}
115static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v)
116{
117 return (v & 0x1U) << 5U;
118}
119static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void)
120{
121 return 0x1U << 5U;
122}
123static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void)
124{
125 return 128U;
126}
127static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void)
128{
129 return 0x20U;
130}
131static inline u32 ram_in_big_page_size_f(u32 v)
132{
133 return (v & 0x1U) << 11U;
134}
135static inline u32 ram_in_big_page_size_m(void)
136{
137 return 0x1U << 11U;
138}
139static inline u32 ram_in_big_page_size_w(void)
140{
141 return 128U;
142}
143static inline u32 ram_in_big_page_size_128kb_f(void)
144{
145 return 0x0U;
146}
147static inline u32 ram_in_big_page_size_64kb_f(void)
148{
149 return 0x800U;
150}
151static inline u32 ram_in_page_dir_base_lo_f(u32 v)
152{
153 return (v & 0xfffffU) << 12U;
154}
155static inline u32 ram_in_page_dir_base_lo_w(void)
156{
157 return 128U;
158}
159static inline u32 ram_in_page_dir_base_hi_f(u32 v)
160{
161 return (v & 0xffffffffU) << 0U;
162}
163static inline u32 ram_in_page_dir_base_hi_w(void)
164{
165 return 129U;
166}
167static inline u32 ram_in_engine_cs_w(void)
168{
169 return 132U;
170}
171static inline u32 ram_in_engine_cs_wfi_v(void)
172{
173 return 0x00000000U;
174}
175static inline u32 ram_in_engine_cs_wfi_f(void)
176{
177 return 0x0U;
178}
179static inline u32 ram_in_engine_cs_fg_v(void)
180{
181 return 0x00000001U;
182}
183static inline u32 ram_in_engine_cs_fg_f(void)
184{
185 return 0x8U;
186}
187static inline u32 ram_in_engine_wfi_mode_f(u32 v)
188{
189 return (v & 0x1U) << 2U;
190}
191static inline u32 ram_in_engine_wfi_mode_w(void)
192{
193 return 132U;
194}
195static inline u32 ram_in_engine_wfi_mode_physical_v(void)
196{
197 return 0x00000000U;
198}
199static inline u32 ram_in_engine_wfi_mode_virtual_v(void)
200{
201 return 0x00000001U;
202}
203static inline u32 ram_in_engine_wfi_target_f(u32 v)
204{
205 return (v & 0x3U) << 0U;
206}
207static inline u32 ram_in_engine_wfi_target_w(void)
208{
209 return 132U;
210}
211static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void)
212{
213 return 0x00000002U;
214}
215static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void)
216{
217 return 0x00000003U;
218}
219static inline u32 ram_in_engine_wfi_target_local_mem_v(void)
220{
221 return 0x00000000U;
222}
223static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v)
224{
225 return (v & 0xfffffU) << 12U;
226}
227static inline u32 ram_in_engine_wfi_ptr_lo_w(void)
228{
229 return 132U;
230}
231static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v)
232{
233 return (v & 0xffU) << 0U;
234}
235static inline u32 ram_in_engine_wfi_ptr_hi_w(void)
236{
237 return 133U;
238}
239static inline u32 ram_in_engine_wfi_veid_f(u32 v)
240{
241 return (v & 0x3fU) << 0U;
242}
243static inline u32 ram_in_engine_wfi_veid_w(void)
244{
245 return 134U;
246}
247static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v)
248{
249 return (v & 0xffffffffU) << 0U;
250}
251static inline u32 ram_in_eng_method_buffer_addr_lo_w(void)
252{
253 return 136U;
254}
255static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v)
256{
257 return (v & 0x1ffffU) << 0U;
258}
259static inline u32 ram_in_eng_method_buffer_addr_hi_w(void)
260{
261 return 137U;
262}
263static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i)
264{
265 return (v & 0x3U) << (0U + i*0U);
266}
267static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void)
268{
269 return 0x00000040U;
270}
271static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void)
272{
273 return 0x00000000U;
274}
275static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void)
276{
277 return 0x00000001U;
278}
279static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void)
280{
281 return 0x00000002U;
282}
283static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void)
284{
285 return 0x00000003U;
286}
287static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i)
288{
289 return (v & 0x1U) << (2U + i*0U);
290}
291static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void)
292{
293 return 0x00000040U;
294}
295static inline u32 ram_in_sc_page_dir_base_vol_true_v(void)
296{
297 return 0x00000001U;
298}
299static inline u32 ram_in_sc_page_dir_base_vol_false_v(void)
300{
301 return 0x00000000U;
302}
303static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i)
304{
305 return (v & 0x1U) << (4U + i*0U);
306}
307static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void)
308{
309 return 0x00000040U;
310}
311static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void)
312{
313 return 0x00000001U;
314}
315static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void)
316{
317 return 0x00000000U;
318}
319static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i)
320{
321 return (v & 0x1U) << (5U + i*0U);
322}
323static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void)
324{
325 return 0x00000040U;
326}
327static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void)
328{
329 return 0x00000001U;
330}
331static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void)
332{
333 return 0x00000000U;
334}
335static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i)
336{
337 return (v & 0x1U) << (10U + i*0U);
338}
339static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void)
340{
341 return 0x00000040U;
342}
343static inline u32 ram_in_sc_use_ver2_pt_format_false_v(void)
344{
345 return 0x00000000U;
346}
347static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void)
348{
349 return 0x00000001U;
350}
351static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i)
352{
353 return (v & 0x1U) << (11U + i*0U);
354}
355static inline u32 ram_in_sc_big_page_size__size_1_v(void)
356{
357 return 0x00000040U;
358}
359static inline u32 ram_in_sc_big_page_size_64kb_v(void)
360{
361 return 0x00000001U;
362}
363static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i)
364{
365 return (v & 0xfffffU) << (12U + i*0U);
366}
367static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void)
368{
369 return 0x00000040U;
370}
371static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i)
372{
373 return (v & 0xffffffffU) << (0U + i*0U);
374}
375static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void)
376{
377 return 0x00000040U;
378}
379static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v)
380{
381 return (v & 0x3U) << 0U;
382}
383static inline u32 ram_in_sc_page_dir_base_target_0_w(void)
384{
385 return 168U;
386}
387static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v)
388{
389 return (v & 0x1U) << 2U;
390}
391static inline u32 ram_in_sc_page_dir_base_vol_0_w(void)
392{
393 return 168U;
394}
395static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v)
396{
397 return (v & 0x1U) << 4U;
398}
399static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void)
400{
401 return 168U;
402}
403static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v)
404{
405 return (v & 0x1U) << 5U;
406}
407static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void)
408{
409 return 168U;
410}
411static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v)
412{
413 return (v & 0x1U) << 10U;
414}
415static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void)
416{
417 return 168U;
418}
419static inline u32 ram_in_sc_big_page_size_0_f(u32 v)
420{
421 return (v & 0x1U) << 11U;
422}
423static inline u32 ram_in_sc_big_page_size_0_w(void)
424{
425 return 168U;
426}
427static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v)
428{
429 return (v & 0xfffffU) << 12U;
430}
431static inline u32 ram_in_sc_page_dir_base_lo_0_w(void)
432{
433 return 168U;
434}
435static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v)
436{
437 return (v & 0xffffffffU) << 0U;
438}
439static inline u32 ram_in_sc_page_dir_base_hi_0_w(void)
440{
441 return 169U;
442}
443static inline u32 ram_in_base_shift_v(void)
444{
445 return 0x0000000cU;
446}
447static inline u32 ram_in_alloc_size_v(void)
448{
449 return 0x00001000U;
450}
451static inline u32 ram_fc_size_val_v(void)
452{
453 return 0x00000200U;
454}
455static inline u32 ram_fc_gp_put_w(void)
456{
457 return 0U;
458}
459static inline u32 ram_fc_userd_w(void)
460{
461 return 2U;
462}
463static inline u32 ram_fc_userd_hi_w(void)
464{
465 return 3U;
466}
467static inline u32 ram_fc_signature_w(void)
468{
469 return 4U;
470}
471static inline u32 ram_fc_gp_get_w(void)
472{
473 return 5U;
474}
475static inline u32 ram_fc_pb_get_w(void)
476{
477 return 6U;
478}
479static inline u32 ram_fc_pb_get_hi_w(void)
480{
481 return 7U;
482}
483static inline u32 ram_fc_pb_top_level_get_w(void)
484{
485 return 8U;
486}
487static inline u32 ram_fc_pb_top_level_get_hi_w(void)
488{
489 return 9U;
490}
491static inline u32 ram_fc_acquire_w(void)
492{
493 return 12U;
494}
495static inline u32 ram_fc_sem_addr_hi_w(void)
496{
497 return 14U;
498}
499static inline u32 ram_fc_sem_addr_lo_w(void)
500{
501 return 15U;
502}
503static inline u32 ram_fc_sem_payload_lo_w(void)
504{
505 return 16U;
506}
507static inline u32 ram_fc_sem_payload_hi_w(void)
508{
509 return 39U;
510}
511static inline u32 ram_fc_sem_execute_w(void)
512{
513 return 17U;
514}
515static inline u32 ram_fc_gp_base_w(void)
516{
517 return 18U;
518}
519static inline u32 ram_fc_gp_base_hi_w(void)
520{
521 return 19U;
522}
523static inline u32 ram_fc_gp_fetch_w(void)
524{
525 return 20U;
526}
527static inline u32 ram_fc_pb_fetch_w(void)
528{
529 return 21U;
530}
531static inline u32 ram_fc_pb_fetch_hi_w(void)
532{
533 return 22U;
534}
535static inline u32 ram_fc_pb_put_w(void)
536{
537 return 23U;
538}
539static inline u32 ram_fc_pb_put_hi_w(void)
540{
541 return 24U;
542}
543static inline u32 ram_fc_pb_header_w(void)
544{
545 return 33U;
546}
547static inline u32 ram_fc_pb_count_w(void)
548{
549 return 34U;
550}
551static inline u32 ram_fc_subdevice_w(void)
552{
553 return 37U;
554}
555static inline u32 ram_fc_target_w(void)
556{
557 return 43U;
558}
559static inline u32 ram_fc_hce_ctrl_w(void)
560{
561 return 57U;
562}
563static inline u32 ram_fc_chid_w(void)
564{
565 return 58U;
566}
567static inline u32 ram_fc_chid_id_f(u32 v)
568{
569 return (v & 0xfffU) << 0U;
570}
571static inline u32 ram_fc_chid_id_w(void)
572{
573 return 0U;
574}
575static inline u32 ram_fc_config_w(void)
576{
577 return 61U;
578}
579static inline u32 ram_fc_runlist_timeslice_w(void)
580{
581 return 62U;
582}
583static inline u32 ram_fc_set_channel_info_w(void)
584{
585 return 63U;
586}
587static inline u32 ram_userd_base_shift_v(void)
588{
589 return 0x00000009U;
590}
591static inline u32 ram_userd_chan_size_v(void)
592{
593 return 0x00000200U;
594}
595static inline u32 ram_userd_put_w(void)
596{
597 return 16U;
598}
599static inline u32 ram_userd_get_w(void)
600{
601 return 17U;
602}
603static inline u32 ram_userd_ref_w(void)
604{
605 return 18U;
606}
607static inline u32 ram_userd_put_hi_w(void)
608{
609 return 19U;
610}
611static inline u32 ram_userd_ref_threshold_w(void)
612{
613 return 20U;
614}
615static inline u32 ram_userd_top_level_get_w(void)
616{
617 return 22U;
618}
619static inline u32 ram_userd_top_level_get_hi_w(void)
620{
621 return 23U;
622}
623static inline u32 ram_userd_get_hi_w(void)
624{
625 return 24U;
626}
627static inline u32 ram_userd_gp_get_w(void)
628{
629 return 34U;
630}
631static inline u32 ram_userd_gp_put_w(void)
632{
633 return 35U;
634}
635static inline u32 ram_userd_gp_top_level_get_w(void)
636{
637 return 22U;
638}
639static inline u32 ram_userd_gp_top_level_get_hi_w(void)
640{
641 return 23U;
642}
643static inline u32 ram_rl_entry_size_v(void)
644{
645 return 0x00000010U;
646}
647static inline u32 ram_rl_entry_type_f(u32 v)
648{
649 return (v & 0x1U) << 0U;
650}
651static inline u32 ram_rl_entry_type_channel_v(void)
652{
653 return 0x00000000U;
654}
655static inline u32 ram_rl_entry_type_tsg_v(void)
656{
657 return 0x00000001U;
658}
659static inline u32 ram_rl_entry_id_f(u32 v)
660{
661 return (v & 0xfffU) << 0U;
662}
663static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v)
664{
665 return (v & 0x1U) << 1U;
666}
667static inline u32 ram_rl_entry_chan_inst_target_f(u32 v)
668{
669 return (v & 0x3U) << 4U;
670}
671static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void)
672{
673 return 0x00000003U;
674}
675static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void)
676{
677 return 0x00000002U;
678}
679static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void)
680{
681 return 0x00000000U;
682}
683static inline u32 ram_rl_entry_chan_userd_target_f(u32 v)
684{
685 return (v & 0x3U) << 6U;
686}
687static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void)
688{
689 return 0x00000000U;
690}
691static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void)
692{
693 return 0x00000001U;
694}
695static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void)
696{
697 return 0x00000002U;
698}
699static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void)
700{
701 return 0x00000003U;
702}
703static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v)
704{
705 return (v & 0xffffffU) << 8U;
706}
707static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v)
708{
709 return (v & 0xffffffffU) << 0U;
710}
711static inline u32 ram_rl_entry_chid_f(u32 v)
712{
713 return (v & 0xfffU) << 0U;
714}
715static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 v)
716{
717 return (v & 0xfffffU) << 12U;
718}
719static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v)
720{
721 return (v & 0xffffffffU) << 0U;
722}
723static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v)
724{
725 return (v & 0xfU) << 16U;
726}
727static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void)
728{
729 return 0x00000003U;
730}
731static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v)
732{
733 return (v & 0xffU) << 24U;
734}
735static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void)
736{
737 return 0x00000080U;
738}
739static inline u32 ram_rl_entry_tsg_timeslice_timeout_disable_v(void)
740{
741 return 0x00000000U;
742}
743static inline u32 ram_rl_entry_tsg_length_f(u32 v)
744{
745 return (v & 0xffU) << 0U;
746}
747static inline u32 ram_rl_entry_tsg_length_init_v(void)
748{
749 return 0x00000000U;
750}
751static inline u32 ram_rl_entry_tsg_length_min_v(void)
752{
753 return 0x00000001U;
754}
755static inline u32 ram_rl_entry_tsg_length_max_v(void)
756{
757 return 0x00000080U;
758}
759static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v)
760{
761 return (v & 0xfffU) << 0U;
762}
763static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void)
764{
765 return 0x00000008U;
766}
767static inline u32 ram_rl_entry_chan_userd_align_shift_v(void)
768{
769 return 0x00000008U;
770}
771static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void)
772{
773 return 0x0000000cU;
774}
775#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h
new file mode 100644
index 00000000..8f8981e8
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h
@@ -0,0 +1,435 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_therm_gv11b_h_
57#define _hw_therm_gv11b_h_
58
/* Auto-generated THERM (thermal event / clock slowdown) register accessors
 * for gv11b. Naming follows the convention documented at the top of this
 * header: _r = register offset, _f = value shifted into its field,
 * _m = field mask, _v = field extracted from a register value.
 * The offsets and field layouts mirror the gv11b hardware manuals; do not
 * hand-edit individual values.
 */
/* THERM_USE_A: enable bits for external THERM event inputs 0..2. */
59static inline u32 therm_use_a_r(void)
60{
61 return 0x00020798U;
62}
63static inline u32 therm_use_a_ext_therm_0_enable_f(void)
64{
65 return 0x1U;
66}
67static inline u32 therm_use_a_ext_therm_1_enable_f(void)
68{
69 return 0x2U;
70}
71static inline u32 therm_use_a_ext_therm_2_enable_f(void)
72{
73 return 0x4U;
74}
/* THERM_EVT_EXT_THERM_{0,1,2}: per external-event slow factor (bits 24..29)
 * and mode (bits 30..31: normal/inverted/forced/cleared). */
75static inline u32 therm_evt_ext_therm_0_r(void)
76{
77 return 0x00020700U;
78}
79static inline u32 therm_evt_ext_therm_0_slow_factor_f(u32 v)
80{
81 return (v & 0x3fU) << 24U;
82}
83static inline u32 therm_evt_ext_therm_0_slow_factor_init_v(void)
84{
85 return 0x00000001U;
86}
87static inline u32 therm_evt_ext_therm_0_mode_f(u32 v)
88{
89 return (v & 0x3U) << 30U;
90}
91static inline u32 therm_evt_ext_therm_0_mode_normal_v(void)
92{
93 return 0x00000000U;
94}
95static inline u32 therm_evt_ext_therm_0_mode_inverted_v(void)
96{
97 return 0x00000001U;
98}
99static inline u32 therm_evt_ext_therm_0_mode_forced_v(void)
100{
101 return 0x00000002U;
102}
103static inline u32 therm_evt_ext_therm_0_mode_cleared_v(void)
104{
105 return 0x00000003U;
106}
107static inline u32 therm_evt_ext_therm_1_r(void)
108{
109 return 0x00020704U;
110}
111static inline u32 therm_evt_ext_therm_1_slow_factor_f(u32 v)
112{
113 return (v & 0x3fU) << 24U;
114}
115static inline u32 therm_evt_ext_therm_1_slow_factor_init_v(void)
116{
117 return 0x00000002U;
118}
119static inline u32 therm_evt_ext_therm_1_mode_f(u32 v)
120{
121 return (v & 0x3U) << 30U;
122}
123static inline u32 therm_evt_ext_therm_1_mode_normal_v(void)
124{
125 return 0x00000000U;
126}
127static inline u32 therm_evt_ext_therm_1_mode_inverted_v(void)
128{
129 return 0x00000001U;
130}
131static inline u32 therm_evt_ext_therm_1_mode_forced_v(void)
132{
133 return 0x00000002U;
134}
135static inline u32 therm_evt_ext_therm_1_mode_cleared_v(void)
136{
137 return 0x00000003U;
138}
139static inline u32 therm_evt_ext_therm_2_r(void)
140{
141 return 0x00020708U;
142}
143static inline u32 therm_evt_ext_therm_2_slow_factor_f(u32 v)
144{
145 return (v & 0x3fU) << 24U;
146}
147static inline u32 therm_evt_ext_therm_2_slow_factor_init_v(void)
148{
149 return 0x00000003U;
150}
151static inline u32 therm_evt_ext_therm_2_mode_f(u32 v)
152{
153 return (v & 0x3U) << 30U;
154}
155static inline u32 therm_evt_ext_therm_2_mode_normal_v(void)
156{
157 return 0x00000000U;
158}
159static inline u32 therm_evt_ext_therm_2_mode_inverted_v(void)
160{
161 return 0x00000001U;
162}
163static inline u32 therm_evt_ext_therm_2_mode_forced_v(void)
164{
165 return 0x00000002U;
166}
167static inline u32 therm_evt_ext_therm_2_mode_cleared_v(void)
168{
169 return 0x00000003U;
170}
/* Misc thermal configuration registers. */
171static inline u32 therm_weight_1_r(void)
172{
173 return 0x00020024U;
174}
175static inline u32 therm_config1_r(void)
176{
177 return 0x00020050U;
178}
179static inline u32 therm_config2_r(void)
180{
181 return 0x00020130U;
182}
183static inline u32 therm_config2_slowdown_factor_extended_f(u32 v)
184{
185 return (v & 0x1U) << 24U;
186}
187static inline u32 therm_config2_grad_enable_f(u32 v)
188{
189 return (v & 0x1U) << 31U;
190}
/* THERM_GATE_CTRL(i): per-engine clock-gating control; indexed register
 * array with a 4-byte stride. eng_clk/blk_clk select run/auto/stop gating,
 * the idle filter fields (exp/mant) and before/after delays carry the
 * production (__prod) settings used by elpg/clock gating code. */
191static inline u32 therm_gate_ctrl_r(u32 i)
192{
193 return 0x00020200U + i*4U;
194}
195static inline u32 therm_gate_ctrl_eng_clk_m(void)
196{
197 return 0x3U << 0U;
198}
199static inline u32 therm_gate_ctrl_eng_clk_run_f(void)
200{
201 return 0x0U;
202}
203static inline u32 therm_gate_ctrl_eng_clk_auto_f(void)
204{
205 return 0x1U;
206}
207static inline u32 therm_gate_ctrl_eng_clk_stop_f(void)
208{
209 return 0x2U;
210}
211static inline u32 therm_gate_ctrl_blk_clk_m(void)
212{
213 return 0x3U << 2U;
214}
215static inline u32 therm_gate_ctrl_blk_clk_run_f(void)
216{
217 return 0x0U;
218}
219static inline u32 therm_gate_ctrl_blk_clk_auto_f(void)
220{
221 return 0x4U;
222}
223static inline u32 therm_gate_ctrl_idle_holdoff_m(void)
224{
225 return 0x1U << 4U;
226}
227static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void)
228{
229 return 0x0U;
230}
231static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void)
232{
233 return 0x10U;
234}
235static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v)
236{
237 return (v & 0x1fU) << 8U;
238}
239static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void)
240{
241 return 0x1fU << 8U;
242}
243static inline u32 therm_gate_ctrl_eng_idle_filt_exp__prod_f(void)
244{
245 return 0x200U;
246}
247static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v)
248{
249 return (v & 0x7U) << 13U;
250}
251static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void)
252{
253 return 0x7U << 13U;
254}
255static inline u32 therm_gate_ctrl_eng_idle_filt_mant__prod_f(void)
256{
257 return 0x2000U;
258}
259static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v)
260{
261 return (v & 0xfU) << 16U;
262}
263static inline u32 therm_gate_ctrl_eng_delay_before_m(void)
264{
265 return 0xfU << 16U;
266}
267static inline u32 therm_gate_ctrl_eng_delay_before__prod_f(void)
268{
269 return 0x40000U;
270}
271static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v)
272{
273 return (v & 0xfU) << 20U;
274}
275static inline u32 therm_gate_ctrl_eng_delay_after_m(void)
276{
277 return 0xfU << 20U;
278}
279static inline u32 therm_gate_ctrl_eng_delay_after__prod_f(void)
280{
281 return 0x0U;
282}
/* Per-unit idle filters (FECS / HUBMMU); full 32-bit value fields. */
283static inline u32 therm_fecs_idle_filter_r(void)
284{
285 return 0x00020288U;
286}
287static inline u32 therm_fecs_idle_filter_value_m(void)
288{
289 return 0xffffffffU << 0U;
290}
291static inline u32 therm_fecs_idle_filter_value__prod_f(void)
292{
293 return 0x0U;
294}
295static inline u32 therm_hubmmu_idle_filter_r(void)
296{
297 return 0x0002028cU;
298}
299static inline u32 therm_hubmmu_idle_filter_value_m(void)
300{
301 return 0xffffffffU << 0U;
302}
303static inline u32 therm_hubmmu_idle_filter_value__prod_f(void)
304{
305 return 0x0U;
306}
/* THERM_CLK_SLOWDOWN(i): idle slowdown factor (bits 16..21), indexed,
 * 4-byte stride. */
307static inline u32 therm_clk_slowdown_r(u32 i)
308{
309 return 0x00020160U + i*4U;
310}
311static inline u32 therm_clk_slowdown_idle_factor_f(u32 v)
312{
313 return (v & 0x3fU) << 16U;
314}
315static inline u32 therm_clk_slowdown_idle_factor_m(void)
316{
317 return 0x3fU << 16U;
318}
319static inline u32 therm_clk_slowdown_idle_factor_v(u32 r)
320{
321 return (r >> 16U) & 0x3fU;
322}
323static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void)
324{
325 return 0x0U;
326}
/* Gradual-stepping slowdown table: five 6-bit factors packed per entry;
 * factor0 carries named fpdiv ratios (by1p5/by2/by4/by8). */
327static inline u32 therm_grad_stepping_table_r(u32 i)
328{
329 return 0x000202c8U + i*4U;
330}
331static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v)
332{
333 return (v & 0x3fU) << 0U;
334}
335static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void)
336{
337 return 0x3fU << 0U;
338}
339static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void)
340{
341 return 0x1U;
342}
343static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void)
344{
345 return 0x2U;
346}
347static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void)
348{
349 return 0x6U;
350}
351static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void)
352{
353 return 0xeU;
354}
355static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v)
356{
357 return (v & 0x3fU) << 6U;
358}
359static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void)
360{
361 return 0x3fU << 6U;
362}
363static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v)
364{
365 return (v & 0x3fU) << 12U;
366}
367static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void)
368{
369 return 0x3fU << 12U;
370}
371static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v)
372{
373 return (v & 0x3fU) << 18U;
374}
375static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void)
376{
377 return 0x3fU << 18U;
378}
379static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v)
380{
381 return (v & 0x3fU) << 24U;
382}
383static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void)
384{
385 return 0x3fU << 24U;
386}
/* Gradual-stepping feature enable (stepping0) and pdiv duration (stepping1). */
387static inline u32 therm_grad_stepping0_r(void)
388{
389 return 0x000202c0U;
390}
391static inline u32 therm_grad_stepping0_feature_s(void)
392{
393 return 1U;
394}
395static inline u32 therm_grad_stepping0_feature_f(u32 v)
396{
397 return (v & 0x1U) << 0U;
398}
399static inline u32 therm_grad_stepping0_feature_m(void)
400{
401 return 0x1U << 0U;
402}
403static inline u32 therm_grad_stepping0_feature_v(u32 r)
404{
405 return (r >> 0U) & 0x1U;
406}
407static inline u32 therm_grad_stepping0_feature_enable_f(void)
408{
409 return 0x1U;
410}
411static inline u32 therm_grad_stepping1_r(void)
412{
413 return 0x000202c4U;
414}
415static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v)
416{
417 return (v & 0x1ffffU) << 0U;
418}
/* THERM_CLK_TIMING(i): gradual-slowdown enable bit (bit 16), indexed,
 * 4-byte stride. */
419static inline u32 therm_clk_timing_r(u32 i)
420{
421 return 0x000203c0U + i*4U;
422}
423static inline u32 therm_clk_timing_grad_slowdown_f(u32 v)
424{
425 return (v & 0x1U) << 16U;
426}
427static inline u32 therm_clk_timing_grad_slowdown_m(void)
428{
429 return 0x1U << 16U;
430}
431static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void)
432{
433 return 0x10000U;
434}
435#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h
new file mode 100644
index 00000000..61440213
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_timer_gv11b_h_
57#define _hw_timer_gv11b_h_
58
/* Auto-generated PTIMER register accessors for gv11b. Naming follows the
 * _r/_f/_m/_v convention documented at the top of this header. Values
 * mirror the gv11b hardware manuals; do not hand-edit.
 */
/* TIMER_PRI_TIMEOUT: PRI access timeout period (bits 0..23) and enable
 * (bit 31). */
59static inline u32 timer_pri_timeout_r(void)
60{
61 return 0x00009080U;
62}
63static inline u32 timer_pri_timeout_period_f(u32 v)
64{
65 return (v & 0xffffffU) << 0U;
66}
67static inline u32 timer_pri_timeout_period_m(void)
68{
69 return 0xffffffU << 0U;
70}
71static inline u32 timer_pri_timeout_period_v(u32 r)
72{
73 return (r >> 0U) & 0xffffffU;
74}
75static inline u32 timer_pri_timeout_en_f(u32 v)
76{
77 return (v & 0x1U) << 31U;
78}
79static inline u32 timer_pri_timeout_en_m(void)
80{
81 return 0x1U << 31U;
82}
83static inline u32 timer_pri_timeout_en_v(u32 r)
84{
85 return (r >> 31U) & 0x1U;
86}
87static inline u32 timer_pri_timeout_en_en_enabled_f(void)
88{
89 return 0x80000000U;
90}
91static inline u32 timer_pri_timeout_en_en_disabled_f(void)
92{
93 return 0x0U;
94}
/* Saved state and FECS error code captured on a PRI timeout. */
95static inline u32 timer_pri_timeout_save_0_r(void)
96{
97 return 0x00009084U;
98}
99static inline u32 timer_pri_timeout_save_1_r(void)
100{
101 return 0x00009088U;
102}
103static inline u32 timer_pri_timeout_fecs_errcode_r(void)
104{
105 return 0x0000908cU;
106}
/* Free-running timer value, split across two 32-bit registers. */
107static inline u32 timer_time_0_r(void)
108{
109 return 0x00009400U;
110}
111static inline u32 timer_time_1_r(void)
112{
113 return 0x00009410U;
114}
115#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h
new file mode 100644
index 00000000..89e4aebb
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_top_gv11b_h_
57#define _hw_top_gv11b_h_
58
/* Auto-generated TOP (chip topology / device info) register accessors for
 * gv11b. Naming follows the _r/_f/_m/_v convention documented at the top
 * of this header. Values mirror the gv11b hardware manuals; do not
 * hand-edit.
 */
/* Unit-count registers: GPCs, TPCs per GPC, FBPs, LTCs per FBP, slices
 * per LTC, LTCs and CEs. All counts live in the low 5 bits. */
59static inline u32 top_num_gpcs_r(void)
60{
61 return 0x00022430U;
62}
63static inline u32 top_num_gpcs_value_v(u32 r)
64{
65 return (r >> 0U) & 0x1fU;
66}
67static inline u32 top_tpc_per_gpc_r(void)
68{
69 return 0x00022434U;
70}
71static inline u32 top_tpc_per_gpc_value_v(u32 r)
72{
73 return (r >> 0U) & 0x1fU;
74}
75static inline u32 top_num_fbps_r(void)
76{
77 return 0x00022438U;
78}
79static inline u32 top_num_fbps_value_v(u32 r)
80{
81 return (r >> 0U) & 0x1fU;
82}
83static inline u32 top_ltc_per_fbp_r(void)
84{
85 return 0x00022450U;
86}
87static inline u32 top_ltc_per_fbp_value_v(u32 r)
88{
89 return (r >> 0U) & 0x1fU;
90}
91static inline u32 top_slices_per_ltc_r(void)
92{
93 return 0x0002245cU;
94}
95static inline u32 top_slices_per_ltc_value_v(u32 r)
96{
97 return (r >> 0U) & 0x1fU;
98}
99static inline u32 top_num_ltcs_r(void)
100{
101 return 0x00022454U;
102}
103static inline u32 top_num_ces_r(void)
104{
105 return 0x00022444U;
106}
107static inline u32 top_num_ces_value_v(u32 r)
108{
109 return (r >> 0U) & 0x1fU;
110}
/* TOP_DEVICE_INFO(i): device-info table, 0x40 entries, 4-byte stride.
 * Each entry is either an enum entry (engine/runlist/intr/reset enums plus
 * a type) or a data entry (inst id, pri base, fault id), selected by the
 * low "entry" bits; the chain bit links multi-word entries. */
111static inline u32 top_device_info_r(u32 i)
112{
113 return 0x00022700U + i*4U;
114}
115static inline u32 top_device_info__size_1_v(void)
116{
117 return 0x00000040U;
118}
119static inline u32 top_device_info_chain_v(u32 r)
120{
121 return (r >> 31U) & 0x1U;
122}
123static inline u32 top_device_info_chain_enable_v(void)
124{
125 return 0x00000001U;
126}
127static inline u32 top_device_info_engine_enum_v(u32 r)
128{
129 return (r >> 26U) & 0xfU;
130}
131static inline u32 top_device_info_runlist_enum_v(u32 r)
132{
133 return (r >> 21U) & 0xfU;
134}
135static inline u32 top_device_info_intr_enum_v(u32 r)
136{
137 return (r >> 15U) & 0x1fU;
138}
139static inline u32 top_device_info_reset_enum_v(u32 r)
140{
141 return (r >> 9U) & 0x1fU;
142}
143static inline u32 top_device_info_type_enum_v(u32 r)
144{
145 return (r >> 2U) & 0x1fffffffU;
146}
147static inline u32 top_device_info_type_enum_graphics_v(void)
148{
149 return 0x00000000U;
150}
151static inline u32 top_device_info_type_enum_graphics_f(void)
152{
153 return 0x0U;
154}
155static inline u32 top_device_info_type_enum_copy2_v(void)
156{
157 return 0x00000003U;
158}
159static inline u32 top_device_info_type_enum_copy2_f(void)
160{
161 return 0xcU;
162}
163static inline u32 top_device_info_type_enum_lce_v(void)
164{
165 return 0x00000013U;
166}
167static inline u32 top_device_info_type_enum_lce_f(void)
168{
169 return 0x4cU;
170}
171static inline u32 top_device_info_engine_v(u32 r)
172{
173 return (r >> 5U) & 0x1U;
174}
175static inline u32 top_device_info_runlist_v(u32 r)
176{
177 return (r >> 4U) & 0x1U;
178}
179static inline u32 top_device_info_intr_v(u32 r)
180{
181 return (r >> 3U) & 0x1U;
182}
183static inline u32 top_device_info_reset_v(u32 r)
184{
185 return (r >> 2U) & 0x1U;
186}
187static inline u32 top_device_info_entry_v(u32 r)
188{
189 return (r >> 0U) & 0x3U;
190}
191static inline u32 top_device_info_entry_not_valid_v(void)
192{
193 return 0x00000000U;
194}
195static inline u32 top_device_info_entry_enum_v(void)
196{
197 return 0x00000002U;
198}
199static inline u32 top_device_info_entry_data_v(void)
200{
201 return 0x00000001U;
202}
/* Data-entry fields: inst id, pri base (4KB-aligned: align shift 12) and
 * fault id. */
203static inline u32 top_device_info_data_type_v(u32 r)
204{
205 return (r >> 30U) & 0x1U;
206}
207static inline u32 top_device_info_data_type_enum2_v(void)
208{
209 return 0x00000000U;
210}
211static inline u32 top_device_info_data_inst_id_v(u32 r)
212{
213 return (r >> 26U) & 0xfU;
214}
215static inline u32 top_device_info_data_pri_base_v(u32 r)
216{
217 return (r >> 12U) & 0xfffU;
218}
219static inline u32 top_device_info_data_pri_base_align_v(void)
220{
221 return 0x0000000cU;
222}
223static inline u32 top_device_info_data_fault_id_enum_v(u32 r)
224{
225 return (r >> 3U) & 0x7fU;
226}
227static inline u32 top_device_info_data_fault_id_v(u32 r)
228{
229 return (r >> 2U) & 0x1U;
230}
231static inline u32 top_device_info_data_fault_id_valid_v(void)
232{
233 return 0x00000001U;
234}
235#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h
new file mode 100644
index 00000000..e3749690
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22/*
23 * Function naming determines intended use:
24 *
25 * <x>_r(void) : Returns the offset for register <x>.
26 *
27 * <x>_o(void) : Returns the offset for element <x>.
28 *
29 * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
30 *
31 * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
32 *
33 * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
34 * and masked to place it at field <y> of register <x>. This value
35 * can be |'d with others to produce a full register value for
36 * register <x>.
37 *
38 * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
39 * value can be ~'d and then &'d to clear the value of field <y> for
40 * register <x>.
41 *
42 * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
43 * to place it at field <y> of register <x>. This value can be |'d
44 * with others to produce a full register value for <x>.
45 *
46 * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
47 * <x> value 'r' after being shifted to place its LSB at bit 0.
48 * This value is suitable for direct comparison with other unshifted
49 * values appropriate for use in field <y> of register <x>.
50 *
51 * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
52 * field <y> of register <x>. This value is suitable for direct
53 * comparison with unshifted values appropriate for use in field <y>
54 * of register <x>.
55 */
56#ifndef _hw_usermode_gv11b_h_
57#define _hw_usermode_gv11b_h_
58
/* Auto-generated USERMODE region accessors for gv11b. Naming follows the
 * _r/_f/_v convention documented at the top of this header.
 *
 * Fix: every other generated gv11b header in this series (hw_therm, hw_timer,
 * hw_top, hw_ram, ...) writes its integer constants and shift counts with an
 * explicit `U` suffix so all register math is done in unsigned arithmetic;
 * this file alone used plain (signed int) literals. The `U` suffixes are
 * added here for consistency (MISRA C:2012 Rule 7.2 style) — the numeric
 * values are unchanged.
 */
/* USERMODE_CFG0: class id of the usermode region. */
59static inline u32 usermode_cfg0_r(void)
60{
61 return 0x00810000U;
62}
63static inline u32 usermode_cfg0_usermode_class_id_f(u32 v)
64{
65 return (v & 0xffffU) << 0U;
66}
67static inline u32 usermode_cfg0_usermode_class_id_value_v(void)
68{
69 return 0x0000c361U;
70}
/* USERMODE_TIME_0/1: timestamp, nanoseconds split across two registers
 * (low word carries bits 5..31 of nsec). */
71static inline u32 usermode_time_0_r(void)
72{
73 return 0x00810080U;
74}
75static inline u32 usermode_time_0_nsec_f(u32 v)
76{
77 return (v & 0x7ffffffU) << 5U;
78}
79static inline u32 usermode_time_1_r(void)
80{
81 return 0x00810084U;
82}
83static inline u32 usermode_time_1_nsec_f(u32 v)
84{
85 return (v & 0x1fffffffU) << 0U;
86}
/* USERMODE_NOTIFY_CHANNEL_PENDING: doorbell register; write the channel
 * token to ring work pending for that channel. */
87static inline u32 usermode_notify_channel_pending_r(void)
88{
89 return 0x00810090U;
90}
91static inline u32 usermode_notify_channel_pending_id_f(u32 v)
92{
93 return (v & 0xffffffffU) << 0U;
94}
95#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/io_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/io_t19x.h
new file mode 100644
index 00000000..f8c7dbbd
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/io_t19x.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22#ifndef __NVGPU_IO_T19X_H__
23#define __NVGPU_IO_T19X_H__
24
25#ifdef __KERNEL__
26#include "linux/io_t19x.h"
27#endif
28
29#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h
new file mode 100644
index 00000000..f71a6ecf
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __NVGPU_IO_T19X_LINUX_H__
18#define __NVGPU_IO_T19X_LINUX_H__
19
20#include <nvgpu/types.h>
21
22struct gk20a;
23
24void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v);
25
26#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h
new file mode 100644
index 00000000..a105c6dc
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __NVGPU_MODULE_T19X_H__
18#define __NVGPU_MODULE_T19X_H__
19
20struct gk20a;
21
22void t19x_init_support(struct gk20a *g);
23void t19x_remove_support(struct gk20a *g);
24void t19x_lockout_registers(struct gk20a *g);
25void t19x_restore_registers(struct gk20a *g);
26
27#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h
new file mode 100644
index 00000000..a306bfb8
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef NVGPU_OS_LINUX_T19X_H
17#define NVGPU_OS_LINUX_T19X_H
18
19#include <linux/compiler.h>
20
/* Linux-specific t19x state embedded in the per-GPU OS struct. */
21struct nvgpu_os_linux_t19x {
	/* Mapped usermode (doorbell) register aperture. */
22 void __iomem *usermode_regs;
	/* Held copy of usermode_regs; presumably stashed/restored around the
	 * t19x_lockout_registers()/t19x_restore_registers() pair declared in
	 * module_t19x.h — TODO(review): confirm against those definitions. */
23 void __iomem *usermode_regs_saved;
24};
25
26#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h
new file mode 100644
index 00000000..c94176cc
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
#ifndef __NVGPU_PCI_T19X_H__
#define __NVGPU_PCI_T19X_H__

struct nvgpu_os_linux;

/* T19x-specific init for PCI-attached GPUs (see common/linux/pci_t19x.c). */
void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l);

#endif
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h
new file mode 100644
index 00000000..4b499882
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef __NVGPU_NVHOST_T19X_H__
#define __NVGPU_NVHOST_T19X_H__

#ifdef CONFIG_TEGRA_GK20A_NVHOST
#include <nvgpu/types.h>

struct nvgpu_nvhost_dev;

/*
 * Query the aperture (base address and size) of the host1x syncpoint
 * unit interface ("syncpt shim"). Returns 0 on success, negative errno
 * otherwise; *base / *size are only valid on success.
 */
int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
		struct nvgpu_nvhost_dev *nvhost_dev,
		u64 *base, size_t *size);
/* Byte offset of the given syncpoint within the syncpoint unit aperture. */
u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id);

#endif
#endif /* __NVGPU_NVHOST_T19X_H__ */
diff --git a/drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h b/drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h
new file mode 100644
index 00000000..8689a535
--- /dev/null
+++ b/drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h
@@ -0,0 +1,47 @@
1/*
2 * NVIDIA GPU ID functions, definitions.
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
#ifndef _NVGPU_GPUID_T19X_H_
#define _NVGPU_GPUID_T19X_H_

/* GPU architecture IDs for the t19x chip family. */
#define NVGPU_GPUID_GV11B 0x0000015B
#define NVGPU_GPUID_GV100 0x00000140

/* Device-tree compatible strings matched for GV11B. */
#define NVGPU_COMPAT_TEGRA_GV11B "nvidia,gv11b"
#define NVGPU_COMPAT_GENERIC_GV11B "nvidia,generic-gv11b"


/* Chip-agnostic aliases used by shared t19x code: GV11B is the Tegra
 * integrated GPU of this generation... */
#define TEGRA_19x_GPUID NVGPU_GPUID_GV11B
#define TEGRA_19x_GPUID_HAL gv11b_init_hal
#define TEGRA_19x_GPU_COMPAT_TEGRA NVGPU_COMPAT_TEGRA_GV11B
#define TEGRA_19x_GPU_COMPAT_GENERIC NVGPU_COMPAT_GENERIC_GV11B

/* ...and GV100 is the "big" (discrete) GPU of the generation. */
#define BIGGPU_19x_GPUID NVGPU_GPUID_GV100
#define BIGGPU_19x_GPUID_HAL gv100_init_hal

struct gpu_ops;
/*
 * Forward-declare struct gk20a so the prototypes below do not introduce
 * the tag with prototype-only scope (which draws a "declared inside
 * parameter list" compiler warning and makes the type incompatible with
 * the real one).
 */
struct gk20a;
extern int gv11b_init_hal(struct gk20a *);
extern int gv100_init_hal(struct gk20a *);
extern struct gk20a_platform t19x_gpu_tegra_platform;

#endif
diff --git a/drivers/gpu/nvgpu/tsg_t19x.h b/drivers/gpu/nvgpu/tsg_t19x.h
new file mode 100644
index 00000000..d1f47cc3
--- /dev/null
+++ b/drivers/gpu/nvgpu/tsg_t19x.h
@@ -0,0 +1,36 @@
1/*
2 * NVIDIA T19x TSG
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
#ifndef __NVGPU_TSG_T19X_H__
#define __NVGPU_TSG_T19X_H__

#include <nvgpu/types.h>

/* T19x-specific per-TSG state (embedded in the TSG structure as t19x). */
struct tsg_t19x {
	/*
	 * NOTE(review): field semantics inferred from names — appear to
	 * track TPC power-gating for this TSG (active TPC count, whether
	 * TPC-PG is enabled, and whether the count has been configured).
	 * Confirm against common/linux/ioctl_tsg_t19x.c.
	 */
	u32 num_active_tpcs;
	u8 tpc_pg_enabled;
	bool tpc_num_initialized;
};

#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c b/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c
new file mode 100644
index 00000000..fea473a7
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24#include "vgpu/clk_vgpu.h"
25#include "common/linux/platform_gk20a.h"
26#include "common/linux/os_linux.h"
27
28#include <nvgpu/nvhost.h>
29#include <nvgpu/nvhost_t19x.h>
30
31#include <linux/platform_device.h>
32
33static int gv11b_vgpu_probe(struct device *dev)
34{
35 struct platform_device *pdev = to_platform_device(dev);
36 struct gk20a_platform *platform = dev_get_drvdata(dev);
37 struct resource *r;
38 void __iomem *regs;
39 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(platform->g);
40 struct gk20a *g = platform->g;
41 int ret;
42
43 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usermode");
44 if (!r) {
45 dev_err(dev, "failed to get usermode regs\n");
46 return -ENXIO;
47 }
48 regs = devm_ioremap_resource(dev, r);
49 if (IS_ERR(regs)) {
50 dev_err(dev, "failed to map usermode regs\n");
51 return PTR_ERR(regs);
52 }
53 l->t19x.usermode_regs = regs;
54
55#ifdef CONFIG_TEGRA_GK20A_NVHOST
56 ret = nvgpu_get_nvhost_dev(g);
57 if (ret) {
58 l->t19x.usermode_regs = NULL;
59 return ret;
60 }
61
62 ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(g->nvhost_dev,
63 &g->syncpt_unit_base,
64 &g->syncpt_unit_size);
65 if (ret) {
66 dev_err(dev, "Failed to get syncpt interface");
67 return -ENOSYS;
68 }
69 g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1);
70 nvgpu_info(g, "syncpt_unit_base %llx syncpt_unit_size %zx size %x\n",
71 g->syncpt_unit_base, g->syncpt_unit_size, g->syncpt_size);
72#endif
73 vgpu_init_clk_support(platform->g);
74
75 return 0;
76}
77
/*
 * Platform data for GV11B running as a virtual GPU (.virtual_dev routes
 * operations through the vgpu_* paths instead of real hardware).
 */
struct gk20a_platform gv11b_vgpu_tegra_platform = {
	.has_syncpoints = true,
	.aggressive_sync_destroy_thresh = 64,

	/* power management configuration — all power/clock gating is
	 * disabled; NOTE(review): presumably because the guest does not
	 * control physical power states — confirm with server behavior. */
	.can_railgate_init = false,
	.can_elpg_init = false,
	.enable_slcg = false,
	.enable_blcg = false,
	.enable_elcg = false,
	.enable_elpg = false,
	.enable_aelpg = false,
	.can_slcg = false,
	.can_blcg = false,
	.can_elcg = false,

	/* channel watchdog timeout, in milliseconds */
	.ch_wdt_timeout_ms = 5000,

	.probe = gv11b_vgpu_probe,

	.clk_round_rate = vgpu_clk_round_rate,
	.get_clk_freqs = vgpu_clk_get_freqs,

	/* frequency scaling configuration */
	.devfreq_governor = "userspace",

	.virtual_dev = true,
};
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
new file mode 100644
index 00000000..ae9d52a7
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24
25#include "vgpu/vgpu.h"
26#include "gv11b/fifo_gv11b.h"
27#include <nvgpu/nvhost_t19x.h>
28
29#include <linux/tegra_vgpu.h>
30
#ifdef CONFIG_TEGRA_GK20A_NVHOST
/*
 * Map syncpoint memory into the channel's GPU virtual address space.
 *
 * Allocates GPU VA and asks the vgpu server (TEGRA_VGPU_CMD_MAP_SYNCPT)
 * to back it:
 *  - once per VM: a read-only mapping of the complete syncpoint shim
 *    range, cached in vm->syncpt_ro_map_gpu_va and shared by every
 *    channel on that VM;
 *  - per channel: a mapping of this channel's own syncpoint, with
 *    TEGRA_VGPU_MAP_PROT_NONE (NOTE(review): per the comment below this
 *    is the read-write mapping — protection is decided server-side).
 *
 * Returns 0 on success, negative errno on failure.
 */
int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
		u32 syncpt_id, struct nvgpu_mem *syncpt_buf)
{
	int err;
	struct gk20a *g = c->g;
	struct vm_gk20a *vm = c->vm;
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_map_syncpt_params *p = &msg.params.t19x.map_syncpt;

	/*
	 * Add ro map for complete sync point shim range in vm.
	 * All channels sharing same vm will share same ro mapping.
	 * Create rw map for current channel sync point.
	 */
	if (!vm->syncpt_ro_map_gpu_va) {
		vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm,
				g->syncpt_unit_size,
				gmmu_page_size_kernel);
		if (!vm->syncpt_ro_map_gpu_va) {
			nvgpu_err(g, "allocating read-only va space failed");
			return -ENOMEM;
		}

		msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
		msg.handle = vgpu_get_handle(g);
		p->as_handle = c->vm->handle;
		p->gpu_va = vm->syncpt_ro_map_gpu_va;
		p->len = g->syncpt_unit_size;
		p->offset = 0;
		p->prot = TEGRA_VGPU_MAP_PROT_READ_ONLY;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		/* server-side status takes over once the RPC itself worked */
		err = err ? err : msg.ret;
		if (err) {
			nvgpu_err(g,
				"mapping read-only va space failed err %d",
				err);
			/* undo the VA reservation and clear the cache so a
			 * later caller can retry the shared mapping */
			__nvgpu_vm_free_va(c->vm, vm->syncpt_ro_map_gpu_va,
					gmmu_page_size_kernel);
			vm->syncpt_ro_map_gpu_va = 0;
			return err;
		}
	}

	syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size,
			gmmu_page_size_kernel);
	if (!syncpt_buf->gpu_va) {
		nvgpu_err(g, "allocating syncpt va space failed");
		return -ENOMEM;
	}

	msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT;
	msg.handle = vgpu_get_handle(g);
	p->as_handle = c->vm->handle;
	p->gpu_va = syncpt_buf->gpu_va;
	p->len = g->syncpt_size;
	/* offset of this syncpoint inside the shim aperture */
	p->offset =
		nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
	p->prot = TEGRA_VGPU_MAP_PROT_NONE;
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (err) {
		nvgpu_err(g, "mapping syncpt va space failed err %d", err);
		/* NOTE(review): syncpt_buf->gpu_va is left stale after the
		 * free; callers must not use it on error — consider zeroing */
		__nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va,
				gmmu_page_size_kernel);
		return err;
	}

	return 0;
}
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
102
103int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g)
104{
105 struct fifo_gk20a *f = &g->fifo;
106 int err;
107
108 err = vgpu_get_attribute(vgpu_get_handle(g),
109 TEGRA_VGPU_ATTRIB_MAX_SUBCTX_COUNT,
110 &f->t19x.max_subctx_count);
111 if (err) {
112 nvgpu_err(g, "get max_subctx_count failed %d", err);
113 return err;
114 }
115
116 return 0;
117}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h
new file mode 100644
index 00000000..bea935d3
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef _VGPU_FIFO_GV11B_H_
24#define _VGPU_FIFO_GV11B_H_
25
26struct gk20a;
27
28int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g);
29int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c,
30 u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
31#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c
new file mode 100644
index 00000000..89952221
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c
@@ -0,0 +1,41 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24#include <vgpu/gr_vgpu.h>
25
26#include "vgpu_subctx_gv11b.h"
27
28int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va)
29{
30 int err;
31
32 err = vgpu_gv11b_alloc_subctx_header(c);
33 if (err)
34 return err;
35
36 err = vgpu_gr_commit_inst(c, gpu_va);
37 if (err)
38 vgpu_gv11b_free_subctx_header(c);
39
40 return err;
41}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h
new file mode 100644
index 00000000..562198ca
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef _VGPU_GR_GV11B_H_
#define _VGPU_GR_GV11B_H_

struct channel_gk20a;

/*
 * GV11B vgpu commit_inst hook: allocates the subcontext header before
 * committing the instance block (see vgpu_gr_gv11b.c).
 */
int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va);

#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
new file mode 100644
index 00000000..feac195e
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15
16#include <nvgpu/enabled.h>
17#include <nvgpu/enabled_t19x.h>
18
19#include "vgpu/vgpu.h"
20#include "vgpu_gv11b.h"
21
22int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g)
23{
24 int err;
25
26 gk20a_dbg_fn("");
27
28 err = vgpu_init_gpu_characteristics(g);
29 if (err) {
30 nvgpu_err(g, "vgpu_init_gpu_characteristics failed, err %d\n", err);
31 return err;
32 }
33
34 __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true);
35
36 return 0;
37}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h
new file mode 100644
index 00000000..9413904b
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
#ifndef _VGPU_GV11B_H_
#define _VGPU_GV11B_H_

struct gk20a;

/*
 * GV11B vgpu hook for GPU characteristics init (enables TSG
 * subcontexts on top of the common vgpu setup; see vgpu_gv11b.c).
 */
int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g);

#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
new file mode 100644
index 00000000..17d6f049
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
@@ -0,0 +1,642 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24#include <gv11b/hal_gv11b.h>
25#include <vgpu/vgpu.h>
26#include <vgpu/fifo_vgpu.h>
27#include <vgpu/gr_vgpu.h>
28#include <vgpu/ltc_vgpu.h>
29#include <vgpu/mm_vgpu.h>
30#include <vgpu/dbg_vgpu.h>
31#include <vgpu/fecs_trace_vgpu.h>
32#include <vgpu/css_vgpu.h>
33#include <vgpu/vgpu_t19x.h>
34#include <vgpu/gm20b/vgpu_gr_gm20b.h>
35#include <vgpu/gp10b/vgpu_mm_gp10b.h>
36#include <vgpu/gp10b/vgpu_gr_gp10b.h>
37
38#include <gk20a/fb_gk20a.h>
39#include <gk20a/flcn_gk20a.h>
40#include <gk20a/bus_gk20a.h>
41#include <gk20a/mc_gk20a.h>
42
43#include <gm20b/gr_gm20b.h>
44#include <gm20b/fb_gm20b.h>
45#include <gm20b/fifo_gm20b.h>
46#include <gm20b/pmu_gm20b.h>
47#include <gm20b/mm_gm20b.h>
48#include <gm20b/acr_gm20b.h>
49#include <gm20b/ltc_gm20b.h>
50
51#include <gp10b/fb_gp10b.h>
52#include <gp10b/pmu_gp10b.h>
53#include <gp10b/mm_gp10b.h>
54#include <gp10b/mc_gp10b.h>
55#include <gp10b/ce_gp10b.h>
56#include <gp10b/fifo_gp10b.h>
57#include <gp10b/therm_gp10b.h>
58#include <gp10b/priv_ring_gp10b.h>
59#include <gp10b/ltc_gp10b.h>
60
61#include <gp106/pmu_gp106.h>
62#include <gp106/acr_gp106.h>
63
64#include <gv11b/fb_gv11b.h>
65#include <gv11b/pmu_gv11b.h>
66#include <gv11b/acr_gv11b.h>
67#include <gv11b/mm_gv11b.h>
68#include <gv11b/mc_gv11b.h>
69#include <gv11b/ce_gv11b.h>
70#include <gv11b/fifo_gv11b.h>
71#include <gv11b/therm_gv11b.h>
72#include <gv11b/regops_gv11b.h>
73#include <gv11b/gr_ctx_gv11b.h>
74#include <gv11b/ltc_gv11b.h>
75#include <gv11b/gv11b_gating_reglist.h>
76
77#include <gv100/gr_gv100.h>
78
79#include <nvgpu/enabled.h>
80
81#include "vgpu_gv11b.h"
82#include "vgpu_gr_gv11b.h"
83#include "vgpu_fifo_gv11b.h"
84#include "vgpu_subctx_gv11b.h"
85#include "vgpu_tsg_gv11b.h"
86
87#include <nvgpu/hw/gv11b/hw_fuse_gv11b.h>
88#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
89#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
90#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
91#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>
92
93static const struct gpu_ops vgpu_gv11b_ops = {
94 .ltc = {
95 .determine_L2_size_bytes = vgpu_determine_L2_size_bytes,
96 .set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
97 .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry,
98 .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry,
99 .init_cbc = NULL,
100 .init_fs_state = vgpu_ltc_init_fs_state,
101 .init_comptags = vgpu_ltc_init_comptags,
102 .cbc_ctrl = NULL,
103 .isr = gv11b_ltc_isr,
104 .cbc_fix_config = gv11b_ltc_cbc_fix_config,
105 .flush = gm20b_flush_ltc,
106 .set_enabled = gp10b_ltc_set_enabled,
107 },
108 .ce2 = {
109 .isr_stall = gv11b_ce_isr,
110 .isr_nonstall = gp10b_ce_nonstall_isr,
111 .get_num_pce = vgpu_ce_get_num_pce,
112 },
113 .gr = {
114 .init_gpc_mmu = gr_gv11b_init_gpc_mmu,
115 .bundle_cb_defaults = gr_gv11b_bundle_cb_defaults,
116 .cb_size_default = gr_gv11b_cb_size_default,
117 .calc_global_ctx_buffer_size =
118 gr_gv11b_calc_global_ctx_buffer_size,
119 .commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb,
120 .commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb,
121 .commit_global_cb_manager = gr_gp10b_commit_global_cb_manager,
122 .commit_global_pagepool = gr_gp10b_commit_global_pagepool,
123 .handle_sw_method = gr_gv11b_handle_sw_method,
124 .set_alpha_circular_buffer_size =
125 gr_gv11b_set_alpha_circular_buffer_size,
126 .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size,
127 .enable_hww_exceptions = gr_gv11b_enable_hww_exceptions,
128 .is_valid_class = gr_gv11b_is_valid_class,
129 .is_valid_gfx_class = gr_gv11b_is_valid_gfx_class,
130 .is_valid_compute_class = gr_gv11b_is_valid_compute_class,
131 .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs,
132 .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs,
133 .init_fs_state = vgpu_gm20b_init_fs_state,
134 .set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask,
135 .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments,
136 .load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode,
137 .set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask,
138 .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask,
139 .free_channel_ctx = vgpu_gr_free_channel_ctx,
140 .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx,
141 .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull,
142 .get_zcull_info = vgpu_gr_get_zcull_info,
143 .is_tpc_addr = gr_gm20b_is_tpc_addr,
144 .get_tpc_num = gr_gm20b_get_tpc_num,
145 .detect_sm_arch = vgpu_gr_detect_sm_arch,
146 .add_zbc_color = gr_gp10b_add_zbc_color,
147 .add_zbc_depth = gr_gp10b_add_zbc_depth,
148 .zbc_set_table = vgpu_gr_add_zbc,
149 .zbc_query_table = vgpu_gr_query_zbc,
150 .pmu_save_zbc = gk20a_pmu_save_zbc,
151 .add_zbc = gr_gk20a_add_zbc,
152 .pagepool_default_size = gr_gv11b_pagepool_default_size,
153 .init_ctx_state = vgpu_gr_gp10b_init_ctx_state,
154 .alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx,
155 .free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx,
156 .update_ctxsw_preemption_mode =
157 gr_gp10b_update_ctxsw_preemption_mode,
158 .dump_gr_regs = NULL,
159 .update_pc_sampling = gr_gm20b_update_pc_sampling,
160 .get_fbp_en_mask = vgpu_gr_get_fbp_en_mask,
161 .get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp,
162 .get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc,
163 .get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask,
164 .get_max_fbps_count = vgpu_gr_get_max_fbps_count,
165 .init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info,
166 .wait_empty = gr_gv11b_wait_empty,
167 .init_cyclestats = vgpu_gr_gm20b_init_cyclestats,
168 .set_sm_debug_mode = vgpu_gr_set_sm_debug_mode,
169 .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs,
170 .bpt_reg_info = gv11b_gr_bpt_reg_info,
171 .get_access_map = gr_gv11b_get_access_map,
172 .handle_fecs_error = gr_gv11b_handle_fecs_error,
173 .handle_sm_exception = gr_gk20a_handle_sm_exception,
174 .handle_tex_exception = gr_gv11b_handle_tex_exception,
175 .enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions,
176 .enable_exceptions = gr_gv11b_enable_exceptions,
177 .get_lrf_tex_ltc_dram_override = get_ecc_override_val,
178 .update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode,
179 .update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode,
180 .record_sm_error_state = gv11b_gr_record_sm_error_state,
181 .update_sm_error_state = gv11b_gr_update_sm_error_state,
182 .clear_sm_error_state = vgpu_gr_clear_sm_error_state,
183 .suspend_contexts = vgpu_gr_suspend_contexts,
184 .resume_contexts = vgpu_gr_resume_contexts,
185 .get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags,
186 .init_sm_id_table = gr_gv100_init_sm_id_table,
187 .load_smid_config = gr_gv11b_load_smid_config,
188 .program_sm_id_numbering = gr_gv11b_program_sm_id_numbering,
189 .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr,
190 .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr,
191 .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr,
192 .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr,
193 .setup_rop_mapping = gr_gv11b_setup_rop_mapping,
194 .program_zcull_mapping = gr_gv11b_program_zcull_mapping,
195 .commit_global_timeslice = gr_gv11b_commit_global_timeslice,
196 .commit_inst = vgpu_gr_gv11b_commit_inst,
197 .write_zcull_ptr = gr_gv11b_write_zcull_ptr,
198 .write_pm_ptr = gr_gv11b_write_pm_ptr,
199 .init_elcg_mode = gr_gv11b_init_elcg_mode,
200 .load_tpc_mask = gr_gv11b_load_tpc_mask,
201 .inval_icache = gr_gk20a_inval_icache,
202 .trigger_suspend = gv11b_gr_sm_trigger_suspend,
203 .wait_for_pause = gr_gk20a_wait_for_pause,
204 .resume_from_pause = gv11b_gr_resume_from_pause,
205 .clear_sm_errors = gr_gk20a_clear_sm_errors,
206 .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions,
207 .get_esr_sm_sel = gv11b_gr_get_esr_sm_sel,
208 .sm_debugger_attached = gv11b_gr_sm_debugger_attached,
209 .suspend_single_sm = gv11b_gr_suspend_single_sm,
210 .suspend_all_sms = gv11b_gr_suspend_all_sms,
211 .resume_single_sm = gv11b_gr_resume_single_sm,
212 .resume_all_sms = gv11b_gr_resume_all_sms,
213 .get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr,
214 .get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr,
215 .get_sm_no_lock_down_hww_global_esr_mask =
216 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask,
217 .lock_down_sm = gv11b_gr_lock_down_sm,
218 .wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down,
219 .clear_sm_hww = gv11b_gr_clear_sm_hww,
220 .init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf,
221 .get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs,
222 .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce,
223 .set_boosted_ctx = NULL,
224 .set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode,
225 .set_czf_bypass = NULL,
226 .pre_process_sm_exception = gr_gv11b_pre_process_sm_exception,
227 .set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va,
228 .init_preemption_state = NULL,
229 .update_boosted_ctx = NULL,
230 .set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3,
231 .create_gr_sysfs = gr_gv11b_create_sysfs,
232 .set_ctxsw_preemption_mode = vgpu_gr_gp10b_set_ctxsw_preemption_mode,
233 .is_etpc_addr = gv11b_gr_pri_is_etpc_addr,
234 .egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table,
235 .handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception,
236 .zbc_s_query_table = gr_gv11b_zbc_s_query_table,
237 .load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl,
238 .handle_gpc_gpcmmu_exception =
239 gr_gv11b_handle_gpc_gpcmmu_exception,
240 .add_zbc_type_s = gr_gv11b_add_zbc_type_s,
241 .get_egpc_base = gv11b_gr_get_egpc_base,
242 .get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num,
243 .handle_gpc_gpccs_exception =
244 gr_gv11b_handle_gpc_gpccs_exception,
245 .load_zbc_s_tbl = gr_gv11b_load_stencil_tbl,
246 .access_smpc_reg = gv11b_gr_access_smpc_reg,
247 .is_egpc_addr = gv11b_gr_pri_is_egpc_addr,
248 .add_zbc_s = gr_gv11b_add_zbc_stencil,
249 .handle_gcc_exception = gr_gv11b_handle_gcc_exception,
250 .init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle,
251 .handle_tpc_sm_ecc_exception =
252 gr_gv11b_handle_tpc_sm_ecc_exception,
253 .decode_egpc_addr = gv11b_gr_decode_egpc_addr,
254 .init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data,
255 },
256 .fb = {
257 .reset = gv11b_fb_reset,
258 .init_hw = gk20a_fb_init_hw,
259 .init_fs_state = gv11b_fb_init_fs_state,
260 .init_cbc = gv11b_fb_init_cbc,
261 .set_mmu_page_size = gm20b_fb_set_mmu_page_size,
262 .set_use_full_comp_tag_line =
263 gm20b_fb_set_use_full_comp_tag_line,
264 .compression_page_size = gp10b_fb_compression_page_size,
265 .compressible_page_size = gp10b_fb_compressible_page_size,
266 .vpr_info_fetch = gm20b_fb_vpr_info_fetch,
267 .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info,
268 .read_wpr_info = gm20b_fb_read_wpr_info,
269 .is_debug_mode_enabled = NULL,
270 .set_debug_mode = vgpu_mm_mmu_set_debug_mode,
271 .tlb_invalidate = vgpu_mm_tlb_invalidate,
272 .hub_isr = gv11b_fb_hub_isr,
273 },
274 .clock_gating = {
275 .slcg_bus_load_gating_prod =
276 gv11b_slcg_bus_load_gating_prod,
277 .slcg_ce2_load_gating_prod =
278 gv11b_slcg_ce2_load_gating_prod,
279 .slcg_chiplet_load_gating_prod =
280 gv11b_slcg_chiplet_load_gating_prod,
281 .slcg_ctxsw_firmware_load_gating_prod =
282 gv11b_slcg_ctxsw_firmware_load_gating_prod,
283 .slcg_fb_load_gating_prod =
284 gv11b_slcg_fb_load_gating_prod,
285 .slcg_fifo_load_gating_prod =
286 gv11b_slcg_fifo_load_gating_prod,
287 .slcg_gr_load_gating_prod =
288 gr_gv11b_slcg_gr_load_gating_prod,
289 .slcg_ltc_load_gating_prod =
290 ltc_gv11b_slcg_ltc_load_gating_prod,
291 .slcg_perf_load_gating_prod =
292 gv11b_slcg_perf_load_gating_prod,
293 .slcg_priring_load_gating_prod =
294 gv11b_slcg_priring_load_gating_prod,
295 .slcg_pmu_load_gating_prod =
296 gv11b_slcg_pmu_load_gating_prod,
297 .slcg_therm_load_gating_prod =
298 gv11b_slcg_therm_load_gating_prod,
299 .slcg_xbar_load_gating_prod =
300 gv11b_slcg_xbar_load_gating_prod,
301 .blcg_bus_load_gating_prod =
302 gv11b_blcg_bus_load_gating_prod,
303 .blcg_ce_load_gating_prod =
304 gv11b_blcg_ce_load_gating_prod,
305 .blcg_ctxsw_firmware_load_gating_prod =
306 gv11b_blcg_ctxsw_firmware_load_gating_prod,
307 .blcg_fb_load_gating_prod =
308 gv11b_blcg_fb_load_gating_prod,
309 .blcg_fifo_load_gating_prod =
310 gv11b_blcg_fifo_load_gating_prod,
311 .blcg_gr_load_gating_prod =
312 gv11b_blcg_gr_load_gating_prod,
313 .blcg_ltc_load_gating_prod =
314 gv11b_blcg_ltc_load_gating_prod,
315 .blcg_pwr_csb_load_gating_prod =
316 gv11b_blcg_pwr_csb_load_gating_prod,
317 .blcg_pmu_load_gating_prod =
318 gv11b_blcg_pmu_load_gating_prod,
319 .blcg_xbar_load_gating_prod =
320 gv11b_blcg_xbar_load_gating_prod,
321 .pg_gr_load_gating_prod =
322 gr_gv11b_pg_gr_load_gating_prod,
323 },
324 .fifo = {
325 .init_fifo_setup_hw = vgpu_gv11b_init_fifo_setup_hw,
326 .bind_channel = vgpu_channel_bind,
327 .unbind_channel = vgpu_channel_unbind,
328 .disable_channel = vgpu_channel_disable,
329 .enable_channel = vgpu_channel_enable,
330 .alloc_inst = vgpu_channel_alloc_inst,
331 .free_inst = vgpu_channel_free_inst,
332 .setup_ramfc = vgpu_channel_setup_ramfc,
333 .channel_set_timeslice = vgpu_channel_set_timeslice,
334 .default_timeslice_us = vgpu_fifo_default_timeslice_us,
335 .setup_userd = gk20a_fifo_setup_userd,
336 .userd_gp_get = gv11b_userd_gp_get,
337 .userd_gp_put = gv11b_userd_gp_put,
338 .userd_pb_get = gv11b_userd_pb_get,
339 .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val,
340 .preempt_channel = vgpu_fifo_preempt_channel,
341 .preempt_tsg = vgpu_fifo_preempt_tsg,
342 .enable_tsg = vgpu_enable_tsg,
343 .disable_tsg = gk20a_disable_tsg,
344 .tsg_verify_channel_status = NULL,
345 .tsg_verify_status_ctx_reload = NULL,
346 /* TODO: implement it for CE fault */
347 .tsg_verify_status_faulted = NULL,
348 .update_runlist = vgpu_fifo_update_runlist,
349 .trigger_mmu_fault = NULL,
350 .get_mmu_fault_info = NULL,
351 .wait_engine_idle = vgpu_fifo_wait_engine_idle,
352 .get_num_fifos = gv11b_fifo_get_num_fifos,
353 .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
354 .set_runlist_interleave = vgpu_fifo_set_runlist_interleave,
355 .tsg_set_timeslice = vgpu_tsg_set_timeslice,
356 .tsg_open = vgpu_tsg_open,
357 .force_reset_ch = vgpu_fifo_force_reset_ch,
358 .engine_enum_from_type = gp10b_fifo_engine_enum_from_type,
359 .device_info_data_parse = gp10b_device_info_data_parse,
360 .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v,
361 .init_engine_info = vgpu_fifo_init_engine_info,
362 .runlist_entry_size = ram_rl_entry_size_v,
363 .get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry,
364 .get_ch_runlist_entry = gv11b_get_ch_runlist_entry,
365 .is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc,
366 .dump_pbdma_status = gk20a_dump_pbdma_status,
367 .dump_eng_status = gv11b_dump_eng_status,
368 .dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc,
369 .intr_0_error_mask = gv11b_fifo_intr_0_error_mask,
370 .is_preempt_pending = gv11b_fifo_is_preempt_pending,
371 .init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs,
372 .reset_enable_hw = gv11b_init_fifo_reset_enable_hw,
373 .teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg,
374 .handle_sched_error = gv11b_fifo_handle_sched_error,
375 .handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0,
376 .handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1,
377 .init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers,
378 .deinit_eng_method_buffers =
379 gv11b_fifo_deinit_eng_method_buffers,
380 .tsg_bind_channel = vgpu_gv11b_tsg_bind_channel,
381 .tsg_unbind_channel = vgpu_tsg_unbind_channel,
382#ifdef CONFIG_TEGRA_GK20A_NVHOST
383 .alloc_syncpt_buf = vgpu_gv11b_fifo_alloc_syncpt_buf,
384 .free_syncpt_buf = gv11b_fifo_free_syncpt_buf,
385 .add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd,
386 .get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size,
387 .add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd,
388 .get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size,
389#endif
390 .resetup_ramfc = NULL,
391 .reschedule_runlist = NULL,
392 .device_info_fault_id = top_device_info_data_fault_id_enum_v,
393 .free_channel_ctx_header = vgpu_gv11b_free_subctx_header,
394 .preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg,
395 .handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout,
396 },
397 .gr_ctx = {
398 .get_netlist_name = gr_gv11b_get_netlist_name,
399 .is_fw_defined = gr_gv11b_is_firmware_defined,
400 },
401#ifdef CONFIG_GK20A_CTXSW_TRACE
402 .fecs_trace = {
403 .alloc_user_buffer = NULL,
404 .free_user_buffer = NULL,
405 .mmap_user_buffer = NULL,
406 .init = NULL,
407 .deinit = NULL,
408 .enable = NULL,
409 .disable = NULL,
410 .is_enabled = NULL,
411 .reset = NULL,
412 .flush = NULL,
413 .poll = NULL,
414 .bind_channel = NULL,
415 .unbind_channel = NULL,
416 .max_entries = NULL,
417 },
418#endif /* CONFIG_GK20A_CTXSW_TRACE */
419 .mm = {
420 /* FIXME: add support for sparse mappings */
421 .support_sparse = NULL,
422 .gmmu_map = vgpu_gp10b_locked_gmmu_map,
423 .gmmu_unmap = vgpu_locked_gmmu_unmap,
424 .vm_bind_channel = vgpu_vm_bind_channel,
425 .fb_flush = vgpu_mm_fb_flush,
426 .l2_invalidate = vgpu_mm_l2_invalidate,
427 .l2_flush = vgpu_mm_l2_flush,
428 .cbc_clean = gk20a_mm_cbc_clean,
429 .set_big_page_size = gm20b_mm_set_big_page_size,
430 .get_big_page_sizes = gm20b_mm_get_big_page_sizes,
431 .get_default_big_page_size = gp10b_mm_get_default_big_page_size,
432 .gpu_phys_addr = gm20b_gpu_phys_addr,
433 .get_iommu_bit = gk20a_mm_get_iommu_bit,
434 .get_mmu_levels = gp10b_mm_get_mmu_levels,
435 .init_pdb = gp10b_mm_init_pdb,
436 .init_mm_setup_hw = vgpu_gp10b_init_mm_setup_hw,
437 .is_bar1_supported = gv11b_mm_is_bar1_supported,
438 .init_inst_block = gv11b_init_inst_block,
439 .mmu_fault_pending = gv11b_mm_mmu_fault_pending,
440 .get_kind_invalid = gm20b_get_kind_invalid,
441 .get_kind_pitch = gm20b_get_kind_pitch,
442 .init_bar2_vm = gb10b_init_bar2_vm,
443 .init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup,
444 .remove_bar2_vm = gv11b_mm_remove_bar2_vm,
445 .fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy,
446 },
447 .therm = {
448 .init_therm_setup_hw = gp10b_init_therm_setup_hw,
449 .elcg_init_idle_filters = gv11b_elcg_init_idle_filters,
450 },
451 .pmu = {
452 .pmu_setup_elpg = gp10b_pmu_setup_elpg,
453 .pmu_get_queue_head = pwr_pmu_queue_head_r,
454 .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v,
455 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
456 .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v,
457 .pmu_queue_head = gk20a_pmu_queue_head,
458 .pmu_queue_tail = gk20a_pmu_queue_tail,
459 .pmu_msgq_tail = gk20a_pmu_msgq_tail,
460 .pmu_mutex_size = pwr_pmu_mutex__size_1_v,
461 .pmu_mutex_acquire = gk20a_pmu_mutex_acquire,
462 .pmu_mutex_release = gk20a_pmu_mutex_release,
463 .write_dmatrfbase = gp10b_write_dmatrfbase,
464 .pmu_elpg_statistics = gp106_pmu_elpg_statistics,
465 .pmu_pg_init_param = gv11b_pg_gr_init,
466 .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list,
467 .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list,
468 .dump_secure_fuses = pmu_dump_security_fuses_gp10b,
469 .reset_engine = gp106_pmu_engine_reset,
470 .is_engine_in_reset = gp106_pmu_is_engine_in_reset,
471 .pmu_nsbootstrap = gv11b_pmu_bootstrap,
472 .pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask,
473 .is_pmu_supported = gv11b_is_pmu_supported,
474 },
475 .regops = {
476 .get_global_whitelist_ranges =
477 gv11b_get_global_whitelist_ranges,
478 .get_global_whitelist_ranges_count =
479 gv11b_get_global_whitelist_ranges_count,
480 .get_context_whitelist_ranges =
481 gv11b_get_context_whitelist_ranges,
482 .get_context_whitelist_ranges_count =
483 gv11b_get_context_whitelist_ranges_count,
484 .get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist,
485 .get_runcontrol_whitelist_count =
486 gv11b_get_runcontrol_whitelist_count,
487 .get_runcontrol_whitelist_ranges =
488 gv11b_get_runcontrol_whitelist_ranges,
489 .get_runcontrol_whitelist_ranges_count =
490 gv11b_get_runcontrol_whitelist_ranges_count,
491 .get_qctl_whitelist = gv11b_get_qctl_whitelist,
492 .get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count,
493 .get_qctl_whitelist_ranges = gv11b_get_qctl_whitelist_ranges,
494 .get_qctl_whitelist_ranges_count =
495 gv11b_get_qctl_whitelist_ranges_count,
496 .apply_smpc_war = gv11b_apply_smpc_war,
497 },
498 .mc = {
499 .intr_enable = mc_gv11b_intr_enable,
500 .intr_unit_config = mc_gp10b_intr_unit_config,
501 .isr_stall = mc_gp10b_isr_stall,
502 .intr_stall = mc_gp10b_intr_stall,
503 .intr_stall_pause = mc_gp10b_intr_stall_pause,
504 .intr_stall_resume = mc_gp10b_intr_stall_resume,
505 .intr_nonstall = mc_gp10b_intr_nonstall,
506 .intr_nonstall_pause = mc_gp10b_intr_nonstall_pause,
507 .intr_nonstall_resume = mc_gp10b_intr_nonstall_resume,
508 .enable = gk20a_mc_enable,
509 .disable = gk20a_mc_disable,
510 .reset = gk20a_mc_reset,
511 .boot_0 = gk20a_mc_boot_0,
512 .is_intr1_pending = mc_gp10b_is_intr1_pending,
513 .is_intr_hub_pending = gv11b_mc_is_intr_hub_pending,
514 },
515 .debug = {
516 .show_dump = NULL,
517 },
518 .dbg_session_ops = {
519 .exec_reg_ops = vgpu_exec_regops,
520 .dbg_set_powergate = vgpu_dbg_set_powergate,
521 .check_and_set_global_reservation =
522 vgpu_check_and_set_global_reservation,
523 .check_and_set_context_reservation =
524 vgpu_check_and_set_context_reservation,
525 .release_profiler_reservation =
526 vgpu_release_profiler_reservation,
527 .perfbuffer_enable = vgpu_perfbuffer_enable,
528 .perfbuffer_disable = vgpu_perfbuffer_disable,
529 },
530 .bus = {
531 .init_hw = gk20a_bus_init_hw,
532 .isr = gk20a_bus_isr,
533 .read_ptimer = vgpu_read_ptimer,
534 .get_timestamps_zipper = vgpu_get_timestamps_zipper,
535 .bar1_bind = NULL,
536 },
537#if defined(CONFIG_GK20A_CYCLE_STATS)
538 .css = {
539 .enable_snapshot = vgpu_css_enable_snapshot_buffer,
540 .disable_snapshot = vgpu_css_release_snapshot_buffer,
541 .check_data_available = vgpu_css_flush_snapshots,
542 .set_handled_snapshots = NULL,
543 .allocate_perfmon_ids = NULL,
544 .release_perfmon_ids = NULL,
545 },
546#endif
547 .falcon = {
548 .falcon_hal_sw_init = gk20a_falcon_hal_sw_init,
549 },
550 .priv_ring = {
551 .isr = gp10b_priv_ring_isr,
552 },
553 .chip_init_gpu_characteristics = vgpu_gv11b_init_gpu_characteristics,
554 .get_litter_value = gv11b_get_litter_value,
555};
556
557int vgpu_gv11b_init_hal(struct gk20a *g)
558{
559 struct gpu_ops *gops = &g->ops;
560 u32 val;
561 bool priv_security;
562
563 gops->ltc = vgpu_gv11b_ops.ltc;
564 gops->ce2 = vgpu_gv11b_ops.ce2;
565 gops->gr = vgpu_gv11b_ops.gr;
566 gops->fb = vgpu_gv11b_ops.fb;
567 gops->clock_gating = vgpu_gv11b_ops.clock_gating;
568 gops->fifo = vgpu_gv11b_ops.fifo;
569 gops->gr_ctx = vgpu_gv11b_ops.gr_ctx;
570 gops->mm = vgpu_gv11b_ops.mm;
571 gops->fecs_trace = vgpu_gv11b_ops.fecs_trace;
572 gops->therm = vgpu_gv11b_ops.therm;
573 gops->pmu = vgpu_gv11b_ops.pmu;
574 gops->regops = vgpu_gv11b_ops.regops;
575 gops->mc = vgpu_gv11b_ops.mc;
576 gops->debug = vgpu_gv11b_ops.debug;
577 gops->dbg_session_ops = vgpu_gv11b_ops.dbg_session_ops;
578 gops->bus = vgpu_gv11b_ops.bus;
579#if defined(CONFIG_GK20A_CYCLE_STATS)
580 gops->css = vgpu_gv11b_ops.css;
581#endif
582 gops->falcon = vgpu_gv11b_ops.falcon;
583 gops->priv_ring = vgpu_gv11b_ops.priv_ring;
584
585 /* Lone functions */
586 gops->chip_init_gpu_characteristics =
587 vgpu_gv11b_ops.chip_init_gpu_characteristics;
588 gops->get_litter_value = vgpu_gv11b_ops.get_litter_value;
589
590 val = gk20a_readl(g, fuse_opt_priv_sec_en_r());
591 if (val) {
592 priv_security = true;
593 pr_err("priv security is enabled\n");
594 } else {
595 priv_security = false;
596 pr_err("priv security is disabled\n");
597 }
598 __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
599 __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security);
600 __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security);
601
602 /* priv security dependent ops */
603 if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
604 /* Add in ops from gm20b acr */
605 gops->pmu.prepare_ucode = gp106_prepare_ucode_blob,
606 gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn,
607 gops->pmu.get_wpr = gm20b_wpr_info,
608 gops->pmu.alloc_blob_space = gm20b_alloc_blob_space,
609 gops->pmu.pmu_populate_loader_cfg =
610 gp106_pmu_populate_loader_cfg,
611 gops->pmu.flcn_populate_bl_dmem_desc =
612 gp106_flcn_populate_bl_dmem_desc,
613 gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt,
614 gops->pmu.falcon_clear_halt_interrupt_status =
615 clear_halt_interrupt_status,
616 gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1,
617
618 gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
619 gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode;
620 gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap,
621 gops->pmu.is_priv_load = gv11b_is_priv_load,
622
623 gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
624 } else {
625 /* Inherit from gk20a */
626 gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob,
627 gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1,
628
629 gops->pmu.load_lsfalcon_ucode = NULL;
630 gops->pmu.init_wpr_region = NULL;
631 gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1;
632
633 gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
634 }
635
636 __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false);
637 g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT;
638
639 g->name = "gv11b";
640
641 return 0;
642}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
new file mode 100644
index 00000000..857e58c4
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c
@@ -0,0 +1,79 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <gk20a/gk20a.h>
24#include <vgpu/vgpu.h>
25#include <linux/tegra_vgpu.h>
26
/*
 * Allocate a gv11b subcontext (ctx) header for channel @c.
 *
 * Reserves a GPU VA in the channel's VM, then asks the vGPU server (via
 * TEGRA_VGPU_CMD_ALLOC_CTX_HEADER) to back and map the header at that VA.
 * On success the VA is recorded in c->ch_ctx.ctx_header.mem.gpu_va.
 *
 * Returns 0 on success, -ENOMEM if the VA reservation fails, or the
 * error reported by the RPC transport/server.
 */
int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c)
{
	struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
	struct tegra_vgpu_cmd_msg msg = {};
	struct tegra_vgpu_alloc_ctx_header_params *p =
		&msg.params.t19x.alloc_ctx_header;
	struct gr_gk20a *gr = &c->g->gr;
	int err;

	msg.cmd = TEGRA_VGPU_CMD_ALLOC_CTX_HEADER;
	msg.handle = vgpu_get_handle(c->g);
	p->ch_handle = c->virt_ctx;
	/*
	 * NOTE(review): the VA reservation is sized by the golden context
	 * image size — confirm the subctx header really requires the full
	 * golden image size rather than a dedicated header size.
	 */
	p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm,
			gr->ctx_vars.golden_image_size,
			gmmu_page_size_kernel);
	if (!p->ctx_header_va) {
		nvgpu_err(c->g, "alloc va failed for ctx_header");
		return -ENOMEM;
	}
	/* transport error takes precedence; otherwise the server's status */
	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
	err = err ? err : msg.ret;
	if (unlikely(err)) {
		nvgpu_err(c->g, "alloc ctx_header failed err %d", err);
		/* roll back the VA reservation on server-side failure */
		__nvgpu_vm_free_va(c->vm, p->ctx_header_va,
			gmmu_page_size_kernel);
		return err;
	}
	ctx->mem.gpu_va = p->ctx_header_va;

	return err;
}
58
59void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c)
60{
61 struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header;
62 struct tegra_vgpu_cmd_msg msg = {};
63 struct tegra_vgpu_free_ctx_header_params *p =
64 &msg.params.t19x.free_ctx_header;
65 int err;
66
67 if (ctx->mem.gpu_va) {
68 msg.cmd = TEGRA_VGPU_CMD_FREE_CTX_HEADER;
69 msg.handle = vgpu_get_handle(c->g);
70 p->ch_handle = c->virt_ctx;
71 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
72 err = err ? err : msg.ret;
73 if (unlikely(err))
74 nvgpu_err(c->g, "free ctx_header failed err %d", err);
75 __nvgpu_vm_free_va(c->vm, ctx->mem.gpu_va,
76 gmmu_page_size_kernel);
77 ctx->mem.gpu_va = 0;
78 }
79}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h
new file mode 100644
index 00000000..0e09f4f6
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef _VGPU_SUBCTX_GV11B_H_
#define _VGPU_SUBCTX_GV11B_H_

struct channel_gk20a;

/* Allocate a gv11b subcontext header for @c via vGPU RPC; 0 on success. */
int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c);
/* Free @c's subcontext header, if one was allocated; no-op otherwise. */
void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c);

#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
new file mode 100644
index 00000000..7e70272a
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/tegra_vgpu.h>
24#include <gk20a/gk20a.h>
25#include <vgpu/vgpu.h>
26
27#include "vgpu_tsg_gv11b.h"
28
29int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
30 struct channel_gk20a *ch)
31{
32 struct tegra_vgpu_cmd_msg msg = {};
33 struct tegra_vgpu_tsg_bind_channel_ex_params *p =
34 &msg.params.t19x.tsg_bind_channel_ex;
35 int err;
36
37 gk20a_dbg_fn("");
38
39 err = gk20a_tsg_bind_channel(tsg, ch);
40 if (err)
41 return err;
42
43 msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL_EX;
44 msg.handle = vgpu_get_handle(tsg->g);
45 p->tsg_id = tsg->tsgid;
46 p->ch_handle = ch->virt_ctx;
47 p->subctx_id = ch->t19x.subctx_id;
48 p->runqueue_sel = ch->t19x.runqueue_sel;
49 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
50 err = err ? err : msg.ret;
51 if (err) {
52 nvgpu_err(tsg->g,
53 "vgpu_gv11b_tsg_bind_channel failed, ch %d tsgid %d",
54 ch->chid, tsg->tsgid);
55 gk20a_tsg_unbind_channel(ch);
56 }
57
58 return err;
59}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h
new file mode 100644
index 00000000..c7bb2f4e
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef _VGPU_TSG_GV11B_H_
#define _VGPU_TSG_GV11B_H_

/*
 * Forward declarations so this header is self-contained; without them the
 * structs would be declared inside the parameter list below (compiler
 * warning, and a distinct type per inclusion site).
 */
struct tsg_gk20a;
struct channel_gk20a;

/* Bind @ch to @tsg on gv11b vGPU (propagates subctx/runqueue to server). */
int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
		struct channel_gk20a *ch);

#endif
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h b/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h
new file mode 100644
index 00000000..8c020f80
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
#ifndef _VGPU_T19X_H_
#define _VGPU_T19X_H_

struct gk20a;

/* Install the gv11b vGPU HAL into g->ops; returns 0. */
int vgpu_gv11b_init_hal(struct gk20a *g);

/* t19x indirection: on this chip family "t19x" resolves to gv11b. */
#define vgpu_t19x_init_hal(g) vgpu_gv11b_init_hal(g)

/* Device-tree compatible string matched for the gv11b vGPU platform. */
#define TEGRA_19x_VGPU_COMPAT_TEGRA "nvidia,gv11b-vgpu"
extern struct gk20a_platform gv11b_vgpu_tegra_platform;
#define t19x_vgpu_tegra_platform gv11b_vgpu_tegra_platform

#endif
diff --git a/include/linux/tegra_gpu_t19x.h b/include/linux/tegra_gpu_t19x.h
new file mode 100644
index 00000000..f6157c12
--- /dev/null
+++ b/include/linux/tegra_gpu_t19x.h
@@ -0,0 +1,24 @@
1/*
 * Tegra T19x GPU constants (litter values)
3 *
4 * Copyright (c) 2016, NVIDIA Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
#ifndef __TEGRA_GPU_T19X_H
#define __TEGRA_GPU_T19X_H

/* t19x-only litter-value key: number of subcontexts (VEIDs) per engine.
 * NOTE(review): value 99 is the lookup key passed to get_litter_value(),
 * not the subcontext count itself. */
#define GPU_LIT_NUM_SUBCTX 99

#endif
diff --git a/include/linux/tegra_vgpu_t19x.h b/include/linux/tegra_vgpu_t19x.h
new file mode 100644
index 00000000..38dbbf60
--- /dev/null
+++ b/include/linux/tegra_vgpu_t19x.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
#ifndef __TEGRA_VGPU_T19X_H
#define __TEGRA_VGPU_T19X_H

/* t19x additions to the guest<->server vGPU command space. */
#define TEGRA_VGPU_CMD_ALLOC_CTX_HEADER	100
#define TEGRA_VGPU_CMD_FREE_CTX_HEADER	101
#define TEGRA_VGPU_CMD_MAP_SYNCPT	102
#define TEGRA_VGPU_CMD_TSG_BIND_CHANNEL_EX	103

struct tegra_vgpu_alloc_ctx_header_params {
	u64 ch_handle;		/* guest handle of the owning channel */
	u64 ctx_header_va;	/* GPU VA reserved by the guest for the header */
};

struct tegra_vgpu_free_ctx_header_params {
	u64 ch_handle;		/* guest handle of the owning channel */
};

struct tegra_vgpu_map_syncpt_params {
	u64 as_handle;		/* address-space handle to map into */
	u64 gpu_va;		/* target GPU VA */
	u64 len;		/* mapping length in bytes */
	u64 offset;		/* offset into the syncpoint aperture */
	u8 prot;		/* protection flags */
};

struct tegra_vgpu_tsg_bind_channel_ex_params {
	u32 tsg_id;
	/*
	 * NOTE(review): u64 after u32 implies implicit padding here unless
	 * the enclosing message is packed — confirm the server uses an
	 * identical layout for this wire struct.
	 */
	u64 ch_handle;
	u32 subctx_id;		/* VEID */
	u32 runqueue_sel;
};

/* t19x member of the tegra_vgpu_cmd_msg params union. */
union tegra_vgpu_t19x_params {
	struct tegra_vgpu_alloc_ctx_header_params alloc_ctx_header;
	struct tegra_vgpu_free_ctx_header_params free_ctx_header;
	struct tegra_vgpu_map_syncpt_params map_syncpt;
	struct tegra_vgpu_tsg_bind_channel_ex_params tsg_bind_channel_ex;
};

#define TEGRA_VGPU_ATTRIB_MAX_SUBCTX_COUNT	100

#endif
diff --git a/include/uapi/linux/nvgpu-t19x.h b/include/uapi/linux/nvgpu-t19x.h
new file mode 100644
index 00000000..27db97c0
--- /dev/null
+++ b/include/uapi/linux/nvgpu-t19x.h
@@ -0,0 +1,59 @@
1/*
2 * NVGPU Public Interface Header
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
/* This file is meant to extend nvgpu.h, not replace it
 * as such, be sure that nvgpu.h is actually the file performing the
 * inclusion, to the extent that's possible.
 */
#ifndef _UAPI__LINUX_NVGPU_IOCTL_H
#  error "This file is to be included within nvgpu.h only."
#endif

#ifndef _UAPI__LINUX_NVGPU_T19X_IOCTL_H_
#define _UAPI__LINUX_NVGPU_T19X_IOCTL_H_

/* Volta arch/impl ids reported through NVGPU_GPU_IOCTL_GET_CHARACTERISTICS */
#define NVGPU_GPU_ARCH_GV110 0x00000150
#define NVGPU_GPU_ARCH_GV100 0x00000140
#define NVGPU_GPU_IMPL_GV11B 0x0000000B
#define NVGPU_GPU_IMPL_GV100 0x00000000

/*
 * this flag is used in struct nvgpu_as_map_buffer_ex_args
 * to provide L3 cache allocation hint
 */
#define NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC (1 << 7)

/* subcontexts are available */
#define NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS (1ULL << 22)

/* argument block for NVGPU_TSG_IOCTL_BIND_CHANNEL_EX */
struct nvgpu_tsg_bind_channel_ex_args {
	/* in: channel fd */
	__s32 channel_fd;

	/* in: VEID in Volta */
	__u32 subcontext_id;
	__u32 num_active_tpcs;
	__u8 tpc_pg_enabled;
	__u8 reserved[11];	/* pads the struct; must be zero */
};

#define NVGPU_TSG_IOCTL_BIND_CHANNEL_EX	\
	_IOWR(NVGPU_TSG_IOCTL_MAGIC, 11, struct nvgpu_tsg_bind_channel_ex_args)

/* highest TSG ioctl number / largest TSG ioctl argument (t19x extension) */
#define NVGPU_TSG_IOCTL_MAX NVGPU_TSG_IOCTL_BIND_CHANNEL_EX

#define NVGPU_TSG_IOCTL_MAX_ARG sizeof(struct nvgpu_tsg_bind_channel_ex_args)

#endif /* _UAPI__LINUX_NVGPU_T19X_IOCTL_H_ */
59#endif /* _UAPI__LINUX_NVGPU_T19X_IOCTL_H_ */