summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/ltc_common.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ltc_common.c')
-rw-r--r-- drivers/gpu/nvgpu/gk20a/ltc_common.c | 155
1 file changed, 0 insertions(+), 155 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
deleted file mode 100644
index 2b015fa0..00000000
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * drivers/video/tegra/host/gk20a/ltc_common.c
3 *
4 * GK20A Graphics
5 *
6 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <nvgpu/dma.h>
22#include <nvgpu/enabled.h>
23
24#include "gk20a.h"
25#include "gr_gk20a.h"
26
27/*
28 * Sets the ZBC color for the passed index.
29 */
30static void gk20a_ltc_set_zbc_color_entry(struct gk20a *g,
31 struct zbc_entry *color_val,
32 u32 index)
33{
34 u32 i;
35 u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
36
37 gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
38 ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
39
40 for (i = 0;
41 i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++) {
42 gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
43 color_val->color_l2[i]);
44 }
45 gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
46}
47
48/*
49 * Sets the ZBC depth for the passed index.
50 */
51static void gk20a_ltc_set_zbc_depth_entry(struct gk20a *g,
52 struct zbc_entry *depth_val,
53 u32 index)
54{
55 u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
56
57 gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
58 ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
59
60 gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
61 depth_val->depth);
62
63 gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
64}
65
66static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
67 size_t compbit_backing_size)
68{
69 struct gr_gk20a *gr = &g->gr;
70
71 return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
72 compbit_backing_size,
73 &gr->compbit_store.mem);
74}
75
76static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
77 size_t compbit_backing_size)
78{
79 struct gr_gk20a *gr = &g->gr;
80
81 return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
82 compbit_backing_size,
83 &gr->compbit_store.mem);
84}
85
/*
 * Program the compbit backing-store base address into the L2 CBC base
 * register and invalidate every comptag line.
 *
 * The store's IOVA is right-shifted by the CBC base alignment and then
 * divided by the LTC count before being written; if the truncating
 * division lost low bits (checked by multiplying back), the divided
 * base is bumped up by one so the programmed base covers the store.
 */
static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 max_size = gr->max_comptag_mem;
	/* NOTE(review): << 3 implies 8 comptag lines per unit of
	 * max_comptag_mem -- confirm the exact units against the
	 * comptag allocation code. */
	u32 max_comptag_lines = max_size << 3;

	u32 compbit_base_post_divide;
	u64 compbit_base_post_multiply64;
	u64 compbit_store_iova;
	u64 compbit_base_post_divide64;

	/* Simulation/FMODEL has no SMMU path; use the raw physical
	 * address instead of the IOVA. */
	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
	else
		compbit_store_iova = g->ops.mm.get_iova_addr(g,
				gr->compbit_store.mem.priv.sgt->sgl, 0);

	/* Drop the alignment bits the register does not store. */
	compbit_base_post_divide64 = compbit_store_iova >>
		ltc_ltcs_ltss_cbc_base_alignment_shift_v();

	/* do_div() divides in place, leaving the quotient in its first
	 * argument. */
	do_div(compbit_base_post_divide64, g->ltc_count);
	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);

	/* Multiply back to detect truncation from the division above. */
	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
		g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();

	if (compbit_base_post_multiply64 < compbit_store_iova)
		compbit_base_post_divide++;

	/* Bug 1477079 indicates sw adjustment on the posted divided base. */
	if (g->ops.ltc.cbc_fix_config)
		compbit_base_post_divide =
			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);

	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
		compbit_base_post_divide);

	gk20a_dbg(gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
		   "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
		   (u32)(compbit_store_iova >> 32),
		   (u32)(compbit_store_iova & 0xffffffff),
		   compbit_base_post_divide);

	gr->compbit_store.base_hw = compbit_base_post_divide;

	/* Start from a clean slate: invalidate the full comptag range. */
	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
			    0, max_comptag_lines - 1);

}
134
#ifdef CONFIG_DEBUG_FS
/*
 * Apply the debugfs-requested L2 bypass setting to hardware.
 *
 * Holding g->debugfs_lock, compare the live state (g->mm.ltc_enabled)
 * with the debugfs knob (g->mm.ltc_enabled_debug); when they differ,
 * read-modify-write the bypass-mode field in the tstg set_mgmt_2
 * register and record the new live state. Enabling the L2 means
 * clearing the bypass bit, and vice versa.
 */
static void gk20a_ltc_sync_debugfs(struct gk20a *g)
{
	u32 bypass_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();

	nvgpu_spinlock_acquire(&g->debugfs_lock);
	if (g->mm.ltc_enabled != g->mm.ltc_enabled_debug) {
		u32 val = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());

		if (g->mm.ltc_enabled_debug)
			val &= ~bypass_f;	/* normal caching: bypass off */
		else
			val |= bypass_f;	/* no caching: bypass on */

		gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), val);
		g->mm.ltc_enabled = g->mm.ltc_enabled_debug;
	}
	nvgpu_spinlock_release(&g->debugfs_lock);
}
#endif