Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/pmu_gv11b.c')
 -rw-r--r--  drivers/gpu/nvgpu/gv11b/pmu_gv11b.c  283
 1 file changed, 283 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
new file mode 100644
index 00000000..2c7b6457
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -0,0 +1,283 @@
/*
 * GV11B PMU
 *
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>	/* for udelay */
#include <linux/clk.h>

#include <soc/tegra/fuse.h>

#include <nvgpu/pmu.h>
#include <nvgpu/falcon.h>
#include <nvgpu/enabled.h>
#include <nvgpu/mm.h>

#include "gk20a/gk20a.h"

#include "gp10b/pmu_gp10b.h"
#include "gp106/pmu_gp106.h"

#include "pmu_gv11b.h"
#include "acr_gv11b.h"

#include <nvgpu/hw/gv11b/hw_pwr_gv11b.h>

#define gv11b_dbg_pmu(fmt, arg...) \
	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

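/* the instance block pointer register is programmed in units of 4KB pages */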
#define ALIGN_4KB 12

bool gv11b_is_pmu_supported(struct gk20a *g)
{
	return true;
}

bool gv11b_is_lazy_bootstrap(u32 falcon_id)
{
	bool enable_status = false;

	switch (falcon_id) {
	case LSF_FALCON_ID_FECS:
		enable_status = true;
		break;
	case LSF_FALCON_ID_GPCCS:
		enable_status = true;
		break;
	default:
		break;
	}

	return enable_status;
}

bool gv11b_is_priv_load(u32 falcon_id)
{
	bool enable_status = false;

	switch (falcon_id) {
	case LSF_FALCON_ID_FECS:
		enable_status = true;
		break;
	case LSF_FALCON_ID_GPCCS:
		enable_status = true;
		break;
	default:
		break;
	}

	return enable_status;
}

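/*
 * Copy the PMU boot arguments into DMEM and DMA the bootloader into IMEM,
 * then start the falcon at the bootloader entry point.
 */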
int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct mm_gk20a *mm = &g->mm;
	struct pmu_ucode_desc *desc = pmu->desc;
	u64 addr_code_lo, addr_data_lo, addr_load_lo;
	u64 addr_code_hi, addr_data_hi, addr_load_hi;
	u32 i, blocks, addr_args;

	gk20a_dbg_fn("");

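	/* enable the falcon context interface and bind the PMU instance block */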
	gk20a_writel(g, pwr_falcon_itfen_r(),
		gk20a_readl(g, pwr_falcon_itfen_r()) |
		pwr_falcon_itfen_ctxen_enable_f());

	gk20a_writel(g, pwr_pmu_new_instblk_r(),
		pwr_pmu_new_instblk_ptr_f(
		nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB)
		| pwr_pmu_new_instblk_valid_f(1)
		| pwr_pmu_new_instblk_target_sys_ncoh_f());

	/* TBD: load all other surfaces */
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);

	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));

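	/* the command line args are copied to the very end of DMEM */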
	addr_args = (pwr_falcon_hwcfg_dmem_size_v(
		gk20a_readl(g, pwr_falcon_hwcfg_r()))
		<< GK20A_PMU_DMEM_BLKSIZE2) -
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);

	nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

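	/* select DMEM port 0 at offset 0 with auto-increment on writes */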
	gk20a_writel(g, pwr_falcon_dmemc_r(0),
		pwr_falcon_dmemc_offs_f(0) |
		pwr_falcon_dmemc_blk_f(0) |
		pwr_falcon_dmemc_aincw_f(1));

	addr_code_lo = u64_lo32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_code_offset) >> 8);

	addr_code_hi = u64_hi32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_code_offset) >> 8);
	addr_data_lo = u64_lo32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_data_offset) >> 8);
	addr_data_hi = u64_hi32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_data_offset) >> 8);
	addr_load_lo = u64_lo32((pmu->ucode.gpu_va +
			desc->bootloader_start_offset) >> 8);
	addr_load_hi = u64_hi32((pmu->ucode.gpu_va +
			desc->bootloader_start_offset) >> 8);

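	/*
	 * Stream the bootloader descriptor into DMEM through the
	 * auto-incrementing data port: reserved words, the ucode DMA index,
	 * code/data addresses and sizes, the IMEM entry point and, last,
	 * the DMEM offset of the boot arguments.
	 */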
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_lo << 8);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_hi);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_offset);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_lo << 8);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_hi);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);

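	/* DMA transfer base, adjusted so IMEM offsets map onto the bootloader */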
	g->ops.pmu.write_dmatrfbase(g,
		addr_load_lo - (desc->bootloader_imem_offset >> 8));

	blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;

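	/* copy the bootloader into IMEM, one 256-byte block per DMA request */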
	for (i = 0; i < blocks; i++) {
		gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(),
			desc->bootloader_imem_offset + (i << 8));
		gk20a_writel(g, pwr_falcon_dmatrffboffs_r(),
			desc->bootloader_imem_offset + (i << 8));
		gk20a_writel(g, pwr_falcon_dmatrfcmd_r(),
			pwr_falcon_dmatrfcmd_imem_f(1) |
			pwr_falcon_dmatrfcmd_write_f(0) |
			pwr_falcon_dmatrfcmd_size_f(6) |
			pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE));
	}

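	/* start the falcon at the bootloader entry point */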
	nvgpu_flcn_bootstrap(pmu->flcn, desc->bootloader_entry_point);

	gk20a_writel(g, pwr_falcon_os_r(), desc->app_version);

	return 0;
}

static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
			void *param, u32 handle, u32 status)
{
	gk20a_dbg_fn("");

	if (status != 0) {
		nvgpu_err(g, "Sub-feature mask update cmd aborted\n");
		return;
	}

	gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n",
			msg->msg.pg.msg_type);
}

static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg,
			void *param, u32 handle, u32 status)
{
	gk20a_dbg_fn("");

	if (status != 0) {
		nvgpu_err(g, "GR PARAM cmd aborted\n");
		return;
	}

	gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n",
			msg->msg.pg.msg_type);
}

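/* ask the PMU to initialize graphics power gating parameters */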
int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
		memset(&cmd, 0, sizeof(struct pmu_cmd));
		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
			sizeof(struct pmu_pg_cmd_gr_init_param_v1);
		cmd.cmd.pg.gr_init_param_v1.cmd_type =
			PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.gr_init_param_v1.sub_cmd_id =
			PMU_PG_PARAM_CMD_GR_INIT_PARAM;
		cmd.cmd.pg.gr_init_param_v1.featuremask =
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;

		gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n");
		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_pg_param_msg, pmu, &seq, ~0);

	} else
		return -EINVAL;

	return 0;
}

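/* update the enabled sub-feature mask for the GR power gating controller */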
int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
		memset(&cmd, 0, sizeof(struct pmu_cmd));
		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
			sizeof(struct pmu_pg_cmd_sub_feature_mask_update);
		cmd.cmd.pg.sf_mask_update.cmd_type =
			PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.sf_mask_update.sub_cmd_id =
			PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE;
		cmd.cmd.pg.sf_mask_update.ctrl_id =
			PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		cmd.cmd.pg.sf_mask_update.enabled_mask =
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;

		gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
		nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);
	} else
		return -EINVAL;

	return 0;
}