summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp106/clk_gp106.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/clk_gp106.c')
-rw-r--r--drivers/gpu/nvgpu/gp106/clk_gp106.c404
1 file changed, 404 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c
new file mode 100644
index 00000000..b8a1ba3d
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c
@@ -0,0 +1,404 @@
1/*
2 * GP106 Clocks
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifdef CONFIG_DEBUG_FS
26#include <linux/debugfs.h>
27#endif
28
29#include <nvgpu/kmem.h>
30
31#include "gk20a/gk20a.h"
32#include "common/linux/os_linux.h"
33
34#include "clk_gp106.h"
35
36#include "gp106/mclk_gp106.h"
37
38#include <nvgpu/hw/gp106/hw_trim_gp106.h>
39
40#define gk20a_dbg_clk(fmt, arg...) \
41 gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
42
43#ifdef CONFIG_DEBUG_FS
44static int clk_gp106_debugfs_init(struct gk20a *g);
45#endif
46
47#define NUM_NAMEMAPS 4
48#define XTAL4X_KHZ 108000
49
50
51static u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *);
52u32 gp106_crystal_clk_hz(struct gk20a *g)
53{
54 return (XTAL4X_KHZ * 1000);
55}
56
57unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain)
58{
59 struct clk_gk20a *clk = &g->clk;
60 u32 freq_khz;
61 u32 i;
62 struct namemap_cfg *c = NULL;
63
64 for (i = 0; i < clk->namemap_num; i++) {
65 if (api_domain == clk->namemap_xlat_table[i]) {
66 c = &clk->clk_namemap[i];
67 break;
68 }
69 }
70
71 if (!c)
72 return 0;
73
74 freq_khz = c->is_counter ? c->scale * gp106_get_rate_cntr(g, c) :
75 0; /* TODO: PLL read */
76
77 /* Convert to HZ */
78 return freq_khz * 1000UL;
79}
80
81int gp106_init_clk_support(struct gk20a *g)
82{
83 struct clk_gk20a *clk = &g->clk;
84 u32 err = 0;
85
86 gk20a_dbg_fn("");
87
88 err = nvgpu_mutex_init(&clk->clk_mutex);
89 if (err)
90 return err;
91
92 clk->clk_namemap = (struct namemap_cfg *)
93 nvgpu_kzalloc(g, sizeof(struct namemap_cfg) * NUM_NAMEMAPS);
94
95 if (!clk->clk_namemap) {
96 nvgpu_mutex_destroy(&clk->clk_mutex);
97 return -ENOMEM;
98 }
99
100 clk->namemap_xlat_table = nvgpu_kcalloc(g, NUM_NAMEMAPS, sizeof(u32));
101
102 if (!clk->namemap_xlat_table) {
103 nvgpu_kfree(g, clk->clk_namemap);
104 nvgpu_mutex_destroy(&clk->clk_mutex);
105 return -ENOMEM;
106 }
107
108 clk->clk_namemap[0] = (struct namemap_cfg) {
109 .namemap = CLK_NAMEMAP_INDEX_GPC2CLK,
110 .is_enable = 1,
111 .is_counter = 1,
112 .g = g,
113 .cntr.reg_ctrl_addr = trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_r(),
114 .cntr.reg_ctrl_idx =
115 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_source_gpc2clk_f(),
116 .cntr.reg_cntr_addr = trim_gpc_bcast_clk_cntr_ncgpcclk_cnt_r(),
117 .name = "gpc2clk",
118 .scale = 1
119 };
120 clk->namemap_xlat_table[0] = CTRL_CLK_DOMAIN_GPC2CLK;
121 clk->clk_namemap[1] = (struct namemap_cfg) {
122 .namemap = CLK_NAMEMAP_INDEX_SYS2CLK,
123 .is_enable = 1,
124 .is_counter = 1,
125 .g = g,
126 .cntr.reg_ctrl_addr = trim_sys_clk_cntr_ncsyspll_cfg_r(),
127 .cntr.reg_ctrl_idx = trim_sys_clk_cntr_ncsyspll_cfg_source_sys2clk_f(),
128 .cntr.reg_cntr_addr = trim_sys_clk_cntr_ncsyspll_cnt_r(),
129 .name = "sys2clk",
130 .scale = 1
131 };
132 clk->namemap_xlat_table[1] = CTRL_CLK_DOMAIN_SYS2CLK;
133 clk->clk_namemap[2] = (struct namemap_cfg) {
134 .namemap = CLK_NAMEMAP_INDEX_XBAR2CLK,
135 .is_enable = 1,
136 .is_counter = 1,
137 .g = g,
138 .cntr.reg_ctrl_addr = trim_sys_clk_cntr_ncltcpll_cfg_r(),
139 .cntr.reg_ctrl_idx = trim_sys_clk_cntr_ncltcpll_cfg_source_xbar2clk_f(),
140 .cntr.reg_cntr_addr = trim_sys_clk_cntr_ncltcpll_cnt_r(),
141 .name = "xbar2clk",
142 .scale = 1
143 };
144 clk->namemap_xlat_table[2] = CTRL_CLK_DOMAIN_XBAR2CLK;
145 clk->clk_namemap[3] = (struct namemap_cfg) {
146 .namemap = CLK_NAMEMAP_INDEX_DRAMCLK,
147 .is_enable = 1,
148 .is_counter = 1,
149 .g = g,
150 .cntr.reg_ctrl_addr = trim_fbpa_bcast_clk_cntr_ncltcclk_cfg_r(),
151 .cntr.reg_ctrl_idx =
152 trim_fbpa_bcast_clk_cntr_ncltcclk_cfg_source_dramdiv4_rec_clk1_f(),
153 .cntr.reg_cntr_addr = trim_fbpa_bcast_clk_cntr_ncltcclk_cnt_r(),
154 .name = "dramdiv2_rec_clk1",
155 .scale = 2
156 };
157 clk->namemap_xlat_table[3] = CTRL_CLK_DOMAIN_MCLK;
158
159 clk->namemap_num = NUM_NAMEMAPS;
160
161 clk->g = g;
162
163#ifdef CONFIG_DEBUG_FS
164 if (!clk->debugfs_set) {
165 if (!clk_gp106_debugfs_init(g))
166 clk->debugfs_set = true;
167 }
168#endif
169 return err;
170}
171
172static u32 gp106_get_rate_cntr(struct gk20a *g, struct namemap_cfg *c) {
173 u32 save_reg;
174 u32 retries;
175 u32 cntr = 0;
176
177 struct clk_gk20a *clk = &g->clk;
178
179 if (!c || !c->cntr.reg_ctrl_addr || !c->cntr.reg_cntr_addr)
180 return 0;
181
182 nvgpu_mutex_acquire(&clk->clk_mutex);
183
184 /* Save the register */
185 save_reg = gk20a_readl(g, c->cntr.reg_ctrl_addr);
186
187 /* Disable and reset the current clock */
188 gk20a_writel(g, c->cntr.reg_ctrl_addr,
189 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_asserted_f() |
190 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f());
191
192 /* Force wb() */
193 gk20a_readl(g, c->cntr.reg_ctrl_addr);
194
195 /* Wait for reset to happen */
196 retries = CLK_DEFAULT_CNTRL_SETTLE_RETRIES;
197 do {
198 nvgpu_udelay(CLK_DEFAULT_CNTRL_SETTLE_USECS);
199 } while ((--retries) && (cntr = gk20a_readl(g, c->cntr.reg_cntr_addr)));
200
201 if (!retries) {
202 nvgpu_err(g, "unable to settle counter reset, bailing");
203 goto read_err;
204 }
205 /* Program counter */
206 gk20a_writel(g, c->cntr.reg_ctrl_addr,
207 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_deasserted_f() |
208 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_asserted_f() |
209 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() |
210 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() |
211 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() |
212 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_noofipclks_f(XTAL_CNTR_CLKS) |
213 c->cntr.reg_ctrl_idx);
214 gk20a_readl(g, c->cntr.reg_ctrl_addr);
215
216 nvgpu_udelay(XTAL_CNTR_DELAY);
217
218 cntr = XTAL_SCALE_TO_KHZ * gk20a_readl(g, c->cntr.reg_cntr_addr);
219
220read_err:
221 /* reset and restore control register */
222 gk20a_writel(g, c->cntr.reg_ctrl_addr,
223 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_reset_asserted_f() |
224 trim_gpc_bcast_clk_cntr_ncgpcclk_cfg_enable_deasserted_f());
225 gk20a_readl(g, c->cntr.reg_ctrl_addr);
226 gk20a_writel(g, c->cntr.reg_ctrl_addr, save_reg);
227 gk20a_readl(g, c->cntr.reg_ctrl_addr);
228 nvgpu_mutex_release(&clk->clk_mutex);
229
230 return cntr;
231
232}
233
234#ifdef CONFIG_DEBUG_FS
235static int gp106_get_rate_show(void *data , u64 *val) {
236 struct namemap_cfg *c = (struct namemap_cfg *) data;
237 struct gk20a *g = c->g;
238
239 *val = c->is_counter ? gp106_get_rate_cntr(g, c) : 0 /* TODO PLL read */;
240 return 0;
241}
242DEFINE_SIMPLE_ATTRIBUTE(get_rate_fops, gp106_get_rate_show, NULL, "%llu\n");
243
244static int sys_cfc_read(void *data , u64 *val)
245{
246 struct gk20a *g = (struct gk20a *)data;
247 bool bload = boardobjgrpmask_bitget(
248 &g->clk_pmu.clk_freq_controllers.freq_ctrl_load_mask.super,
249 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS);
250
251 /* val = 1 implies CLFC is loaded or enabled */
252 *val = bload ? 1 : 0;
253 return 0;
254}
255static int sys_cfc_write(void *data , u64 val)
256{
257 struct gk20a *g = (struct gk20a *)data;
258 int status;
259 /* val = 1 implies load or enable the CLFC */
260 bool bload = val ? true : false;
261
262 nvgpu_clk_arb_pstate_change_lock(g, true);
263 status = clk_pmu_freq_controller_load(g, bload,
264 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_SYS);
265 nvgpu_clk_arb_pstate_change_lock(g, false);
266
267 return status;
268}
269DEFINE_SIMPLE_ATTRIBUTE(sys_cfc_fops, sys_cfc_read, sys_cfc_write, "%llu\n");
270
271static int ltc_cfc_read(void *data , u64 *val)
272{
273 struct gk20a *g = (struct gk20a *)data;
274 bool bload = boardobjgrpmask_bitget(
275 &g->clk_pmu.clk_freq_controllers.freq_ctrl_load_mask.super,
276 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC);
277
278 /* val = 1 implies CLFC is loaded or enabled */
279 *val = bload ? 1 : 0;
280 return 0;
281}
282static int ltc_cfc_write(void *data , u64 val)
283{
284 struct gk20a *g = (struct gk20a *)data;
285 int status;
286 /* val = 1 implies load or enable the CLFC */
287 bool bload = val ? true : false;
288
289 nvgpu_clk_arb_pstate_change_lock(g, true);
290 status = clk_pmu_freq_controller_load(g, bload,
291 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_LTC);
292 nvgpu_clk_arb_pstate_change_lock(g, false);
293
294 return status;
295}
296DEFINE_SIMPLE_ATTRIBUTE(ltc_cfc_fops, ltc_cfc_read, ltc_cfc_write, "%llu\n");
297
298static int xbar_cfc_read(void *data , u64 *val)
299{
300 struct gk20a *g = (struct gk20a *)data;
301 bool bload = boardobjgrpmask_bitget(
302 &g->clk_pmu.clk_freq_controllers.freq_ctrl_load_mask.super,
303 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR);
304
305 /* val = 1 implies CLFC is loaded or enabled */
306 *val = bload ? 1 : 0;
307 return 0;
308}
309static int xbar_cfc_write(void *data , u64 val)
310{
311 struct gk20a *g = (struct gk20a *)data;
312 int status;
313 /* val = 1 implies load or enable the CLFC */
314 bool bload = val ? true : false;
315
316 nvgpu_clk_arb_pstate_change_lock(g, true);
317 status = clk_pmu_freq_controller_load(g, bload,
318 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_XBAR);
319 nvgpu_clk_arb_pstate_change_lock(g, false);
320
321 return status;
322}
323DEFINE_SIMPLE_ATTRIBUTE(xbar_cfc_fops, xbar_cfc_read,
324 xbar_cfc_write, "%llu\n");
325
326static int gpc_cfc_read(void *data , u64 *val)
327{
328 struct gk20a *g = (struct gk20a *)data;
329 bool bload = boardobjgrpmask_bitget(
330 &g->clk_pmu.clk_freq_controllers.freq_ctrl_load_mask.super,
331 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0);
332
333 /* val = 1 implies CLFC is loaded or enabled */
334 *val = bload ? 1 : 0;
335 return 0;
336}
337static int gpc_cfc_write(void *data , u64 val)
338{
339 struct gk20a *g = (struct gk20a *)data;
340 int status;
341 /* val = 1 implies load or enable the CLFC */
342 bool bload = val ? true : false;
343
344 nvgpu_clk_arb_pstate_change_lock(g, true);
345 status = clk_pmu_freq_controller_load(g, bload,
346 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_GPC0);
347 nvgpu_clk_arb_pstate_change_lock(g, false);
348
349 return status;
350}
351DEFINE_SIMPLE_ATTRIBUTE(gpc_cfc_fops, gpc_cfc_read, gpc_cfc_write, "%llu\n");
352
353static int clk_gp106_debugfs_init(struct gk20a *g)
354{
355 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
356 struct dentry *gpu_root = l->debugfs;
357 struct dentry *clocks_root, *clk_freq_ctlr_root;
358 struct dentry *d;
359 unsigned int i;
360
361 if (NULL == (clocks_root = debugfs_create_dir("clocks", gpu_root)))
362 return -ENOMEM;
363
364 clk_freq_ctlr_root = debugfs_create_dir("clk_freq_ctlr", gpu_root);
365 if (clk_freq_ctlr_root == NULL)
366 return -ENOMEM;
367
368 d = debugfs_create_file("sys", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
369 g, &sys_cfc_fops);
370 d = debugfs_create_file("ltc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
371 g, &ltc_cfc_fops);
372 d = debugfs_create_file("xbar", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
373 g, &xbar_cfc_fops);
374 d = debugfs_create_file("gpc", S_IRUGO | S_IWUSR, clk_freq_ctlr_root,
375 g, &gpc_cfc_fops);
376
377 gk20a_dbg(gpu_dbg_info, "g=%p", g);
378
379 for (i = 0; i < g->clk.namemap_num; i++) {
380 if (g->clk.clk_namemap[i].is_enable) {
381 d = debugfs_create_file(
382 g->clk.clk_namemap[i].name,
383 S_IRUGO,
384 clocks_root,
385 &g->clk.clk_namemap[i],
386 &get_rate_fops);
387 if (!d)
388 goto err_out;
389 }
390 }
391 return 0;
392
393err_out:
394 pr_err("%s: Failed to make debugfs node\n", __func__);
395 debugfs_remove_recursive(clocks_root);
396 return -ENOMEM;
397}
398#endif /* CONFIG_DEBUG_FS */
399
400int gp106_suspend_clk_support(struct gk20a *g)
401{
402 nvgpu_mutex_destroy(&g->clk.clk_mutex);
403 return 0;
404}