summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/xve/xve_gp106.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/xve/xve_gp106.c')
-rw-r--r--drivers/gpu/nvgpu/common/xve/xve_gp106.c498
1 files changed, 498 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/xve/xve_gp106.c b/drivers/gpu/nvgpu/common/xve/xve_gp106.c
new file mode 100644
index 00000000..6cfbc9d9
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/xve/xve_gp106.c
@@ -0,0 +1,498 @@
1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24#include "gp106/bios_gp106.h"
25
26#include <nvgpu/bug.h>
27#include <nvgpu/xve.h>
28#include <nvgpu/io.h>
29
30#include "xve_gp106.h"
31
32#include <nvgpu/hw/gp106/hw_xp_gp106.h>
33#include <nvgpu/hw/gp106/hw_xve_gp106.h>
34
35#define NV_PCFG 0x88000
36
37void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val)
38{
39 gk20a_writel(g, NV_PCFG + reg, val);
40}
41
42u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg)
43{
44 return gk20a_readl(g, NV_PCFG + reg);
45}
46
/**
 * Resets the GPU (except the XVE/XP).
 *
 * The reset value is written twice: once with the reset bit set to trigger
 * the (auto-deasserting) reset, and once with 0 to clear the register after
 * the GPU is back.
 */
void xve_reset_gpu_gp106(struct gk20a *g)
{
	u32 reset;

	/*
	 * This resets the GPU except for the XVE/XP (since then we would lose
	 * the dGPU from the bus). t18x has a HW limitation where once that
	 * happens the GPU is gone until the entire system is reset.
	 *
	 * We have to use the auto-deassert register since we won't be able to
	 * access the GPU after the GPU goes into reset. This appears like the
	 * GPU has dropped from the bus and causes nvgpu to reset the entire
	 * system. Whoops!
	 */
	reset = xve_reset_reset_m() |
		xve_reset_gpu_on_sw_reset_m() |
		xve_reset_counter_en_m() |
		xve_reset_counter_val_f(0x7ff) |
		xve_reset_clock_on_sw_reset_m() |
		xve_reset_clock_counter_en_m() |
		xve_reset_clock_counter_val_f(0x7ff);

	/* Kick off the reset; the counter fields above handle de-assertion. */
	g->ops.xve.xve_writel(g, xve_reset_r(), reset | xve_reset_reset_m());

	/*
	 * Don't access GPU until _after_ it's back out of reset!
	 * (100 ms is presumably ample for the 0x7ff auto-deassert counters
	 * to expire — TODO confirm against HW timing.)
	 */
	nvgpu_msleep(100);
	g->ops.xve.xve_writel(g, xve_reset_r(), 0);
}
80
81/**
82 * Places one of:
83 *
84 * %GPU_XVE_SPEED_2P5
85 * %GPU_XVE_SPEED_5P0
86 * %GPU_XVE_SPEED_8P0
87 *
88 * in the u32 pointed to by @xve_link_speed. If for some reason an unknown PCIe
89 * bus speed is detected then *@xve_link_speed is not touched and -ENODEV is
90 * returned.
91 */
92int xve_get_speed_gp106(struct gk20a *g, u32 *xve_link_speed)
93{
94 u32 status;
95 u32 link_speed, real_link_speed = 0;
96
97 status = g->ops.xve.xve_readl(g, xve_link_control_status_r());
98
99 link_speed = xve_link_control_status_link_speed_v(status);
100
101 /*
102 * Can't use a switch statement becuase switch statements dont work with
103 * function calls.
104 */
105 if (link_speed == xve_link_control_status_link_speed_link_speed_2p5_v())
106 real_link_speed = GPU_XVE_SPEED_2P5;
107 if (link_speed == xve_link_control_status_link_speed_link_speed_5p0_v())
108 real_link_speed = GPU_XVE_SPEED_5P0;
109 if (link_speed == xve_link_control_status_link_speed_link_speed_8p0_v())
110 real_link_speed = GPU_XVE_SPEED_8P0;
111
112 if (!real_link_speed)
113 return -ENODEV;
114
115 *xve_link_speed = real_link_speed;
116 return 0;
117}
118
119/**
120 * Set the mask for L0s in the XVE.
121 *
122 * When @status is non-zero the mask for L0s is set which _disables_ L0s. When
123 * @status is zero L0s is no longer masked and may be enabled.
124 */
125static void set_xve_l0s_mask(struct gk20a *g, bool status)
126{
127 u32 xve_priv;
128 u32 status_bit = status ? 1 : 0;
129
130 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
131
132 xve_priv = set_field(xve_priv,
133 xve_priv_xv_cya_l0s_enable_m(),
134 xve_priv_xv_cya_l0s_enable_f(status_bit));
135
136 g->ops.xve.xve_writel(g, xve_priv_xv_r(), xve_priv);
137}
138
139/**
140 * Set the mask for L1 in the XVE.
141 *
142 * When @status is non-zero the mask for L1 is set which _disables_ L0s. When
143 * @status is zero L1 is no longer masked and may be enabled.
144 */
145static void set_xve_l1_mask(struct gk20a *g, int status)
146{
147 u32 xve_priv;
148 u32 status_bit = status ? 1 : 0;
149
150 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
151
152 xve_priv = set_field(xve_priv,
153 xve_priv_xv_cya_l1_enable_m(),
154 xve_priv_xv_cya_l1_enable_f(status_bit));
155
156 g->ops.xve.xve_writel(g, xve_priv_xv_r(), xve_priv);
157}
158
/**
 * Disable ASPM permanently.
 *
 * Masks both L0s and L1 power states. Unlike disable_aspm_gp106() this does
 * not save the previous state, so there is nothing to restore later.
 */
void xve_disable_aspm_gp106(struct gk20a *g)
{
	set_xve_l0s_mask(g, true);
	set_xve_l1_mask(g, true);
}
167
168/**
169 * When doing the speed change disable power saving features.
170 */
171static void disable_aspm_gp106(struct gk20a *g)
172{
173 u32 xve_priv;
174
175 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
176
177 /*
178 * Store prior ASPM state so we can restore it later on.
179 */
180 g->xve_l0s = xve_priv_xv_cya_l0s_enable_v(xve_priv);
181 g->xve_l1 = xve_priv_xv_cya_l1_enable_v(xve_priv);
182
183 set_xve_l0s_mask(g, true);
184 set_xve_l1_mask(g, true);
185}
186
/**
 * Restore the state saved by disable_aspm_gp106().
 *
 * g->xve_l0s / g->xve_l1 hold the enable-field values read before the
 * speed change; a non-zero value re-asserts the corresponding mask.
 */
static void enable_aspm_gp106(struct gk20a *g)
{
	set_xve_l0s_mask(g, g->xve_l0s);
	set_xve_l1_mask(g, g->xve_l1);
}
195
196/*
197 * Error checking is done in xve_set_speed_gp106.
198 */
199static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
200{
201 u32 current_link_speed, new_link_speed;
202 u32 dl_mgr, saved_dl_mgr;
203 u32 pl_link_config;
204 u32 link_control_status, link_speed_setting, link_width;
205 struct nvgpu_timeout timeout;
206 int attempts = 10, err_status = 0;
207
208 g->ops.xve.get_speed(g, &current_link_speed);
209 xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
210 xv_sc_dbg(g, PRE_CHANGE, " Current speed: %s",
211 xve_speed_to_str(current_link_speed));
212 xv_sc_dbg(g, PRE_CHANGE, " Next speed: %s",
213 xve_speed_to_str(next_link_speed));
214 xv_sc_dbg(g, PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x",
215 gk20a_readl(g, xp_pl_link_config_r(0)));
216
217 xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM...");
218 disable_aspm_gp106(g);
219 xv_sc_dbg(g, DISABLE_ASPM, " Done!");
220
221 xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode...");
222 saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0));
223
224 /*
225 * Put the DL in safe mode.
226 */
227 dl_mgr = saved_dl_mgr;
228 dl_mgr |= xp_dl_mgr_safe_timing_f(1);
229 gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
230 xv_sc_dbg(g, DL_SAFE_MODE, " Done!");
231
232 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
233 NVGPU_TIMER_CPU_TIMER);
234
235 xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
236 do {
237 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
238 if ((xp_pl_link_config_ltssm_status_f(pl_link_config) ==
239 xp_pl_link_config_ltssm_status_idle_v()) &&
240 (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
241 xp_pl_link_config_ltssm_directive_normal_operations_v()))
242 break;
243 } while (!nvgpu_timeout_expired(&timeout));
244
245 if (nvgpu_timeout_peek_expired(&timeout)) {
246 err_status = -ETIMEDOUT;
247 goto done;
248 }
249
250 xv_sc_dbg(g, CHECK_LINK, " Done");
251
252 xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings");
253 pl_link_config &= ~xp_pl_link_config_max_link_rate_m();
254 switch (next_link_speed) {
255 case GPU_XVE_SPEED_2P5:
256 link_speed_setting =
257 xve_link_control_status_link_speed_link_speed_2p5_v();
258 pl_link_config |= xp_pl_link_config_max_link_rate_f(
259 xp_pl_link_config_max_link_rate_2500_mtps_v());
260 break;
261 case GPU_XVE_SPEED_5P0:
262 link_speed_setting =
263 xve_link_control_status_link_speed_link_speed_5p0_v();
264 pl_link_config |= xp_pl_link_config_max_link_rate_f(
265 xp_pl_link_config_max_link_rate_5000_mtps_v());
266 break;
267 case GPU_XVE_SPEED_8P0:
268 link_speed_setting =
269 xve_link_control_status_link_speed_link_speed_8p0_v();
270 pl_link_config |= xp_pl_link_config_max_link_rate_f(
271 xp_pl_link_config_max_link_rate_8000_mtps_v());
272 break;
273 default:
274 BUG(); /* Should never be hit. */
275 }
276
277 link_control_status =
278 g->ops.xve.xve_readl(g, xve_link_control_status_r());
279 link_width = xve_link_control_status_link_width_v(link_control_status);
280
281 pl_link_config &= ~xp_pl_link_config_target_tx_width_m();
282
283 /* Can't use a switch due to oddities in register definitions. */
284 if (link_width == xve_link_control_status_link_width_x1_v())
285 pl_link_config |= xp_pl_link_config_target_tx_width_f(
286 xp_pl_link_config_target_tx_width_x1_v());
287 else if (link_width == xve_link_control_status_link_width_x2_v())
288 pl_link_config |= xp_pl_link_config_target_tx_width_f(
289 xp_pl_link_config_target_tx_width_x2_v());
290 else if (link_width == xve_link_control_status_link_width_x4_v())
291 pl_link_config |= xp_pl_link_config_target_tx_width_f(
292 xp_pl_link_config_target_tx_width_x4_v());
293 else if (link_width == xve_link_control_status_link_width_x8_v())
294 pl_link_config |= xp_pl_link_config_target_tx_width_f(
295 xp_pl_link_config_target_tx_width_x8_v());
296 else if (link_width == xve_link_control_status_link_width_x16_v())
297 pl_link_config |= xp_pl_link_config_target_tx_width_f(
298 xp_pl_link_config_target_tx_width_x16_v());
299 else
300 BUG();
301
302 xv_sc_dbg(g, LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config);
303 xv_sc_dbg(g, LINK_SETTINGS, " Done");
304
305 xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");
306
307 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
308 NVGPU_TIMER_CPU_TIMER);
309 do {
310 gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
311 if (pl_link_config ==
312 gk20a_readl(g, xp_pl_link_config_r(0)))
313 break;
314 } while (!nvgpu_timeout_expired(&timeout));
315
316 if (nvgpu_timeout_peek_expired(&timeout)) {
317 err_status = -ETIMEDOUT;
318 goto done;
319 }
320
321 xv_sc_dbg(g, EXEC_CHANGE, " Wrote PL_LINK_CONFIG.");
322
323 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
324
325 do {
326 pl_link_config = set_field(pl_link_config,
327 xp_pl_link_config_ltssm_directive_m(),
328 xp_pl_link_config_ltssm_directive_f(
329 xp_pl_link_config_ltssm_directive_change_speed_v()));
330
331 xv_sc_dbg(g, EXEC_CHANGE, " Executing change (0x%08x)!",
332 pl_link_config);
333 gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
334
335 /*
336 * Read NV_XP_PL_LINK_CONFIG until the link has swapped to
337 * the target speed.
338 */
339 nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
340 NVGPU_TIMER_CPU_TIMER);
341 do {
342 pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
343 if (pl_link_config != 0xfffffff &&
344 (xp_pl_link_config_ltssm_status_f(pl_link_config) ==
345 xp_pl_link_config_ltssm_status_idle_v()) &&
346 (xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
347 xp_pl_link_config_ltssm_directive_normal_operations_v()))
348 break;
349 } while (!nvgpu_timeout_expired(&timeout));
350
351 if (nvgpu_timeout_peek_expired(&timeout)) {
352 err_status = -ETIMEDOUT;
353 xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
354 pl_link_config);
355 }
356
357 xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status");
358
359 if (pl_link_config == 0xffffffff) {
360 WARN(1, "GPU fell of PCI bus!?");
361
362 /*
363 * The rest of the driver is probably about to
364 * explode...
365 */
366 BUG();
367 }
368
369 link_control_status =
370 g->ops.xve.xve_readl(g, xve_link_control_status_r());
371 xv_sc_dbg(g, EXEC_CHANGE, " target %d vs current %d",
372 link_speed_setting,
373 xve_link_control_status_link_speed_v(link_control_status));
374
375 if (err_status == -ETIMEDOUT) {
376 xv_sc_dbg(g, EXEC_CHANGE, " Oops timed out?");
377 break;
378 }
379 } while (attempts-- > 0 &&
380 link_speed_setting !=
381 xve_link_control_status_link_speed_v(link_control_status));
382
383 xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change...");
384
385 /*
386 * Check that the new link speed is actually active. If we failed to
387 * change to the new link speed then return to the link speed setting
388 * pre-speed change.
389 */
390 new_link_speed = xve_link_control_status_link_speed_v(
391 link_control_status);
392 if (link_speed_setting != new_link_speed) {
393 u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0));
394
395 xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds mismatch!");
396 xv_sc_dbg(g, EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x",
397 g->ops.xve.xve_readl(g, xve_link_control_status_r()));
398 xv_sc_dbg(g, EXEC_VERIF, " Link speed is %s - should be %s",
399 xve_speed_to_str(new_link_speed),
400 xve_speed_to_str(link_speed_setting));
401
402 link_config &= ~xp_pl_link_config_max_link_rate_m();
403 if (new_link_speed ==
404 xve_link_control_status_link_speed_link_speed_2p5_v())
405 link_config |= xp_pl_link_config_max_link_rate_f(
406 xp_pl_link_config_max_link_rate_2500_mtps_v());
407 else if (new_link_speed ==
408 xve_link_control_status_link_speed_link_speed_5p0_v())
409 link_config |= xp_pl_link_config_max_link_rate_f(
410 xp_pl_link_config_max_link_rate_5000_mtps_v());
411 else if (new_link_speed ==
412 xve_link_control_status_link_speed_link_speed_8p0_v())
413 link_config |= xp_pl_link_config_max_link_rate_f(
414 xp_pl_link_config_max_link_rate_8000_mtps_v());
415 else
416 link_config |= xp_pl_link_config_max_link_rate_f(
417 xp_pl_link_config_max_link_rate_2500_mtps_v());
418
419 gk20a_writel(g, xp_pl_link_config_r(0), link_config);
420 err_status = -ENODEV;
421 } else {
422 xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds match!");
423 err_status = 0;
424 }
425
426done:
427 /* Restore safe timings. */
428 xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings...");
429 gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr);
430 xv_sc_dbg(g, CLEANUP, " Done");
431
432 xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings...");
433 enable_aspm_gp106(g);
434 xv_sc_dbg(g, CLEANUP, " Done");
435
436 return err_status;
437}
438
439/**
440 * Sets the PCIe link speed to @xve_link_speed which must be one of:
441 *
442 * %GPU_XVE_SPEED_2P5
443 * %GPU_XVE_SPEED_5P0
444 * %GPU_XVE_SPEED_8P0
445 *
446 * If an error is encountered an appropriate error will be returned.
447 */
448int xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
449{
450 u32 current_link_speed;
451 int err;
452
453 if ((next_link_speed & GPU_XVE_SPEED_MASK) == 0)
454 return -EINVAL;
455
456 err = g->ops.xve.get_speed(g, &current_link_speed);
457 if (err)
458 return err;
459
460 /* No-op. */
461 if (current_link_speed == next_link_speed)
462 return 0;
463
464 return __do_xve_set_speed_gp106(g, next_link_speed);
465}
466
467/**
468 * Places a bitmask of available speeds for gp106 in @speed_mask.
469 */
470void xve_available_speeds_gp106(struct gk20a *g, u32 *speed_mask)
471{
472 *speed_mask = GPU_XVE_SPEED_2P5 | GPU_XVE_SPEED_5P0;
473}
474
#if defined(CONFIG_PCI_MSI)
/*
 * Re-arm MSI delivery. A dummy write to the CYA_2 offset is all the HW
 * needs; the value itself is ignored.
 */
void xve_rearm_msi_gp106(struct gk20a *g)
{
	u32 dummy = 0;

	g->ops.xve.xve_writel(g, xve_cya_2_r(), dummy);
}
#endif
482
483void xve_enable_shadow_rom_gp106(struct gk20a *g)
484{
485 g->ops.xve.xve_writel(g, xve_rom_ctrl_r(),
486 xve_rom_ctrl_rom_shadow_enabled_f());
487}
488
489void xve_disable_shadow_rom_gp106(struct gk20a *g)
490{
491 g->ops.xve.xve_writel(g, xve_rom_ctrl_r(),
492 xve_rom_ctrl_rom_shadow_disabled_f());
493}
494
495u32 xve_get_link_control_status(struct gk20a *g)
496{
497 return g->ops.xve.xve_readl(g, xve_link_control_status_r());
498}