summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp106/xve_gp106.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2018-08-09 15:38:53 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-08-10 01:28:32 -0400
commit6e746a97cc7ee2bc5a3adee04dd9c65b3921eee5 (patch)
tree71b9fe0f38f604a4cf21e8b163d221b8a1064a2f /drivers/gpu/nvgpu/gp106/xve_gp106.c
parent6b26d233499f9d447f06e8e72c72ed6728762e37 (diff)
gpu: nvgpu: Move xve HAL to common
Move implementation of xve HAL to common/xve. JIRA NVGPU-959 Change-Id: I27dba43253e3aa8fd11229a9c4fad97aa5cf0b59 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1796147 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/xve_gp106.c')
-rw-r--r--drivers/gpu/nvgpu/gp106/xve_gp106.c497
1 files changed, 0 insertions, 497 deletions
diff --git a/drivers/gpu/nvgpu/gp106/xve_gp106.c b/drivers/gpu/nvgpu/gp106/xve_gp106.c
deleted file mode 100644
index 40b0ff04..00000000
--- a/drivers/gpu/nvgpu/gp106/xve_gp106.c
+++ /dev/null
@@ -1,497 +0,0 @@
1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24#include "gp106/bios_gp106.h"
25#include "gp106/xve_gp106.h"
26
27#include <nvgpu/bug.h>
28#include <nvgpu/xve.h>
29#include <nvgpu/io.h>
30
31#include <nvgpu/hw/gp106/hw_xp_gp106.h>
32#include <nvgpu/hw/gp106/hw_xve_gp106.h>
33
34#define NV_PCFG 0x88000
35
36void xve_xve_writel_gp106(struct gk20a *g, u32 reg, u32 val)
37{
38 gk20a_writel(g, NV_PCFG + reg, val);
39}
40
41u32 xve_xve_readl_gp106(struct gk20a *g, u32 reg)
42{
43 return gk20a_readl(g, NV_PCFG + reg);
44}
45
46/**
47 * Resets the GPU (except the XVE/XP).
48 */
49void xve_reset_gpu_gp106(struct gk20a *g)
50{
51 u32 reset;
52
53 /*
54 * This resets the GPU except for the XVE/XP (since then we would lose
55 * the dGPU from the bus). t18x has a HW limitation where once that
56 * happens the GPU is gone until the entire system is reset.
57 *
58 * We have to use the auto-deassert register since we won't be able to
59 * access the GPU after the GPU goes into reset. This appears like the
60 * GPU has dropped from the bus and causes nvgpu to reset the entire
61 * system. Whoops!
62 */
63 reset = xve_reset_reset_m() |
64 xve_reset_gpu_on_sw_reset_m() |
65 xve_reset_counter_en_m() |
66 xve_reset_counter_val_f(0x7ff) |
67 xve_reset_clock_on_sw_reset_m() |
68 xve_reset_clock_counter_en_m() |
69 xve_reset_clock_counter_val_f(0x7ff);
70
71 g->ops.xve.xve_writel(g, xve_reset_r(), reset | xve_reset_reset_m());
72
73 /*
74 * Don't access GPU until _after_ it's back out of reset!
75 */
76 nvgpu_msleep(100);
77 g->ops.xve.xve_writel(g, xve_reset_r(), 0);
78}
79
80/**
81 * Places one of:
82 *
83 * %GPU_XVE_SPEED_2P5
84 * %GPU_XVE_SPEED_5P0
85 * %GPU_XVE_SPEED_8P0
86 *
87 * in the u32 pointed to by @xve_link_speed. If for some reason an unknown PCIe
88 * bus speed is detected then *@xve_link_speed is not touched and -ENODEV is
89 * returned.
90 */
91int xve_get_speed_gp106(struct gk20a *g, u32 *xve_link_speed)
92{
93 u32 status;
94 u32 link_speed, real_link_speed = 0;
95
96 status = g->ops.xve.xve_readl(g, xve_link_control_status_r());
97
98 link_speed = xve_link_control_status_link_speed_v(status);
99
100 /*
101 * Can't use a switch statement becuase switch statements dont work with
102 * function calls.
103 */
104 if (link_speed == xve_link_control_status_link_speed_link_speed_2p5_v())
105 real_link_speed = GPU_XVE_SPEED_2P5;
106 if (link_speed == xve_link_control_status_link_speed_link_speed_5p0_v())
107 real_link_speed = GPU_XVE_SPEED_5P0;
108 if (link_speed == xve_link_control_status_link_speed_link_speed_8p0_v())
109 real_link_speed = GPU_XVE_SPEED_8P0;
110
111 if (!real_link_speed)
112 return -ENODEV;
113
114 *xve_link_speed = real_link_speed;
115 return 0;
116}
117
118/**
119 * Set the mask for L0s in the XVE.
120 *
121 * When @status is non-zero the mask for L0s is set which _disables_ L0s. When
122 * @status is zero L0s is no longer masked and may be enabled.
123 */
124static void set_xve_l0s_mask(struct gk20a *g, bool status)
125{
126 u32 xve_priv;
127 u32 status_bit = status ? 1 : 0;
128
129 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
130
131 xve_priv = set_field(xve_priv,
132 xve_priv_xv_cya_l0s_enable_m(),
133 xve_priv_xv_cya_l0s_enable_f(status_bit));
134
135 g->ops.xve.xve_writel(g, xve_priv_xv_r(), xve_priv);
136}
137
138/**
139 * Set the mask for L1 in the XVE.
140 *
141 * When @status is non-zero the mask for L1 is set which _disables_ L0s. When
142 * @status is zero L1 is no longer masked and may be enabled.
143 */
144static void set_xve_l1_mask(struct gk20a *g, int status)
145{
146 u32 xve_priv;
147 u32 status_bit = status ? 1 : 0;
148
149 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
150
151 xve_priv = set_field(xve_priv,
152 xve_priv_xv_cya_l1_enable_m(),
153 xve_priv_xv_cya_l1_enable_f(status_bit));
154
155 g->ops.xve.xve_writel(g, xve_priv_xv_r(), xve_priv);
156}
157
158/**
159 * Disable ASPM permanently.
160 */
161void xve_disable_aspm_gp106(struct gk20a *g)
162{
163 set_xve_l0s_mask(g, true);
164 set_xve_l1_mask(g, true);
165}
166
167/**
168 * When doing the speed change disable power saving features.
169 */
170static void disable_aspm_gp106(struct gk20a *g)
171{
172 u32 xve_priv;
173
174 xve_priv = g->ops.xve.xve_readl(g, xve_priv_xv_r());
175
176 /*
177 * Store prior ASPM state so we can restore it later on.
178 */
179 g->xve_l0s = xve_priv_xv_cya_l0s_enable_v(xve_priv);
180 g->xve_l1 = xve_priv_xv_cya_l1_enable_v(xve_priv);
181
182 set_xve_l0s_mask(g, true);
183 set_xve_l1_mask(g, true);
184}
185
186/**
187 * Restore the state saved by disable_aspm_gp106().
188 */
189static void enable_aspm_gp106(struct gk20a *g)
190{
191 set_xve_l0s_mask(g, g->xve_l0s);
192 set_xve_l1_mask(g, g->xve_l1);
193}
194
/*
 * Core PCIe link speed change sequence. Validation of @next_link_speed is
 * done by the caller, xve_set_speed_gp106(); this routine assumes it is one
 * of GPU_XVE_SPEED_{2P5,5P0,8P0}.
 *
 * Returns 0 on success, -ETIMEDOUT if the link never went idle or the config
 * write never took, or -ENODEV if the link came back at a speed other than
 * the one requested. ASPM and DL safe-timing are always restored on exit.
 */
static int __do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
{
	u32 current_link_speed, new_link_speed;
	u32 dl_mgr, saved_dl_mgr;
	u32 pl_link_config;
	u32 link_control_status, link_speed_setting, link_width;
	struct nvgpu_timeout timeout;
	int attempts = 10, err_status = 0;

	g->ops.xve.get_speed(g, &current_link_speed);
	xv_sc_dbg(g, PRE_CHANGE, "Executing PCIe link change.");
	xv_sc_dbg(g, PRE_CHANGE, " Current speed: %s",
		xve_speed_to_str(current_link_speed));
	xv_sc_dbg(g, PRE_CHANGE, " Next speed: %s",
		xve_speed_to_str(next_link_speed));
	xv_sc_dbg(g, PRE_CHANGE, " PL_LINK_CONFIG: 0x%08x",
		gk20a_readl(g, xp_pl_link_config_r(0)));

	/* ASPM must be off during the change; restored at the done: label. */
	xv_sc_dbg(g, DISABLE_ASPM, "Disabling ASPM...");
	disable_aspm_gp106(g);
	xv_sc_dbg(g, DISABLE_ASPM, " Done!");

	xv_sc_dbg(g, DL_SAFE_MODE, "Putting DL in safe mode...");
	saved_dl_mgr = gk20a_readl(g, xp_dl_mgr_r(0));

	/*
	 * Put the DL in safe mode.
	 */
	dl_mgr = saved_dl_mgr;
	dl_mgr |= xp_dl_mgr_safe_timing_f(1);
	gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
	xv_sc_dbg(g, DL_SAFE_MODE, " Done!");

	nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
			NVGPU_TIMER_CPU_TIMER);

	/* The LTSSM must be idle before a speed change can be requested. */
	xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
	do {
		pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
		if ((xp_pl_link_config_ltssm_status_f(pl_link_config) ==
			xp_pl_link_config_ltssm_status_idle_v()) &&
			(xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
			xp_pl_link_config_ltssm_directive_normal_operations_v()))
			break;
	} while (!nvgpu_timeout_expired(&timeout));

	if (nvgpu_timeout_peek_expired(&timeout)) {
		err_status = -ETIMEDOUT;
		goto done;
	}

	xv_sc_dbg(g, CHECK_LINK, " Done");

	/*
	 * Encode the requested speed both for PL_LINK_CONFIG (max link rate)
	 * and for the later LINK_CONTROL_STATUS comparison.
	 */
	xv_sc_dbg(g, LINK_SETTINGS, "Preparing next link settings");
	pl_link_config &= ~xp_pl_link_config_max_link_rate_m();
	switch (next_link_speed) {
	case GPU_XVE_SPEED_2P5:
		link_speed_setting =
			xve_link_control_status_link_speed_link_speed_2p5_v();
		pl_link_config |= xp_pl_link_config_max_link_rate_f(
			xp_pl_link_config_max_link_rate_2500_mtps_v());
		break;
	case GPU_XVE_SPEED_5P0:
		link_speed_setting =
			xve_link_control_status_link_speed_link_speed_5p0_v();
		pl_link_config |= xp_pl_link_config_max_link_rate_f(
			xp_pl_link_config_max_link_rate_5000_mtps_v());
		break;
	case GPU_XVE_SPEED_8P0:
		link_speed_setting =
			xve_link_control_status_link_speed_link_speed_8p0_v();
		pl_link_config |= xp_pl_link_config_max_link_rate_f(
			xp_pl_link_config_max_link_rate_8000_mtps_v());
		break;
	default:
		BUG(); /* Should never be hit. */
	}

	/* Carry the current link width over into the new config unchanged. */
	link_control_status =
		g->ops.xve.xve_readl(g, xve_link_control_status_r());
	link_width = xve_link_control_status_link_width_v(link_control_status);

	pl_link_config &= ~xp_pl_link_config_target_tx_width_m();

	/* Can't use a switch due to oddities in register definitions. */
	if (link_width == xve_link_control_status_link_width_x1_v())
		pl_link_config |= xp_pl_link_config_target_tx_width_f(
			xp_pl_link_config_target_tx_width_x1_v());
	else if (link_width == xve_link_control_status_link_width_x2_v())
		pl_link_config |= xp_pl_link_config_target_tx_width_f(
			xp_pl_link_config_target_tx_width_x2_v());
	else if (link_width == xve_link_control_status_link_width_x4_v())
		pl_link_config |= xp_pl_link_config_target_tx_width_f(
			xp_pl_link_config_target_tx_width_x4_v());
	else if (link_width == xve_link_control_status_link_width_x8_v())
		pl_link_config |= xp_pl_link_config_target_tx_width_f(
			xp_pl_link_config_target_tx_width_x8_v());
	else if (link_width == xve_link_control_status_link_width_x16_v())
		pl_link_config |= xp_pl_link_config_target_tx_width_f(
			xp_pl_link_config_target_tx_width_x16_v());
	else
		BUG();

	xv_sc_dbg(g, LINK_SETTINGS, " pl_link_config = 0x%08x", pl_link_config);
	xv_sc_dbg(g, LINK_SETTINGS, " Done");

	xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");

	/* Write the new config until the read-back matches (or timeout). */
	nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
			NVGPU_TIMER_CPU_TIMER);
	do {
		gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
		if (pl_link_config ==
			gk20a_readl(g, xp_pl_link_config_r(0)))
			break;
	} while (!nvgpu_timeout_expired(&timeout));

	if (nvgpu_timeout_peek_expired(&timeout)) {
		err_status = -ETIMEDOUT;
		goto done;
	}

	xv_sc_dbg(g, EXEC_CHANGE, " Wrote PL_LINK_CONFIG.");

	pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));

	/*
	 * Issue the CHANGE_SPEED directive and poll until the link returns to
	 * normal operation, retrying up to `attempts` times if the reported
	 * speed does not yet match the target.
	 */
	do {
		pl_link_config = set_field(pl_link_config,
			xp_pl_link_config_ltssm_directive_m(),
			xp_pl_link_config_ltssm_directive_f(
			xp_pl_link_config_ltssm_directive_change_speed_v()));

		xv_sc_dbg(g, EXEC_CHANGE, " Executing change (0x%08x)!",
			pl_link_config);
		gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);

		/*
		 * Read NV_XP_PL_LINK_CONFIG until the link has swapped to
		 * the target speed.
		 */
		nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
			NVGPU_TIMER_CPU_TIMER);
		do {
			/*
			 * NOTE(review): this sentinel is 0xfffffff (seven
			 * f's) while the fell-off-the-bus check below uses
			 * 0xffffffff (eight). Possibly a typo — confirm
			 * intent against HW behavior before changing.
			 */
			pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
			if (pl_link_config != 0xfffffff &&
				(xp_pl_link_config_ltssm_status_f(pl_link_config) ==
				xp_pl_link_config_ltssm_status_idle_v()) &&
				(xp_pl_link_config_ltssm_directive_f(pl_link_config) ==
				xp_pl_link_config_ltssm_directive_normal_operations_v()))
				break;
		} while (!nvgpu_timeout_expired(&timeout));

		if (nvgpu_timeout_peek_expired(&timeout)) {
			err_status = -ETIMEDOUT;
			xv_sc_dbg(g, EXEC_CHANGE, " timeout; pl_link_config = 0x%x",
				pl_link_config);
		}

		xv_sc_dbg(g, EXEC_CHANGE, " Change done... Checking status");

		/* All-Fs read-back means the device vanished from the bus. */
		if (pl_link_config == 0xffffffff) {
			WARN(1, "GPU fell of PCI bus!?");

			/*
			 * The rest of the driver is probably about to
			 * explode...
			 */
			BUG();
		}

		link_control_status =
			g->ops.xve.xve_readl(g, xve_link_control_status_r());
		xv_sc_dbg(g, EXEC_CHANGE, " target %d vs current %d",
			link_speed_setting,
			xve_link_control_status_link_speed_v(link_control_status));

		if (err_status == -ETIMEDOUT) {
			xv_sc_dbg(g, EXEC_CHANGE, " Oops timed out?");
			break;
		}
	} while (attempts-- > 0 &&
		link_speed_setting !=
		xve_link_control_status_link_speed_v(link_control_status));

	xv_sc_dbg(g, EXEC_VERIF, "Verifying speed change...");

	/*
	 * Check that the new link speed is actually active. If we failed to
	 * change to the new link speed then return to the link speed setting
	 * pre-speed change.
	 */
	new_link_speed = xve_link_control_status_link_speed_v(
		link_control_status);
	if (link_speed_setting != new_link_speed) {
		u32 link_config = gk20a_readl(g, xp_pl_link_config_r(0));

		xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds mismatch!");
		xv_sc_dbg(g, EXEC_VERIF, " LINK_CONTROL_STATUS: 0x%08x",
			g->ops.xve.xve_readl(g, xve_link_control_status_r()));
		xv_sc_dbg(g, EXEC_VERIF, " Link speed is %s - should be %s",
			xve_speed_to_str(new_link_speed),
			xve_speed_to_str(link_speed_setting));

		/*
		 * Re-program the max link rate to whatever speed the link
		 * actually settled at (default 2.5 GT/s if unrecognized).
		 */
		link_config &= ~xp_pl_link_config_max_link_rate_m();
		if (new_link_speed ==
			xve_link_control_status_link_speed_link_speed_2p5_v())
			link_config |= xp_pl_link_config_max_link_rate_f(
				xp_pl_link_config_max_link_rate_2500_mtps_v());
		else if (new_link_speed ==
			xve_link_control_status_link_speed_link_speed_5p0_v())
			link_config |= xp_pl_link_config_max_link_rate_f(
				xp_pl_link_config_max_link_rate_5000_mtps_v());
		else if (new_link_speed ==
			xve_link_control_status_link_speed_link_speed_8p0_v())
			link_config |= xp_pl_link_config_max_link_rate_f(
				xp_pl_link_config_max_link_rate_8000_mtps_v());
		else
			link_config |= xp_pl_link_config_max_link_rate_f(
				xp_pl_link_config_max_link_rate_2500_mtps_v());

		gk20a_writel(g, xp_pl_link_config_r(0), link_config);
		err_status = -ENODEV;
	} else {
		xv_sc_dbg(g, EXEC_VERIF, " Current and target speeds match!");
		err_status = 0;
	}

done:
	/* Restore safe timings. */
	xv_sc_dbg(g, CLEANUP, "Restoring saved DL settings...");
	gk20a_writel(g, xp_dl_mgr_r(0), saved_dl_mgr);
	xv_sc_dbg(g, CLEANUP, " Done");

	xv_sc_dbg(g, CLEANUP, "Re-enabling ASPM settings...");
	enable_aspm_gp106(g);
	xv_sc_dbg(g, CLEANUP, " Done");

	return err_status;
}
437
438/**
439 * Sets the PCIe link speed to @xve_link_speed which must be one of:
440 *
441 * %GPU_XVE_SPEED_2P5
442 * %GPU_XVE_SPEED_5P0
443 * %GPU_XVE_SPEED_8P0
444 *
445 * If an error is encountered an appropriate error will be returned.
446 */
447int xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
448{
449 u32 current_link_speed;
450 int err;
451
452 if ((next_link_speed & GPU_XVE_SPEED_MASK) == 0)
453 return -EINVAL;
454
455 err = g->ops.xve.get_speed(g, &current_link_speed);
456 if (err)
457 return err;
458
459 /* No-op. */
460 if (current_link_speed == next_link_speed)
461 return 0;
462
463 return __do_xve_set_speed_gp106(g, next_link_speed);
464}
465
466/**
467 * Places a bitmask of available speeds for gp106 in @speed_mask.
468 */
469void xve_available_speeds_gp106(struct gk20a *g, u32 *speed_mask)
470{
471 *speed_mask = GPU_XVE_SPEED_2P5 | GPU_XVE_SPEED_5P0;
472}
473
#if defined(CONFIG_PCI_MSI)
void xve_rearm_msi_gp106(struct gk20a *g)
{
	/*
	 * Writing any dummy value to the CYA_2 offset re-arms MSI; the
	 * value itself is not used.
	 */
	g->ops.xve.xve_writel(g, xve_cya_2_r(), 0x0);
}
#endif
481
482void xve_enable_shadow_rom_gp106(struct gk20a *g)
483{
484 g->ops.xve.xve_writel(g, xve_rom_ctrl_r(),
485 xve_rom_ctrl_rom_shadow_enabled_f());
486}
487
488void xve_disable_shadow_rom_gp106(struct gk20a *g)
489{
490 g->ops.xve.xve_writel(g, xve_rom_ctrl_r(),
491 xve_rom_ctrl_rom_shadow_disabled_f());
492}
493
494u32 xve_get_link_control_status(struct gk20a *g)
495{
496 return g->ops.xve.xve_readl(g, xve_link_control_status_r());
497}