Diffstat (limited to 'drivers/infiniband/hw/qib/qib_sd7220.c')
-rw-r--r-- | drivers/infiniband/hw/qib/qib_sd7220.c | 1413
1 files changed, 1413 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
new file mode 100644
index 000000000000..0aeed0e74cb6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -0,0 +1,1413 @@
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | /* | ||
34 | * This file contains all of the code that is specific to the SerDes | ||
35 | * on the QLogic_IB 7220 chip. | ||
36 | */ | ||
37 | |||
38 | #include <linux/pci.h> | ||
39 | #include <linux/delay.h> | ||
40 | |||
41 | #include "qib.h" | ||
42 | #include "qib_7220.h" | ||
43 | |||
44 | /* | ||
45 | * Same as in qib_iba7220.c, but just the registers needed here. | ||
46 | * Could move whole set to qib_7220.h, but decided better to keep | ||
47 | * local. | ||
48 | */ | ||
49 | #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) | ||
50 | #define kr_hwerrclear KREG_IDX(HwErrClear) | ||
51 | #define kr_hwerrmask KREG_IDX(HwErrMask) | ||
52 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | ||
53 | #define kr_ibcstatus KREG_IDX(IBCStatus) | ||
54 | #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) | ||
55 | #define kr_scratch KREG_IDX(Scratch) | ||
56 | #define kr_xgxs_cfg KREG_IDX(XGXSCfg) | ||
57 | /* these are used only here, not in qib_iba7220.c */ | ||
58 | #define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl) | ||
59 | #define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg) | ||
60 | #define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg) | ||
61 | #define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl) | ||
62 | #define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0) | ||
63 | |||
64 | /* | ||
65 | * The IBSerDesMappTable is a memory that holds values to be stored in | ||
66 | * various SerDes registers by IBC. | ||
67 | */ | ||
68 | #define kr_serdes_maptable KREG_IDX(IBSerDesMappTable) | ||
69 | |||
70 | /* | ||
71 | * Below used for sdnum parameter, selecting one of the two sections | ||
72 | * used for PCIe, or the single SerDes used for IB. | ||
73 | */ | ||
74 | #define PCIE_SERDES0 0 | ||
75 | #define PCIE_SERDES1 1 | ||
76 | |||
77 | /* | ||
78 | * The EPB requires addressing in a particular form. EPB_LOC() is intended | ||
79 | * to make #definitions a little more readable. | ||
80 | */ | ||
81 | #define EPB_ADDR_SHF 8 | ||
82 | #define EPB_LOC(chn, elt, reg) \ | ||
83 | (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \ | ||
84 | EPB_ADDR_SHF) | ||
85 | #define EPB_IB_QUAD0_CS_SHF (25) | ||
86 | #define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF) | ||
87 | #define EPB_IB_UC_CS_SHF (26) | ||
88 | #define EPB_PCIE_UC_CS_SHF (27) | ||
89 | #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8)) | ||
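/*
 * Worked example (illustrative only, not referenced by the driver):
 * with the packing above, EPB_LOC(2, 7, 0x15) evaluates to
 *	((0x15 & 0x3f) << 9) | ((2 & 7) << 4) | (7 & 0xf) = 0x2A27,
 * shifted left by EPB_ADDR_SHF (8) to give 0x2A2700.  The chip-select
 * bits (e.g. EPB_IB_QUAD0_CS) and EPB_GLOBAL_WR are OR'd in separately
 * by the users below.
 */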
90 | |||
91 | /* Forward declarations. */ | ||
92 | static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, | ||
93 | u32 data, u32 mask); | ||
94 | static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, | ||
95 | int mask); | ||
96 | static int qib_sd_trimdone_poll(struct qib_devdata *dd); | ||
97 | static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where); | ||
98 | static int qib_sd_setvals(struct qib_devdata *dd); | ||
99 | static int qib_sd_early(struct qib_devdata *dd); | ||
100 | static int qib_sd_dactrim(struct qib_devdata *dd); | ||
101 | static int qib_internal_presets(struct qib_devdata *dd); | ||
102 | /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */ | ||
103 | static int qib_sd_trimself(struct qib_devdata *dd, int val); | ||
104 | static int epb_access(struct qib_devdata *dd, int sdnum, int claim); | ||
105 | |||
106 | /* | ||
107 | * Below keeps track of whether the "once per power-on" initialization has | ||
108 | * been done, because uC code Version 1.32.17 or higher allows the uC to | ||
109 | * be reset at will, and Automatic Equalization may require it. So the | ||
110 | * state of the reset "pin" is no longer valid. Instead, we check for the | ||
111 | * actual uC code having been loaded. | ||
112 | */ | ||
113 | static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd) | ||
114 | { | ||
115 | struct qib_devdata *dd = ppd->dd; | ||
116 | if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0)) | ||
117 | dd->cspec->serdes_first_init_done = 1; | ||
118 | return dd->cspec->serdes_first_init_done; | ||
119 | } | ||
120 | |||
121 | /* repeat #define for local use. "Real" #define is in qib_iba7220.c */ | ||
122 | #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL | ||
123 | #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF)) | ||
124 | #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)) | ||
125 | #define UC_PAR_CLR_D 8 | ||
126 | #define UC_PAR_CLR_M 0xC | ||
127 | #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS) | ||
128 | #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27) | ||
129 | |||
130 | void qib_sd7220_clr_ibpar(struct qib_devdata *dd) | ||
131 | { | ||
132 | int ret; | ||
133 | |||
134 | /* clear, then re-enable parity errs */ | ||
135 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, | ||
136 | UC_PAR_CLR_D, UC_PAR_CLR_M); | ||
137 | if (ret < 0) { | ||
138 | qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); | ||
139 | goto bail; | ||
140 | } | ||
141 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, | ||
142 | UC_PAR_CLR_M); | ||
143 | |||
144 | qib_read_kreg32(dd, kr_scratch); | ||
145 | udelay(4); | ||
146 | qib_write_kreg(dd, kr_hwerrclear, | ||
147 | QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); | ||
148 | qib_read_kreg32(dd, kr_scratch); | ||
149 | bail: | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * After a reset or other unusual event, the epb interface may need | ||
155 | * to be re-synchronized between the host and the uC. | ||
156 | * Returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected). | ||
157 | */ | ||
158 | #define IBSD_RESYNC_TRIES 3 | ||
159 | #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS) | ||
160 | #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS) | ||
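/*
 * Note on the pattern arithmetic below (illustrative): XOR with 0xA5
 * alternates the two sync patterns, since 0xF0 ^ 0xA5 == 0x55 and
 * 0x55 ^ 0xA5 == 0xF0.
 */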
161 | |||
162 | static int qib_resync_ibepb(struct qib_devdata *dd) | ||
163 | { | ||
164 | int ret, pat, tries, chn; | ||
165 | u32 loc; | ||
166 | |||
167 | ret = -1; | ||
168 | chn = 0; | ||
169 | for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) { | ||
170 | loc = IB_PGUDP(chn); | ||
171 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | ||
172 | if (ret < 0) { | ||
173 | qib_dev_err(dd, "Failed read in resync\n"); | ||
174 | continue; | ||
175 | } | ||
176 | if (ret != 0xF0 && ret != 0x55 && tries == 0) | ||
177 | qib_dev_err(dd, "unexpected pattern in resync\n"); | ||
178 | pat = ret ^ 0xA5; /* alternate F0 and 55 */ | ||
179 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); | ||
180 | if (ret < 0) { | ||
181 | qib_dev_err(dd, "Failed write in resync\n"); | ||
182 | continue; | ||
183 | } | ||
184 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | ||
185 | if (ret < 0) { | ||
186 | qib_dev_err(dd, "Failed re-read in resync\n"); | ||
187 | continue; | ||
188 | } | ||
189 | if (ret != pat) { | ||
190 | qib_dev_err(dd, "Failed compare1 in resync\n"); | ||
191 | continue; | ||
192 | } | ||
193 | loc = IB_CMUDONE(chn); | ||
194 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | ||
195 | if (ret < 0) { | ||
196 | qib_dev_err(dd, "Failed CMUDONE rd in resync\n"); | ||
197 | continue; | ||
198 | } | ||
199 | if ((ret & 0x70) != ((chn << 4) | 0x40)) { | ||
200 | qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", | ||
201 | ret, chn); | ||
202 | continue; | ||
203 | } | ||
204 | if (++chn == 4) | ||
205 | break; /* Success */ | ||
206 | } | ||
207 | return (ret > 0) ? 0 : ret; | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Localize the stuff that should be done to change IB uC reset. | ||
212 | * Returns <0 for errors. | ||
213 | */ | ||
214 | static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst) | ||
215 | { | ||
216 | u64 rst_val; | ||
217 | int ret = 0; | ||
218 | unsigned long flags; | ||
219 | |||
220 | rst_val = qib_read_kreg64(dd, kr_ibserdesctrl); | ||
221 | if (assert_rst) { | ||
222 | /* | ||
223 | * Vendor recommends "interrupting" uC before reset, to | ||
224 | * minimize possible glitches. | ||
225 | */ | ||
226 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); | ||
227 | epb_access(dd, IB_7220_SERDES, 1); | ||
228 | rst_val |= 1ULL; | ||
229 | /* Squelch possible parity error from _asserting_ reset */ | ||
230 | qib_write_kreg(dd, kr_hwerrmask, | ||
231 | dd->cspec->hwerrmask & | ||
232 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); | ||
233 | qib_write_kreg(dd, kr_ibserdesctrl, rst_val); | ||
234 | /* flush write, delay to ensure it took effect */ | ||
235 | qib_read_kreg32(dd, kr_scratch); | ||
236 | udelay(2); | ||
237 | /* once it's reset, can remove interrupt */ | ||
238 | epb_access(dd, IB_7220_SERDES, -1); | ||
239 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | ||
240 | } else { | ||
241 | /* | ||
242 | * Before we de-assert reset, we need to deal with | ||
243 | * possible glitch on the Parity-error line. | ||
244 | * Suppress it around the reset, both in chip-level | ||
245 | * hwerrmask and in IB uC control reg. uC will allow | ||
246 | * it again during startup. | ||
247 | */ | ||
248 | u64 val; | ||
249 | rst_val &= ~(1ULL); | ||
250 | qib_write_kreg(dd, kr_hwerrmask, | ||
251 | dd->cspec->hwerrmask & | ||
252 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); | ||
253 | |||
254 | ret = qib_resync_ibepb(dd); | ||
255 | if (ret < 0) | ||
256 | qib_dev_err(dd, "unable to re-sync IB EPB\n"); | ||
257 | |||
258 | /* set uC control regs to suppress parity errs */ | ||
259 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); | ||
260 | if (ret < 0) | ||
261 | goto bail; | ||
262 | 		/* IB uC code past Version 1.32.17 allows suppression of wdog */ | ||
263 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, | ||
264 | 0x80); | ||
265 | if (ret < 0) { | ||
266 | qib_dev_err(dd, "Failed to set WDOG disable\n"); | ||
267 | goto bail; | ||
268 | } | ||
269 | qib_write_kreg(dd, kr_ibserdesctrl, rst_val); | ||
270 | /* flush write, delay for startup */ | ||
271 | qib_read_kreg32(dd, kr_scratch); | ||
272 | udelay(1); | ||
273 | /* clear, then re-enable parity errs */ | ||
274 | qib_sd7220_clr_ibpar(dd); | ||
275 | val = qib_read_kreg64(dd, kr_hwerrstatus); | ||
276 | if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) { | ||
277 | qib_dev_err(dd, "IBUC Parity still set after RST\n"); | ||
278 | dd->cspec->hwerrmask &= | ||
279 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR; | ||
280 | } | ||
281 | qib_write_kreg(dd, kr_hwerrmask, | ||
282 | dd->cspec->hwerrmask); | ||
283 | } | ||
284 | |||
285 | bail: | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | static void qib_sd_trimdone_monitor(struct qib_devdata *dd, | ||
290 | const char *where) | ||
291 | { | ||
292 | int ret, chn, baduns; | ||
293 | u64 val; | ||
294 | |||
295 | if (!where) | ||
296 | where = "?"; | ||
297 | |||
298 | /* give time for reset to settle out in EPB */ | ||
299 | udelay(2); | ||
300 | |||
301 | ret = qib_resync_ibepb(dd); | ||
302 | if (ret < 0) | ||
303 | qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); | ||
304 | |||
305 | /* Do "sacrificial read" to get EPB in sane state after reset */ | ||
306 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); | ||
307 | if (ret < 0) | ||
308 | qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); | ||
309 | |||
310 | /* Check/show "summary" Trim-done bit in IBCStatus */ | ||
311 | val = qib_read_kreg64(dd, kr_ibcstatus); | ||
312 | if (!(val & (1ULL << 11))) | ||
313 | qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); | ||
314 | /* | ||
315 | * Do "dummy read/mod/wr" to get EPB in sane state after reset. | ||
316 | * The default value for MPREG6 is 0. | ||
317 | */ | ||
318 | udelay(2); | ||
319 | |||
320 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); | ||
321 | if (ret < 0) | ||
322 | qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); | ||
323 | udelay(10); | ||
324 | |||
325 | baduns = 0; | ||
326 | |||
327 | for (chn = 3; chn >= 0; --chn) { | ||
328 | /* Read CTRL reg for each channel to check TRIMDONE */ | ||
329 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | ||
330 | IB_CTRL2(chn), 0, 0); | ||
331 | if (ret < 0) | ||
332 | qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" | ||
333 | " (%s)\n", chn, where); | ||
334 | |||
335 | if (!(ret & 0x10)) { | ||
336 | int probe; | ||
337 | |||
338 | baduns |= (1 << chn); | ||
339 | qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." | ||
340 | " (%s)\n", chn, ret, where); | ||
341 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | ||
342 | IB_PGUDP(0), 0, 0); | ||
343 | qib_dev_err(dd, "probe is %d (%02X)\n", | ||
344 | probe, probe); | ||
345 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | ||
346 | IB_CTRL2(chn), 0, 0); | ||
347 | qib_dev_err(dd, "re-read: %d (%02X)\n", | ||
348 | probe, probe); | ||
349 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | ||
350 | IB_CTRL2(chn), 0x10, 0x10); | ||
351 | if (ret < 0) | ||
352 | qib_dev_err(dd, | ||
353 | "Err on TRIMDONE rewrite1\n"); | ||
354 | } | ||
355 | } | ||
356 | for (chn = 3; chn >= 0; --chn) { | ||
357 | /* Read CTRL reg for each channel to check TRIMDONE */ | ||
358 | if (baduns & (1 << chn)) { | ||
359 | qib_dev_err(dd, | ||
360 | "Resetting TRIMDONE on chn %d (%s)\n", | ||
361 | chn, where); | ||
362 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | ||
363 | IB_CTRL2(chn), 0x10, 0x10); | ||
364 | if (ret < 0) | ||
365 | qib_dev_err(dd, "Failed re-setting " | ||
366 | "TRIMDONE, chn %d (%s)\n", | ||
367 | chn, where); | ||
368 | } | ||
369 | } | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Below is the portion of IBA7220-specific bringup_serdes() that actually | ||
374 | * deals with registers and memory within the SerDes itself. | ||
375 | * Post IB uC code version 1.32.17, was_reset being 1 is not really | ||
376 | * informative, so we double-check. | ||
377 | */ | ||
378 | int qib_sd7220_init(struct qib_devdata *dd) | ||
379 | { | ||
380 | int ret = 1; /* default to failure */ | ||
381 | int first_reset, was_reset; | ||
382 | |||
383 | /* SERDES MPU reset recorded in D0 */ | ||
384 | was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1); | ||
385 | if (!was_reset) { | ||
386 | /* entered with reset not asserted, we need to do it */ | ||
387 | qib_ibsd_reset(dd, 1); | ||
388 | qib_sd_trimdone_monitor(dd, "Driver-reload"); | ||
389 | } | ||
390 | /* Substitute our deduced value for was_reset */ | ||
391 | ret = qib_ibsd_ucode_loaded(dd->pport); | ||
392 | if (ret < 0) | ||
393 | goto bail; | ||
394 | |||
395 | first_reset = !ret; /* First reset if IBSD uCode not yet loaded */ | ||
396 | /* | ||
397 | * Alter some regs per vendor latest doc, reset-defaults | ||
398 | * are not right for IB. | ||
399 | */ | ||
400 | ret = qib_sd_early(dd); | ||
401 | if (ret < 0) { | ||
402 | qib_dev_err(dd, "Failed to set IB SERDES early defaults\n"); | ||
403 | goto bail; | ||
404 | } | ||
405 | /* | ||
406 | * Set DAC manual trim IB. | ||
407 | * We only do this once after chip has been reset (usually | ||
408 | * same as once per system boot). | ||
409 | */ | ||
410 | if (first_reset) { | ||
411 | ret = qib_sd_dactrim(dd); | ||
412 | if (ret < 0) { | ||
413 | qib_dev_err(dd, "Failed IB SERDES DAC trim\n"); | ||
414 | goto bail; | ||
415 | } | ||
416 | } | ||
417 | /* | ||
418 | * Set various registers (DDS and RXEQ) that will be | ||
419 | * controlled by IBC (in 1.2 mode) to reasonable preset values. | ||
420 | * Calling the "internal" version avoids the "check for needed" | ||
421 | * and "trimdone monitor" that might be counter-productive. | ||
422 | */ | ||
423 | ret = qib_internal_presets(dd); | ||
424 | if (ret < 0) { | ||
425 | qib_dev_err(dd, "Failed to set IB SERDES presets\n"); | ||
426 | goto bail; | ||
427 | } | ||
428 | ret = qib_sd_trimself(dd, 0x80); | ||
429 | if (ret < 0) { | ||
430 | qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); | ||
431 | goto bail; | ||
432 | } | ||
433 | |||
434 | /* Load image, then try to verify */ | ||
435 | ret = 0; /* Assume success */ | ||
436 | if (first_reset) { | ||
437 | int vfy; | ||
438 | int trim_done; | ||
439 | |||
440 | ret = qib_sd7220_ib_load(dd); | ||
441 | if (ret < 0) { | ||
442 | qib_dev_err(dd, "Failed to load IB SERDES image\n"); | ||
443 | goto bail; | ||
444 | } else { | ||
445 | /* Loaded image, try to verify */ | ||
446 | vfy = qib_sd7220_ib_vfy(dd); | ||
447 | if (vfy != ret) { | ||
448 | qib_dev_err(dd, "SERDES PRAM VFY failed\n"); | ||
449 | goto bail; | ||
450 | } /* end if verified */ | ||
451 | } /* end if loaded */ | ||
452 | |||
453 | /* | ||
454 | * Loaded and verified. Almost good... | ||
455 | * hold "success" in ret | ||
456 | */ | ||
457 | ret = 0; | ||
458 | /* | ||
459 | * Prev steps all worked, continue bringup | ||
460 | * De-assert RESET to uC, only in first reset, to allow | ||
461 | * trimming. | ||
462 | * | ||
463 | * Since our default setup sets START_EQ1 to | ||
464 | * PRESET, we need to clear that for this very first run. | ||
465 | */ | ||
466 | ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38); | ||
467 | if (ret < 0) { | ||
468 | qib_dev_err(dd, "Failed clearing START_EQ1\n"); | ||
469 | goto bail; | ||
470 | } | ||
471 | |||
472 | qib_ibsd_reset(dd, 0); | ||
473 | /* | ||
474 | * If this is not the first reset, trimdone should be set | ||
475 | * already. We may need to check about this. | ||
476 | */ | ||
477 | trim_done = qib_sd_trimdone_poll(dd); | ||
478 | /* | ||
479 | * Whether or not trimdone succeeded, we need to put the | ||
480 | * uC back into reset to avoid a possible fight with the | ||
481 | * IBC state-machine. | ||
482 | */ | ||
483 | qib_ibsd_reset(dd, 1); | ||
484 | |||
485 | if (!trim_done) { | ||
486 | qib_dev_err(dd, "No TRIMDONE seen\n"); | ||
487 | goto bail; | ||
488 | } | ||
489 | /* | ||
490 | * DEBUG: check each time we reset if trimdone bits have | ||
491 | * gotten cleared, and re-set them. | ||
492 | */ | ||
493 | qib_sd_trimdone_monitor(dd, "First-reset"); | ||
494 | /* Remember so we do not re-do the load, dactrim, etc. */ | ||
495 | dd->cspec->serdes_first_init_done = 1; | ||
496 | } | ||
497 | /* | ||
498 | * setup for channel training and load values for | ||
499 | * RxEq and DDS in tables used by IBC in IB1.2 mode | ||
500 | */ | ||
501 | ret = 0; | ||
502 | if (qib_sd_setvals(dd) >= 0) | ||
503 | goto done; | ||
504 | bail: | ||
505 | ret = 1; | ||
506 | done: | ||
507 | /* start relock timer regardless, but start at 1 second */ | ||
508 | set_7220_relock_poll(dd, -1); | ||
509 | return ret; | ||
510 | } | ||
511 | |||
512 | #define EPB_ACC_REQ 1 | ||
513 | #define EPB_ACC_GNT 0x100 | ||
514 | #define EPB_DATA_MASK 0xFF | ||
515 | #define EPB_RD (1ULL << 24) | ||
516 | #define EPB_TRANS_RDY (1ULL << 31) | ||
517 | #define EPB_TRANS_ERR (1ULL << 30) | ||
518 | #define EPB_TRANS_TRIES 5 | ||
519 | |||
520 | /* | ||
521 | * Query, claim, or release ownership of the EPB (External Parallel Bus) | ||
522 | * for a specified SERDES. | ||
523 | * The "claim" parameter is >0 to claim, <0 to release, 0 to query. | ||
524 | * Returns <0 for errors, >0 if we had ownership, else 0. | ||
525 | */ | ||
526 | static int epb_access(struct qib_devdata *dd, int sdnum, int claim) | ||
527 | { | ||
528 | u16 acc; | ||
529 | u64 accval; | ||
530 | int owned = 0; | ||
531 | u64 oct_sel = 0; | ||
532 | |||
533 | switch (sdnum) { | ||
534 | case IB_7220_SERDES: | ||
535 | /* | ||
536 | * The IB SERDES "ownership" is fairly simple: a single | ||
537 | * request/grant. | ||
538 | */ | ||
539 | acc = kr_ibsd_epb_access_ctrl; | ||
540 | break; | ||
541 | |||
542 | case PCIE_SERDES0: | ||
543 | case PCIE_SERDES1: | ||
544 | /* PCIe SERDES has two "octants", need to select which */ | ||
545 | acc = kr_pciesd_epb_access_ctrl; | ||
546 | oct_sel = (2 << (sdnum - PCIE_SERDES0)); | ||
547 | break; | ||
548 | |||
549 | default: | ||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | /* Make sure any outstanding transaction was seen */ | ||
554 | qib_read_kreg32(dd, kr_scratch); | ||
555 | udelay(15); | ||
556 | |||
557 | accval = qib_read_kreg32(dd, acc); | ||
558 | |||
559 | owned = !!(accval & EPB_ACC_GNT); | ||
560 | if (claim < 0) { | ||
561 | /* Need to release */ | ||
562 | u64 pollval; | ||
563 | /* | ||
564 | * The only writeable bits are the request and CS. | ||
565 | * Both should be clear | ||
566 | */ | ||
567 | u64 newval = 0; | ||
568 | qib_write_kreg(dd, acc, newval); | ||
569 | /* First read after write is not trustworthy */ | ||
570 | pollval = qib_read_kreg32(dd, acc); | ||
571 | udelay(5); | ||
572 | pollval = qib_read_kreg32(dd, acc); | ||
573 | if (pollval & EPB_ACC_GNT) | ||
574 | owned = -1; | ||
575 | } else if (claim > 0) { | ||
576 | /* Need to claim */ | ||
577 | u64 pollval; | ||
578 | u64 newval = EPB_ACC_REQ | oct_sel; | ||
579 | qib_write_kreg(dd, acc, newval); | ||
580 | /* First read after write is not trustworthy */ | ||
581 | pollval = qib_read_kreg32(dd, acc); | ||
582 | udelay(5); | ||
583 | pollval = qib_read_kreg32(dd, acc); | ||
584 | if (!(pollval & EPB_ACC_GNT)) | ||
585 | owned = -1; | ||
586 | } | ||
587 | return owned; | ||
588 | } | ||
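/*
 * Typical call pattern (sketch, not literal code; this is what
 * qib_sd7220_reg_mod() and qib_sd7220_ram_xfer() below do):
 *
 *	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
 *	if (epb_access(dd, sdnum, 1) < 0)
 *		... unlock and return error ...
 *	... EPB transactions via epb_trans() ...
 *	if (epb_access(dd, sdnum, -1) < 0)
 *		ret = -1;		(failed release is an error)
 *	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
 */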
589 | |||
590 | /* | ||
591 | * Lemma to deal with race condition of write..read to epb regs | ||
592 | */ | ||
593 | static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) | ||
594 | { | ||
595 | int tries; | ||
596 | u64 transval; | ||
597 | |||
598 | qib_write_kreg(dd, reg, i_val); | ||
599 | /* Throw away first read, as RDY bit may be stale */ | ||
600 | transval = qib_read_kreg64(dd, reg); | ||
601 | |||
602 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | ||
603 | transval = qib_read_kreg32(dd, reg); | ||
604 | if (transval & EPB_TRANS_RDY) | ||
605 | break; | ||
606 | udelay(5); | ||
607 | } | ||
608 | if (transval & EPB_TRANS_ERR) | ||
609 | return -1; | ||
610 | if (tries > 0 && o_vp) | ||
611 | *o_vp = transval; | ||
612 | return tries; | ||
613 | } | ||
614 | |||
615 | /** | ||
616 | * qib_sd7220_reg_mod - modify SERDES register | ||
617 | * @dd: the qlogic_ib device | ||
618 | * @sdnum: which SERDES to access | ||
619 | * @loc: location - channel, element, register, as packed by EPB_LOC() macro. | ||
620 | * @wd: Write Data - value to set in register | ||
621 | * @mask: ones where data should be spliced into reg. | ||
622 | * | ||
623 | * Basic register read/modify/write, with un-needed accesses elided. That is, | ||
624 | * a mask of zero will prevent write, while a mask of 0xFF will prevent read. | ||
625 | * returns current (presumed, if a write was done) contents of selected | ||
626 | * register, or <0 if errors. | ||
627 | */ | ||
628 | static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, | ||
629 | u32 wd, u32 mask) | ||
630 | { | ||
631 | u16 trans; | ||
632 | u64 transval; | ||
633 | int owned; | ||
634 | int tries, ret; | ||
635 | unsigned long flags; | ||
636 | |||
637 | switch (sdnum) { | ||
638 | case IB_7220_SERDES: | ||
639 | trans = kr_ibsd_epb_transaction_reg; | ||
640 | break; | ||
641 | |||
642 | case PCIE_SERDES0: | ||
643 | case PCIE_SERDES1: | ||
644 | trans = kr_pciesd_epb_transaction_reg; | ||
645 | break; | ||
646 | |||
647 | default: | ||
648 | return -1; | ||
649 | } | ||
650 | |||
651 | /* | ||
652 | * All access is locked in software (vs other host threads) and | ||
653 | * hardware (vs uC access). | ||
654 | */ | ||
655 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); | ||
656 | |||
657 | owned = epb_access(dd, sdnum, 1); | ||
658 | if (owned < 0) { | ||
659 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | ||
660 | return -1; | ||
661 | } | ||
662 | ret = 0; | ||
663 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | ||
664 | transval = qib_read_kreg32(dd, trans); | ||
665 | if (transval & EPB_TRANS_RDY) | ||
666 | break; | ||
667 | udelay(5); | ||
668 | } | ||
669 | |||
670 | if (tries > 0) { | ||
671 | tries = 1; /* to make read-skip work */ | ||
672 | if (mask != 0xFF) { | ||
673 | /* | ||
674 | * Not a pure write, so need to read. | ||
675 | * loc encodes chip-select as well as address | ||
676 | */ | ||
677 | transval = loc | EPB_RD; | ||
678 | tries = epb_trans(dd, trans, transval, &transval); | ||
679 | } | ||
680 | if (tries > 0 && mask != 0) { | ||
681 | /* | ||
682 | * Not a pure read, so need to write. | ||
683 | */ | ||
684 | wd = (wd & mask) | (transval & ~mask); | ||
685 | transval = loc | (wd & EPB_DATA_MASK); | ||
686 | tries = epb_trans(dd, trans, transval, &transval); | ||
687 | } | ||
688 | } | ||
689 | /* else, failed to see ready, what error-handling? */ | ||
690 | |||
691 | /* | ||
692 | * Release bus. Failure is an error. | ||
693 | */ | ||
694 | if (epb_access(dd, sdnum, -1) < 0) | ||
695 | ret = -1; | ||
696 | else | ||
697 | ret = transval & EPB_DATA_MASK; | ||
698 | |||
699 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | ||
700 | if (tries <= 0) | ||
701 | ret = -1; | ||
702 | return ret; | ||
703 | } | ||
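/*
 * Usage sketch (illustrative, mirroring calls elsewhere in this file):
 * the mask argument selects among the three idioms described above.
 *
 *	cur = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);	pure read
 *	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, wd, 0xFF);	pure write
 *	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, wd, mask);	read-modify-write
 */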
704 | |||
705 | #define EPB_ROM_R (2) | ||
706 | #define EPB_ROM_W (1) | ||
707 | /* | ||
708 | * The registers below are all uC-related; use the appropriate UC_CS | ||
709 | * depending on which SerDes is used. | ||
710 | */ | ||
711 | #define EPB_UC_CTL EPB_LOC(6, 0, 0) | ||
712 | #define EPB_MADDRL EPB_LOC(6, 0, 2) | ||
713 | #define EPB_MADDRH EPB_LOC(6, 0, 3) | ||
714 | #define EPB_ROMDATA EPB_LOC(6, 0, 4) | ||
715 | #define EPB_RAMDATA EPB_LOC(6, 0, 5) | ||
716 | |||
717 | /* Transfer data to/from uC Program RAM of IB or PCIe SerDes */ | ||
718 | static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc, | ||
719 | u8 *buf, int cnt, int rd_notwr) | ||
720 | { | ||
721 | u16 trans; | ||
722 | u64 transval; | ||
723 | u64 csbit; | ||
724 | int owned; | ||
725 | int tries; | ||
726 | int sofar; | ||
727 | int addr; | ||
728 | int ret; | ||
729 | unsigned long flags; | ||
730 | const char *op; | ||
731 | |||
732 | /* Pick appropriate transaction reg and "Chip select" for this serdes */ | ||
733 | switch (sdnum) { | ||
734 | case IB_7220_SERDES: | ||
735 | csbit = 1ULL << EPB_IB_UC_CS_SHF; | ||
736 | trans = kr_ibsd_epb_transaction_reg; | ||
737 | break; | ||
738 | |||
739 | case PCIE_SERDES0: | ||
740 | case PCIE_SERDES1: | ||
741 | /* PCIe SERDES has uC "chip select" in different bit, too */ | ||
742 | csbit = 1ULL << EPB_PCIE_UC_CS_SHF; | ||
743 | trans = kr_pciesd_epb_transaction_reg; | ||
744 | break; | ||
745 | |||
746 | default: | ||
747 | return -1; | ||
748 | } | ||
749 | |||
750 | op = rd_notwr ? "Rd" : "Wr"; | ||
751 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); | ||
752 | |||
753 | owned = epb_access(dd, sdnum, 1); | ||
754 | if (owned < 0) { | ||
755 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | ||
756 | return -1; | ||
757 | } | ||
758 | |||
759 | /* | ||
760 | * In future code, we may need to distinguish several address ranges, | ||
761 | * and select various memories based on this. For now, just trim | ||
762 | * "loc" (location including address and memory select) to | ||
763 | * "addr" (address within memory). We will only support PRAM. | ||
764 | * The memory is 8KB. | ||
765 | */ | ||
766 | addr = loc & 0x1FFF; | ||
767 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | ||
768 | transval = qib_read_kreg32(dd, trans); | ||
769 | if (transval & EPB_TRANS_RDY) | ||
770 | break; | ||
771 | udelay(5); | ||
772 | } | ||
773 | |||
774 | sofar = 0; | ||
775 | if (tries > 0) { | ||
776 | /* | ||
777 | * Every "memory" access is doubly-indirect. | ||
778 | * We set two bytes of address, then read/write | ||
779 | * one or more bytes of data. | ||
780 | */ | ||
781 | |||
782 | /* First, we set control to "Read" or "Write" */ | ||
783 | transval = csbit | EPB_UC_CTL | | ||
784 | (rd_notwr ? EPB_ROM_R : EPB_ROM_W); | ||
785 | tries = epb_trans(dd, trans, transval, &transval); | ||
786 | while (tries > 0 && sofar < cnt) { | ||
787 | if (!sofar) { | ||
788 | /* Only set address at start of chunk */ | ||
789 | int addrbyte = (addr + sofar) >> 8; | ||
790 | transval = csbit | EPB_MADDRH | addrbyte; | ||
791 | tries = epb_trans(dd, trans, transval, | ||
792 | &transval); | ||
793 | if (tries <= 0) | ||
794 | break; | ||
795 | addrbyte = (addr + sofar) & 0xFF; | ||
796 | transval = csbit | EPB_MADDRL | addrbyte; | ||
797 | tries = epb_trans(dd, trans, transval, | ||
798 | &transval); | ||
799 | if (tries <= 0) | ||
800 | break; | ||
801 | } | ||
802 | |||
803 | if (rd_notwr) | ||
804 | transval = csbit | EPB_ROMDATA | EPB_RD; | ||
805 | else | ||
806 | transval = csbit | EPB_ROMDATA | buf[sofar]; | ||
807 | tries = epb_trans(dd, trans, transval, &transval); | ||
808 | if (tries <= 0) | ||
809 | break; | ||
810 | if (rd_notwr) | ||
811 | buf[sofar] = transval & EPB_DATA_MASK; | ||
812 | ++sofar; | ||
813 | } | ||
814 | /* Finally, clear control-bit for Read or Write */ | ||
815 | transval = csbit | EPB_UC_CTL; | ||
816 | tries = epb_trans(dd, trans, transval, &transval); | ||
817 | } | ||
818 | |||
819 | ret = sofar; | ||
820 | /* Release bus. Failure is an error */ | ||
821 | if (epb_access(dd, sdnum, -1) < 0) | ||
822 | ret = -1; | ||
823 | |||
824 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); | ||
825 | if (tries <= 0) | ||
826 | ret = -1; | ||
827 | return ret; | ||
828 | } | ||
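/*
 * Transaction sequence sketch (illustrative): a read of cnt bytes at
 * "addr" issues, via epb_trans() in the loop above:
 *
 *	csbit | EPB_UC_CTL | EPB_ROM_R		(enter read mode)
 *	csbit | EPB_MADDRH | (addr >> 8)	(address high byte, once)
 *	csbit | EPB_MADDRL | (addr & 0xFF)	(address low byte, once)
 *	csbit | EPB_ROMDATA | EPB_RD		(repeated cnt times)
 *	csbit | EPB_UC_CTL			(leave read mode)
 *
 * The address is written only once; it is presumably advanced on the uC
 * side, since the loop above never rewrites it.
 */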
829 | |||
830 | #define PROG_CHUNK 64 | ||
831 | |||
832 | int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, | ||
833 | u8 *img, int len, int offset) | ||
834 | { | ||
835 | int cnt, sofar, req; | ||
836 | |||
837 | sofar = 0; | ||
838 | while (sofar < len) { | ||
839 | req = len - sofar; | ||
840 | if (req > PROG_CHUNK) | ||
841 | req = PROG_CHUNK; | ||
842 | cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar, | ||
843 | img + sofar, req, 0); | ||
844 | if (cnt < req) { | ||
845 | sofar = -1; | ||
846 | break; | ||
847 | } | ||
848 | sofar += req; | ||
849 | } | ||
850 | return sofar; | ||
851 | } | ||
852 | |||
853 | #define VFY_CHUNK 64 | ||
854 | #define SD_PRAM_ERROR_LIMIT 42 | ||
855 | |||
856 | int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, | ||
857 | const u8 *img, int len, int offset) | ||
858 | { | ||
859 | int cnt, sofar, req, idx, errors; | ||
860 | unsigned char readback[VFY_CHUNK]; | ||
861 | |||
862 | errors = 0; | ||
863 | sofar = 0; | ||
864 | while (sofar < len) { | ||
865 | req = len - sofar; | ||
866 | if (req > VFY_CHUNK) | ||
867 | req = VFY_CHUNK; | ||
868 | cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset, | ||
869 | readback, req, 1); | ||
870 | if (cnt < req) { | ||
871 | /* failed in read itself */ | ||
872 | sofar = -1; | ||
873 | break; | ||
874 | } | ||
875 | for (idx = 0; idx < cnt; ++idx) { | ||
876 | if (readback[idx] != img[idx+sofar]) | ||
877 | ++errors; | ||
878 | } | ||
879 | sofar += cnt; | ||
880 | } | ||
881 | return errors ? -errors : sofar; | ||
882 | } | ||
883 | |||
884 | /* | ||
885 | * IRQ not set up at this point in init, so we poll. | ||
886 | */ | ||
887 | #define IB_SERDES_TRIM_DONE (1ULL << 11) | ||
888 | #define TRIM_TMO (30) | ||
889 | |||
890 | static int qib_sd_trimdone_poll(struct qib_devdata *dd) | ||
891 | { | ||
892 | int trim_tmo, ret; | ||
893 | uint64_t val; | ||
894 | |||
895 | /* | ||
896 | * Default to failure, so IBC will not start | ||
897 | * without IB_SERDES_TRIM_DONE. | ||
898 | */ | ||
899 | ret = 0; | ||
900 | for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) { | ||
901 | val = qib_read_kreg64(dd, kr_ibcstatus); | ||
902 | if (val & IB_SERDES_TRIM_DONE) { | ||
903 | ret = 1; | ||
904 | break; | ||
905 | } | ||
906 | msleep(10); | ||
907 | } | ||
908 | if (trim_tmo >= TRIM_TMO) { | ||
909 | qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); | ||
910 | ret = 0; | ||
911 | } | ||
912 | return ret; | ||
913 | } | ||
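/*
 * Timing note (illustrative arithmetic): TRIM_TMO polls of msleep(10)
 * above bound the wait for TRIMDONE at roughly 300 msec.
 */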
914 | |||
915 | #define TX_FAST_ELT (9) | ||
916 | |||
917 | /* | ||
918 | * Set the "negotiation" values for SERDES. These are used by the IB1.2 | ||
919 | * link negotiation. Macros below are an attempt to keep the values a | ||
920 | * little more human-editable. | ||
921 | * First, values related to Drive De-emphasis Settings. | ||
922 | */ | ||
923 | |||
924 | #define NUM_DDS_REGS 6 | ||
925 | #define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */ | ||
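/*
 * Decoding example (illustrative): DDS_REG_MAP = 0x76A910 read LSB-first
 * gives the register sequence 0x0, 0x1, 0x9, 0xA, 0x6, 0x7, so the six
 * reg_vals[] bytes of each row below land in those registers of element 9
 * (TX_FAST_ELT), in that order.
 */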
926 | |||
927 | #define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \ | ||
928 | { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \ | ||
929 | (main_d << 3) | 4 | (ipre_d >> 2), \ | ||
930 | (main_s << 3) | 4 | (ipre_s >> 2), \ | ||
931 | ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \ | ||
932 | ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } } | ||
933 | |||
934 | static struct dds_init { | ||
935 | uint8_t reg_vals[NUM_DDS_REGS]; | ||
936 | } dds_init_vals[] = { | ||
937 | /* DDR(FDR) SDR(HDR) */ | ||
938 | /* Vendor recommends below for 3m cable */ | ||
939 | #define DDS_3M 0 | ||
940 | DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0), | ||
941 | DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1), | ||
942 | DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0), | ||
943 | DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0), | ||
944 | DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0), | ||
945 | DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0), | ||
946 | DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0), | ||
947 | DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0), | ||
948 | DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0), | ||
949 | DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0), | ||
950 | DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0), | ||
951 | DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0), | ||
952 | DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0), | ||
953 | /* Vendor recommends below for 1m cable */ | ||
954 | #define DDS_1M 13 | ||
955 | DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0), | ||
956 | DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0), | ||
957 | DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0) | ||
958 | }; | ||
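/*
 * Expansion example (illustrative): the DDS_3M row
 * DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0) packs to the six bytes
 * { 0x3F, 0x3B, 0x9C, 0xB4, 0x39, 0x33 }, with the DDR and SDR
 * amplitude, main, and pre/post-cursor fields interleaved as in the
 * macro above.
 */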
959 | |||
960 | /* | ||
961 | * Now the RXEQ section of the table. | ||
962 | */ | ||
963 | /* Hardware packs an element number and register address thus: */ | ||
964 | #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4)) | ||
965 | #define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \ | ||
966 | {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} } | ||
967 | |||
968 | #define RXEQ_VAL_ALL(elt, adr, val) \ | ||
969 | {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} } | ||
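/*
 * Packing example (illustrative): RXEQ_INIT_RDESC(7, 0x27) = 0x277, with
 * the element in the low nibble and the register address above it; it is
 * unpacked the same way in set_rxeq_vals() below (elt = rdesc & 0xF,
 * reg = rdesc >> 4).
 */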
970 | |||
971 | #define RXEQ_SDR_DFELTH 0 | ||
972 | #define RXEQ_SDR_TLTH 0 | ||
973 | #define RXEQ_SDR_G1CNT_Z1CNT 0x11 | ||
974 | #define RXEQ_SDR_ZCNT 23 | ||
975 | |||
976 | static struct rxeq_init { | ||
977 | u16 rdesc; /* in form used in SerDesDDSRXEQ */ | ||
978 | u8 rdata[4]; | ||
979 | } rxeq_init_vals[] = { | ||
980 | /* Set Rcv Eq. to Preset mode */ | ||
981 | RXEQ_VAL_ALL(7, 0x27, 0x10), | ||
982 | /* Set DFELTHFDR/HDR thresholds */ | ||
983 | RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */ | ||
984 | RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */ | ||
985 | /* Set TLTHFDR/HDR threshold */ | ||
986 | RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */ | ||
987 | RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */ | ||
988 | /* Set Preamp setting 2 (ZFR/ZCNT) */ | ||
989 | RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */ | ||
990 | RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */ | ||
991 | /* Set Preamp DC gain and Setting 1 (GFR/GHR) */ | ||
992 | RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */ | ||
993 | RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */ | ||
994 | /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */ | ||
995 | RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */ | ||
996 | RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */ | ||
997 | }; | ||
998 | |||
999 | /* There are 17 values from vendor, but IBC only accesses the first 16 */ | ||
1000 | #define DDS_ROWS (16) | ||
1001 | #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals) | ||
1002 | |||
1003 | static int qib_sd_setvals(struct qib_devdata *dd) | ||
1004 | { | ||
1005 | int idx, midx; | ||
1006 | int min_idx; /* Minimum index for this portion of table */ | ||
1007 | uint32_t dds_reg_map; | ||
1008 | u64 __iomem *taddr, *iaddr; | ||
1009 | uint64_t data; | ||
1010 | uint64_t sdctl; | ||
1011 | |||
1012 | taddr = dd->kregbase + kr_serdes_maptable; | ||
1013 | iaddr = dd->kregbase + kr_serdes_ddsrxeq0; | ||
1014 | |||
1015 | /* | ||
1016 | * Init the DDS section of the table. | ||
1017 | * Each "row" of the table provokes NUM_DDS_REGS writes, to the | ||
1018 | * registers indicated in DDS_REG_MAP. | ||
1019 | */ | ||
1020 | sdctl = qib_read_kreg64(dd, kr_ibserdesctrl); | ||
1021 | sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8); | ||
1022 | sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13); | ||
1023 | qib_write_kreg(dd, kr_ibserdesctrl, sdctl); | ||
1024 | |||
1025 | /* | ||
1026 | * Iterate down table within loop for each register to store. | ||
1027 | */ | ||
1028 | dds_reg_map = DDS_REG_MAP; | ||
1029 | for (idx = 0; idx < NUM_DDS_REGS; ++idx) { | ||
1030 | data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT; | ||
1031 | writeq(data, iaddr + idx); | ||
1032 | mmiowb(); | ||
1033 | qib_read_kreg32(dd, kr_scratch); | ||
1034 | dds_reg_map >>= 4; | ||
1035 | for (midx = 0; midx < DDS_ROWS; ++midx) { | ||
1036 | u64 __iomem *daddr = taddr + ((midx << 4) + idx); | ||
1037 | data = dds_init_vals[midx].reg_vals[idx]; | ||
1038 | writeq(data, daddr); | ||
1039 | mmiowb(); | ||
1040 | qib_read_kreg32(dd, kr_scratch); | ||
1041 | } /* End inner for (vals for this reg, each row) */ | ||
1042 | } /* end outer for (regs to be stored) */ | ||
1043 | |||
1044 | /* | ||
1045 | * Init the RXEQ section of the table. | ||
1046 | * This runs in a different order, as the pattern of | ||
1047 | * register references is more complex, but there are only | ||
1048 | * four "data" values per register. | ||
1049 | */ | ||
1050 | min_idx = idx; /* RXEQ indices pick up where DDS left off */ | ||
1051 | taddr += 0x100; /* RXEQ data is in second half of table */ | ||
1052 | /* Iterate through RXEQ register addresses */ | ||
1053 | for (idx = 0; idx < RXEQ_ROWS; ++idx) { | ||
1054 | int didx; /* "destination" */ | ||
1055 | int vidx; | ||
1056 | |||
1057 | /* didx is offset by min_idx to address RXEQ range of regs */ | ||
1058 | didx = idx + min_idx; | ||
1059 | /* Store the next RXEQ register address */ | ||
1060 | writeq(rxeq_init_vals[idx].rdesc, iaddr + didx); | ||
1061 | mmiowb(); | ||
1062 | qib_read_kreg32(dd, kr_scratch); | ||
1063 | /* Iterate through RXEQ values */ | ||
1064 | for (vidx = 0; vidx < 4; vidx++) { | ||
1065 | data = rxeq_init_vals[idx].rdata[vidx]; | ||
1066 | writeq(data, taddr + (vidx << 6) + idx); | ||
1067 | mmiowb(); | ||
1068 | qib_read_kreg32(dd, kr_scratch); | ||
1069 | } | ||
1070 | } /* end outer for (Reg-writes for RXEQ) */ | ||
1071 | return 0; | ||
1072 | } | ||
1073 | |||
1074 | #define CMUCTRL5 EPB_LOC(7, 0, 0x15) | ||
1075 | #define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0) | ||
1076 | #define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5) | ||
1077 | #define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6) | ||
1078 | #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8) | ||
1079 | #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28) | ||
1080 | |||
1081 | /* | ||
1082 | * Repeat a "store" across all channels of the IB SerDes. | ||
1083 | * Although nominally it inherits the "read value" of the last | ||
1084 | * channel it modified, the only really useful return is <0 for | ||
1085 | * failure, >= 0 for success. The parameter 'loc' is assumed to | ||
1086 | * be the location in some channel of the register to be modified | ||
1087 | * The caller can specify use of the "gang write" option of EPB, | ||
1088 | * in which case we use the specified channel data for any fields | ||
1089 | * not explicitly written. | ||
1090 | */ | ||
1091 | static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, | ||
1092 | int mask) | ||
1093 | { | ||
1094 | int ret = -1; | ||
1095 | int chnl; | ||
1096 | |||
1097 | if (loc & EPB_GLOBAL_WR) { | ||
1098 | /* | ||
1099 | * Our caller has assured us that we can set all four | ||
1100 | * channels at once. Trust that. If mask is not 0xFF, | ||
1101 | * we will read the _specified_ channel for our starting | ||
1102 | * value. | ||
1103 | */ | ||
1104 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); | ||
1105 | chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7; | ||
1106 | if (mask != 0xFF) { | ||
1107 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, | ||
1108 | loc & ~EPB_GLOBAL_WR, 0, 0); | ||
1109 | if (ret < 0) { | ||
1110 | int sloc = loc >> EPB_ADDR_SHF; | ||
1111 | |||
1112 | qib_dev_err(dd, "pre-read failed: elt %d," | ||
1113 | " addr 0x%X, chnl %d\n", | ||
1114 | (sloc & 0xF), | ||
1115 | (sloc >> 9) & 0x3f, chnl); | ||
1116 | return ret; | ||
1117 | } | ||
1118 | val = (ret & ~mask) | (val & mask); | ||
1119 | } | ||
1120 | loc &= ~(7 << (4+EPB_ADDR_SHF)); | ||
1121 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); | ||
1122 | if (ret < 0) { | ||
1123 | int sloc = loc >> EPB_ADDR_SHF; | ||
1124 | |||
1125 | qib_dev_err(dd, "Global WR failed: elt %d," | ||
1126 | " addr 0x%X, val %02X\n", | ||
1127 | (sloc & 0xF), (sloc >> 9) & 0x3f, val); | ||
1128 | } | ||
1129 | return ret; | ||
1130 | } | ||
1131 | /* Clear "channel" and set CS so we can simply iterate */ | ||
1132 | loc &= ~(7 << (4+EPB_ADDR_SHF)); | ||
1133 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); | ||
1134 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1135 | int cloc = loc | (chnl << (4+EPB_ADDR_SHF)); | ||
1136 | |||
1137 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask); | ||
1138 | if (ret < 0) { | ||
1139 | int sloc = loc >> EPB_ADDR_SHF; | ||
1140 | |||
1141 | qib_dev_err(dd, "Write failed: elt %d," | ||
1142 | " addr 0x%X, chnl %d, val 0x%02X," | ||
1143 | " mask 0x%02X\n", | ||
1144 | (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, | ||
1145 | val & 0xFF, mask & 0xFF); | ||
1146 | break; | ||
1147 | } | ||
1148 | } | ||
1149 | return ret; | ||
1150 | } | ||
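/*
 * Usage sketch (illustrative, matching calls later in this file): with
 * EPB_GLOBAL_WR set, a single gang write covers all four channels, e.g.
 *
 *	ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
 *
 * without it, the loop above writes channels 0..3 individually.
 */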
1151 | |||
1152 | /* | ||
1153 | * Set the Tx values normally modified by IBC in IB1.2 mode to default | ||
1154 | * values, as gotten from first row of init table. | ||
1155 | */ | ||
1156 | static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi) | ||
1157 | { | ||
1158 | int ret; | ||
1159 | int idx, reg, data; | ||
1160 | uint32_t regmap; | ||
1161 | |||
1162 | regmap = DDS_REG_MAP; | ||
1163 | for (idx = 0; idx < NUM_DDS_REGS; ++idx) { | ||
1164 | reg = (regmap & 0xF); | ||
1165 | regmap >>= 4; | ||
1166 | data = ddi->reg_vals[idx]; | ||
1167 | /* Vendor says RMW not needed for these regs, use 0xFF mask */ | ||
1168 | ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF); | ||
1169 | if (ret < 0) | ||
1170 | break; | ||
1171 | } | ||
1172 | return ret; | ||
1173 | } | ||
1174 | |||
1175 | /* | ||
1176 | * Set the Rx values normally modified by IBC in IB1.2 mode to default | ||
1177 | * values, as gotten from selected column of init table. | ||
1178 | */ | ||
1179 | static int set_rxeq_vals(struct qib_devdata *dd, int vsel) | ||
1180 | { | ||
1181 | int ret; | ||
1182 | int ridx; | ||
1183 | int cnt = ARRAY_SIZE(rxeq_init_vals); | ||
1184 | |||
1185 | for (ridx = 0; ridx < cnt; ++ridx) { | ||
1186 | int elt, reg, val, loc; | ||
1187 | |||
1188 | elt = rxeq_init_vals[ridx].rdesc & 0xF; | ||
1189 | reg = rxeq_init_vals[ridx].rdesc >> 4; | ||
1190 | loc = EPB_LOC(0, elt, reg); | ||
1191 | val = rxeq_init_vals[ridx].rdata[vsel]; | ||
1192 | /* mask of 0xFF, because hardware does full-byte store. */ | ||
1193 | ret = ibsd_mod_allchnls(dd, loc, val, 0xFF); | ||
1194 | if (ret < 0) | ||
1195 | break; | ||
1196 | } | ||
1197 | return ret; | ||
1198 | } | ||
1199 | |||
1200 | /* | ||
1201 | * Set the default values (row 0) for DDR Driver De-emphasis. | ||
1202 | * We do this initially and whenever we turn off IB-1.2. | ||
1203 | * | ||
1204 | * The "default" values for Rx equalization are also stored to | ||
1205 | * SerDes registers. Formerly (and still default), we used set 2. | ||
1206 | * For experimenting with cables and link-partners, we allow changing | ||
1207 | * that via a module parameter. | ||
1208 | */ | ||
1209 | static unsigned qib_rxeq_set = 2; | ||
1210 | module_param_named(rxeq_default_set, qib_rxeq_set, uint, | ||
1211 | S_IWUSR | S_IRUGO); | ||
1212 | MODULE_PARM_DESC(rxeq_default_set, | ||
1213 | "Which set [0..3] of Rx Equalization values is default"); | ||
1214 | |||
1215 | static int qib_internal_presets(struct qib_devdata *dd) | ||
1216 | { | ||
1217 | int ret = 0; | ||
1218 | |||
1219 | ret = set_dds_vals(dd, dds_init_vals + DDS_3M); | ||
1220 | |||
1221 | if (ret < 0) | ||
1222 | qib_dev_err(dd, "Failed to set default DDS values\n"); | ||
1223 | ret = set_rxeq_vals(dd, qib_rxeq_set & 3); | ||
1224 | if (ret < 0) | ||
1225 | qib_dev_err(dd, "Failed to set default RXEQ values\n"); | ||
1226 | return ret; | ||
1227 | } | ||
1228 | |||
1229 | int qib_sd7220_presets(struct qib_devdata *dd) | ||
1230 | { | ||
1231 | int ret = 0; | ||
1232 | |||
1233 | if (!dd->cspec->presets_needed) | ||
1234 | return ret; | ||
1235 | dd->cspec->presets_needed = 0; | ||
1236 | /* Assert uC reset, so we don't clash with it. */ | ||
1237 | qib_ibsd_reset(dd, 1); | ||
1238 | udelay(2); | ||
1239 | qib_sd_trimdone_monitor(dd, "link-down"); | ||
1240 | |||
1241 | ret = qib_internal_presets(dd); | ||
1242 | return ret; | ||
1243 | } | ||
1244 | |||
1245 | static int qib_sd_trimself(struct qib_devdata *dd, int val) | ||
1246 | { | ||
1247 | int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF); | ||
1248 | |||
1249 | return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); | ||
1250 | } | ||
1251 | |||
1252 | static int qib_sd_early(struct qib_devdata *dd) | ||
1253 | { | ||
1254 | int ret; | ||
1255 | |||
1256 | ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF); | ||
1257 | if (ret < 0) | ||
1258 | goto bail; | ||
1259 | ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF); | ||
1260 | if (ret < 0) | ||
1261 | goto bail; | ||
1262 | ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF); | ||
1263 | bail: | ||
1264 | return ret; | ||
1265 | } | ||
1266 | |||
1267 | #define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E) | ||
1268 | #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6) | ||
1269 | #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF) | ||
1270 | |||
1271 | static int qib_sd_dactrim(struct qib_devdata *dd) | ||
1272 | { | ||
1273 | int ret; | ||
1274 | |||
1275 | ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF); | ||
1276 | if (ret < 0) | ||
1277 | goto bail; | ||
1278 | |||
1279 | /* more fine-tuning of what will be default */ | ||
1280 | ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF); | ||
1281 | if (ret < 0) | ||
1282 | goto bail; | ||
1283 | |||
1284 | ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF); | ||
1285 | if (ret < 0) | ||
1286 | goto bail; | ||
1287 | |||
1288 | ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF); | ||
1289 | if (ret < 0) | ||
1290 | goto bail; | ||
1291 | |||
1292 | ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF); | ||
1293 | if (ret < 0) | ||
1294 | goto bail; | ||
1295 | |||
1296 | /* | ||
1297 | * Delay for max possible number of steps, with slop. | ||
1298 | * Each step is about 4usec. | ||
1299 | */ | ||
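	/*
	 * (Illustrative arithmetic: 415 usec covers roughly a hundred
	 * 4 usec steps, plus margin.)
	 */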
1300 | udelay(415); | ||
1301 | |||
1302 | ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF); | ||
1303 | |||
1304 | bail: | ||
1305 | return ret; | ||
1306 | } | ||
1307 | |||
1308 | #define RELOCK_FIRST_MS 3 | ||
1309 | #define RXLSPPM(chan) EPB_LOC(chan, 0, 2) | ||
1310 | void toggle_7220_rclkrls(struct qib_devdata *dd) | ||
1311 | { | ||
1312 | int loc = RXLSPPM(0) | EPB_GLOBAL_WR; | ||
1313 | int ret; | ||
1314 | |||
1315 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); | ||
1316 | if (ret < 0) | ||
1317 | qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); | ||
1318 | else { | ||
1319 | udelay(1); | ||
1320 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); | ||
1321 | } | ||
1322 | /* And again for good measure */ | ||
1323 | udelay(1); | ||
1324 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); | ||
1325 | if (ret < 0) | ||
1326 | qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); | ||
1327 | else { | ||
1328 | udelay(1); | ||
1329 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); | ||
1330 | } | ||
1331 | /* Now reset xgxs and IBC to complete the recovery */ | ||
1332 | dd->f_xgxs_reset(dd->pport); | ||
1333 | } | ||
1334 | |||
1335 | /* | ||
1336 | * Shut down the timer that polls for relock occasions, if needed. | ||
1337 | * This is "hooked" from qib_7220_quiet_serdes(), which is called | ||
1338 | * just before qib_shutdown_device() in qib_driver.c shuts down all | ||
1339 | * the other timers. | ||
1340 | */ | ||
1341 | void shutdown_7220_relock_poll(struct qib_devdata *dd) | ||
1342 | { | ||
1343 | if (dd->cspec->relock_timer_active) | ||
1344 | del_timer_sync(&dd->cspec->relock_timer); | ||
1345 | } | ||
1346 | |||
1347 | static unsigned qib_relock_by_timer = 1; | ||
1348 | module_param_named(relock_by_timer, qib_relock_by_timer, uint, | ||
1349 | S_IWUSR | S_IRUGO); | ||
1350 | MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up"); | ||
1351 | |||
1352 | static void qib_run_relock(unsigned long opaque) | ||
1353 | { | ||
1354 | struct qib_devdata *dd = (struct qib_devdata *)opaque; | ||
1355 | struct qib_pportdata *ppd = dd->pport; | ||
1356 | struct qib_chip_specific *cs = dd->cspec; | ||
1357 | int timeoff; | ||
1358 | |||
1359 | /* | ||
1360 | * Check link-training state for "stuck" state, when down. | ||
1361 | * If found, try relock and schedule another try at | ||
1362 | * exponentially growing delay, maxed at one second. | ||
1363 | * If not stuck, our work is done. | ||
1364 | */ | ||
1365 | if ((dd->flags & QIB_INITTED) && !(ppd->lflags & | ||
1366 | (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED | | ||
1367 | QIBL_LINKACTIVE))) { | ||
1368 | if (qib_relock_by_timer) { | ||
1369 | if (!(ppd->lflags & QIBL_IB_LINK_DISABLED)) | ||
1370 | toggle_7220_rclkrls(dd); | ||
1371 | } | ||
1372 | /* re-set timer for next check */ | ||
1373 | timeoff = cs->relock_interval << 1; | ||
1374 | if (timeoff > HZ) | ||
1375 | timeoff = HZ; | ||
1376 | cs->relock_interval = timeoff; | ||
1377 | } else | ||
1378 | timeoff = HZ; | ||
1379 | mod_timer(&cs->relock_timer, jiffies + timeoff); | ||
1380 | } | ||
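/*
 * Backoff example (illustrative): while the link stays down, the relock
 * interval starts at msecs_to_jiffies(RELOCK_FIRST_MS) (3 msec, set in
 * set_7220_relock_poll() below) and is doubled on each timer run
 * ("relock_interval << 1" above) until it is capped at HZ, i.e. one
 * second between attempts.
 */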
1381 | |||
1382 | void set_7220_relock_poll(struct qib_devdata *dd, int ibup) | ||
1383 | { | ||
1384 | struct qib_chip_specific *cs = dd->cspec; | ||
1385 | |||
1386 | if (ibup) { | ||
1387 | /* We are now up, relax timer to 1 second interval */ | ||
1388 | if (cs->relock_timer_active) { | ||
1389 | cs->relock_interval = HZ; | ||
1390 | mod_timer(&cs->relock_timer, jiffies + HZ); | ||
1391 | } | ||
1392 | } else { | ||
1393 | /* Transition to down, (re-)set timer to short interval. */ | ||
1394 | unsigned int timeout; | ||
1395 | |||
1396 | timeout = msecs_to_jiffies(RELOCK_FIRST_MS); | ||
1397 | if (timeout == 0) | ||
1398 | timeout = 1; | ||
1399 | /* If timer has not yet been started, do so. */ | ||
1400 | if (!cs->relock_timer_active) { | ||
1401 | cs->relock_timer_active = 1; | ||
1402 | init_timer(&cs->relock_timer); | ||
1403 | cs->relock_timer.function = qib_run_relock; | ||
1404 | cs->relock_timer.data = (unsigned long) dd; | ||
1405 | cs->relock_interval = timeout; | ||
1406 | cs->relock_timer.expires = jiffies + timeout; | ||
1407 | add_timer(&cs->relock_timer); | ||
1408 | } else { | ||
1409 | cs->relock_interval = timeout; | ||
1410 | mod_timer(&cs->relock_timer, jiffies + timeout); | ||
1411 | } | ||
1412 | } | ||
1413 | } | ||