Diffstat (limited to 'drivers/infiniband/hw/qib/qib_iba7220.c')
-rw-r--r-- | drivers/infiniband/hw/qib/qib_iba7220.c | 4618
1 files changed, 4618 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 000000000000..6fd8d74e7392
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | /* | ||
35 | * This file contains all of the code that is specific to the | ||
36 | * QLogic_IB 7220 chip (except that specific to the SerDes) | ||
37 | */ | ||
38 | |||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/io.h> | ||
43 | #include <rdma/ib_verbs.h> | ||
44 | |||
45 | #include "qib.h" | ||
46 | #include "qib_7220.h" | ||
47 | |||
48 | static void qib_setup_7220_setextled(struct qib_pportdata *, u32); | ||
49 | static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t); | ||
50 | static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op); | ||
51 | static u32 qib_7220_iblink_state(u64); | ||
52 | static u8 qib_7220_phys_portstate(u64); | ||
53 | static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16); | ||
54 | static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16); | ||
55 | |||
56 | /* | ||
57 | * This file contains almost all the chip-specific register information and | ||
58 | * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the | ||
59 | * exception of SerDes support, which is in qib_sd7220.c. | ||
60 | */ | ||
61 | |||
62 | /* Below uses machine-generated qib_chipnum_regs.h file */ | ||
63 | #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) | ||
64 | |||
65 | /* Use defines to tie machine-generated names to lower-case names */ | ||
66 | #define kr_control KREG_IDX(Control) | ||
67 | #define kr_counterregbase KREG_IDX(CntrRegBase) | ||
68 | #define kr_errclear KREG_IDX(ErrClear) | ||
69 | #define kr_errmask KREG_IDX(ErrMask) | ||
70 | #define kr_errstatus KREG_IDX(ErrStatus) | ||
71 | #define kr_extctrl KREG_IDX(EXTCtrl) | ||
72 | #define kr_extstatus KREG_IDX(EXTStatus) | ||
73 | #define kr_gpio_clear KREG_IDX(GPIOClear) | ||
74 | #define kr_gpio_mask KREG_IDX(GPIOMask) | ||
75 | #define kr_gpio_out KREG_IDX(GPIOOut) | ||
76 | #define kr_gpio_status KREG_IDX(GPIOStatus) | ||
77 | #define kr_hrtbt_guid KREG_IDX(HRTBT_GUID) | ||
78 | #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) | ||
79 | #define kr_hwerrclear KREG_IDX(HwErrClear) | ||
80 | #define kr_hwerrmask KREG_IDX(HwErrMask) | ||
81 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | ||
82 | #define kr_ibcctrl KREG_IDX(IBCCtrl) | ||
83 | #define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl) | ||
84 | #define kr_ibcddrstatus KREG_IDX(IBCDDRStatus) | ||
85 | #define kr_ibcstatus KREG_IDX(IBCStatus) | ||
86 | #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) | ||
87 | #define kr_intclear KREG_IDX(IntClear) | ||
88 | #define kr_intmask KREG_IDX(IntMask) | ||
89 | #define kr_intstatus KREG_IDX(IntStatus) | ||
90 | #define kr_ncmodectrl KREG_IDX(IBNCModeCtrl) | ||
91 | #define kr_palign KREG_IDX(PageAlign) | ||
92 | #define kr_partitionkey KREG_IDX(RcvPartitionKey) | ||
93 | #define kr_portcnt KREG_IDX(PortCnt) | ||
94 | #define kr_rcvbthqp KREG_IDX(RcvBTHQP) | ||
95 | #define kr_rcvctrl KREG_IDX(RcvCtrl) | ||
96 | #define kr_rcvegrbase KREG_IDX(RcvEgrBase) | ||
97 | #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) | ||
98 | #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) | ||
99 | #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) | ||
100 | #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) | ||
101 | #define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt) | ||
102 | #define kr_rcvtidbase KREG_IDX(RcvTIDBase) | ||
103 | #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) | ||
104 | #define kr_revision KREG_IDX(Revision) | ||
105 | #define kr_scratch KREG_IDX(Scratch) | ||
106 | #define kr_sendbuffererror KREG_IDX(SendBufErr0) | ||
107 | #define kr_sendctrl KREG_IDX(SendCtrl) | ||
108 | #define kr_senddmabase KREG_IDX(SendDmaBase) | ||
109 | #define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0) | ||
110 | #define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1) | ||
111 | #define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2) | ||
112 | #define kr_senddmahead KREG_IDX(SendDmaHead) | ||
113 | #define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr) | ||
114 | #define kr_senddmalengen KREG_IDX(SendDmaLenGen) | ||
115 | #define kr_senddmastatus KREG_IDX(SendDmaStatus) | ||
116 | #define kr_senddmatail KREG_IDX(SendDmaTail) | ||
117 | #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) | ||
118 | #define kr_sendpiobufbase KREG_IDX(SendBufBase) | ||
119 | #define kr_sendpiobufcnt KREG_IDX(SendBufCnt) | ||
120 | #define kr_sendpiosize KREG_IDX(SendBufSize) | ||
121 | #define kr_sendregbase KREG_IDX(SendRegBase) | ||
122 | #define kr_userregbase KREG_IDX(UserRegBase) | ||
123 | #define kr_xgxs_cfg KREG_IDX(XGXSCfg) | ||
124 | |||
125 | /* These must only be written via qib_write_kreg_ctxt() */ | ||
126 | #define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0) | ||
127 | #define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) | ||
128 | |||
129 | |||
130 | #define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \ | ||
131 | QIB_7220_LBIntCnt_OFFS) / sizeof(u64)) | ||
132 | |||
133 | #define cr_badformat CREG_IDX(RxVersionErrCnt) | ||
134 | #define cr_erricrc CREG_IDX(RxICRCErrCnt) | ||
135 | #define cr_errlink CREG_IDX(RxLinkMalformCnt) | ||
136 | #define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt) | ||
137 | #define cr_errpkey CREG_IDX(RxPKeyMismatchCnt) | ||
138 | #define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt) | ||
139 | #define cr_err_rlen CREG_IDX(RxLenErrCnt) | ||
140 | #define cr_errslen CREG_IDX(TxLenErrCnt) | ||
141 | #define cr_errtidfull CREG_IDX(RxTIDFullErrCnt) | ||
142 | #define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt) | ||
143 | #define cr_errvcrc CREG_IDX(RxVCRCErrCnt) | ||
144 | #define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt) | ||
145 | #define cr_lbint CREG_IDX(LBIntCnt) | ||
146 | #define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) | ||
147 | #define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt) | ||
148 | #define cr_lbflowstall CREG_IDX(LBFlowStallCnt) | ||
149 | #define cr_pktrcv CREG_IDX(RxDataPktCnt) | ||
150 | #define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) | ||
151 | #define cr_pktsend CREG_IDX(TxDataPktCnt) | ||
152 | #define cr_pktsendflow CREG_IDX(TxFlowPktCnt) | ||
153 | #define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt) | ||
154 | #define cr_rcvebp CREG_IDX(RxEBPCnt) | ||
155 | #define cr_rcvovfl CREG_IDX(RxBufOvflCnt) | ||
156 | #define cr_senddropped CREG_IDX(TxDroppedPktCnt) | ||
157 | #define cr_sendstall CREG_IDX(TxFlowStallCnt) | ||
158 | #define cr_sendunderrun CREG_IDX(TxUnderrunCnt) | ||
159 | #define cr_wordrcv CREG_IDX(RxDwordCnt) | ||
160 | #define cr_wordsend CREG_IDX(TxDwordCnt) | ||
161 | #define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt) | ||
162 | #define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt) | ||
163 | #define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) | ||
164 | #define cr_iblinkdown CREG_IDX(IBLinkDownedCnt) | ||
165 | #define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt) | ||
166 | #define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt) | ||
167 | #define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt) | ||
168 | #define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) | ||
169 | #define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) | ||
170 | #define cr_rxvlerr CREG_IDX(RxVlErrCnt) | ||
171 | #define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt) | ||
172 | #define cr_psstat CREG_IDX(PSStat) | ||
173 | #define cr_psstart CREG_IDX(PSStart) | ||
174 | #define cr_psinterval CREG_IDX(PSInterval) | ||
175 | #define cr_psrcvdatacount CREG_IDX(PSRcvDataCount) | ||
176 | #define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount) | ||
177 | #define cr_psxmitdatacount CREG_IDX(PSXmitDataCount) | ||
178 | #define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount) | ||
179 | #define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount) | ||
180 | #define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt) | ||
181 | #define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt) | ||
182 | |||
183 | #define SYM_RMASK(regname, fldname) ((u64) \ | ||
184 | QIB_7220_##regname##_##fldname##_RMASK) | ||
185 | #define SYM_MASK(regname, fldname) ((u64) \ | ||
186 | QIB_7220_##regname##_##fldname##_RMASK << \ | ||
187 | QIB_7220_##regname##_##fldname##_LSB) | ||
188 | #define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB) | ||
189 | #define SYM_FIELD(value, regname, fldname) ((u64) \ | ||
190 | (((value) >> SYM_LSB(regname, fldname)) & \ | ||
191 | SYM_RMASK(regname, fldname))) | ||
192 | #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) | ||
193 | #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) | ||
194 | |||
195 | /* ibcctrl bits */ | ||
196 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | ||
197 | /* cycle through TS1/TS2 till OK */ | ||
198 | #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 | ||
199 | /* wait for TS1, then go on */ | ||
200 | #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 | ||
201 | #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 | ||
202 | |||
203 | #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ | ||
204 | #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ | ||
205 | #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ | ||
206 | |||
207 | #define BLOB_7220_IBCHG 0x81 | ||
208 | |||
209 | /* | ||
210 | * We could have a single register get/put routine that takes a group type, | ||
211 | * but this is somewhat clearer and cleaner. It also gives us some error | ||
212 | * checking. 64 bit register reads should always work, but are inefficient | ||
213 | * on Opteron (the northbridge always generates 2 separate HT 32 bit reads), | ||
214 | * so we use kreg32 wherever possible. User register and counter register | ||
215 | * reads are always 32 bit reads, so there is only one form of those routines. | ||
216 | */ | ||
217 | |||
218 | /** | ||
219 | * qib_read_ureg32 - read 32-bit virtualized per-context register | ||
220 | * @dd: device | ||
221 | * @regno: register number | ||
222 | * @ctxt: context number | ||
223 | * | ||
224 | * Return the contents of a register that is virtualized to be per context. | ||
225 | * Returns 0 on errors (not distinguishable from valid contents at | ||
226 | * runtime; we may add a separate error variable at some point). | ||
227 | */ | ||
228 | static inline u32 qib_read_ureg32(const struct qib_devdata *dd, | ||
229 | enum qib_ureg regno, int ctxt) | ||
230 | { | ||
231 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
232 | return 0; | ||
233 | |||
234 | if (dd->userbase) | ||
235 | return readl(regno + (u64 __iomem *) | ||
236 | ((char __iomem *)dd->userbase + | ||
237 | dd->ureg_align * ctxt)); | ||
238 | else | ||
239 | return readl(regno + (u64 __iomem *) | ||
240 | (dd->uregbase + | ||
241 | (char __iomem *)dd->kregbase + | ||
242 | dd->ureg_align * ctxt)); | ||
243 | } | ||
244 | |||
245 | /** | ||
246 | * qib_write_ureg - write virtualized per-context register | ||
247 | * @dd: device | ||
248 | * @regno: register number | ||
249 | * @value: value | ||
250 | * @ctxt: context | ||
251 | * | ||
252 | * Write the contents of a register that is virtualized to be per context. | ||
253 | */ | ||
254 | static inline void qib_write_ureg(const struct qib_devdata *dd, | ||
255 | enum qib_ureg regno, u64 value, int ctxt) | ||
256 | { | ||
257 | u64 __iomem *ubase; | ||
258 | |||
259 | if (dd->userbase) | ||
260 | ubase = (u64 __iomem *) | ||
261 | ((char __iomem *) dd->userbase + | ||
262 | dd->ureg_align * ctxt); | ||
263 | else | ||
264 | ubase = (u64 __iomem *) | ||
265 | (dd->uregbase + | ||
266 | (char __iomem *) dd->kregbase + | ||
267 | dd->ureg_align * ctxt); | ||
268 | |||
269 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | ||
270 | writeq(value, &ubase[regno]); | ||
271 | } | ||
272 | |||
273 | /** | ||
274 | * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register | ||
275 | * @dd: the qlogic_ib device | ||
276 | * @regno: the register number to write | ||
277 | * @ctxt: the context containing the register | ||
278 | * @value: the value to write | ||
279 | */ | ||
280 | static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, | ||
281 | const u16 regno, unsigned ctxt, | ||
282 | u64 value) | ||
283 | { | ||
284 | qib_write_kreg(dd, regno + ctxt, value); | ||
285 | } | ||
286 | |||
287 | static inline void write_7220_creg(const struct qib_devdata *dd, | ||
288 | u16 regno, u64 value) | ||
289 | { | ||
290 | if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT)) | ||
291 | writeq(value, &dd->cspec->cregbase[regno]); | ||
292 | } | ||
293 | |||
294 | static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno) | ||
295 | { | ||
296 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
297 | return 0; | ||
298 | return readq(&dd->cspec->cregbase[regno]); | ||
299 | } | ||
300 | |||
301 | static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno) | ||
302 | { | ||
303 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
304 | return 0; | ||
305 | return readl(&dd->cspec->cregbase[regno]); | ||
306 | } | ||
307 | |||
308 | /* kr_revision bits */ | ||
309 | #define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1) | ||
310 | #define QLOGIC_IB_R_EMULATORREV_SHIFT 40 | ||
311 | |||
312 | /* kr_control bits */ | ||
313 | #define QLOGIC_IB_C_RESET (1U << 7) | ||
314 | |||
315 | /* kr_intstatus, kr_intclear, kr_intmask bits */ | ||
316 | #define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1) | ||
317 | #define QLOGIC_IB_I_RCVURG_SHIFT 32 | ||
318 | #define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1) | ||
319 | #define QLOGIC_IB_I_RCVAVAIL_SHIFT 0 | ||
320 | #define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27) | ||
321 | |||
322 | #define QLOGIC_IB_C_FREEZEMODE 0x00000002 | ||
323 | #define QLOGIC_IB_C_LINKENABLE 0x00000004 | ||
324 | |||
325 | #define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL | ||
326 | #define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL | ||
327 | #define QLOGIC_IB_I_ERROR 0x0000000080000000ULL | ||
328 | #define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL | ||
329 | #define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL | ||
330 | #define QLOGIC_IB_I_GPIO 0x0000000010000000ULL | ||
331 | |||
332 | /* variables for sanity checking interrupt and errors */ | ||
333 | #define QLOGIC_IB_I_BITSEXTANT \ | ||
334 | (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \ | ||
335 | (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \ | ||
336 | (QLOGIC_IB_I_RCVAVAIL_MASK << \ | ||
337 | QLOGIC_IB_I_RCVAVAIL_SHIFT) | \ | ||
338 | QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \ | ||
339 | QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \ | ||
340 | QLOGIC_IB_I_SERDESTRIMDONE) | ||
341 | |||
342 | #define IB_HWE_BITSEXTANT \ | ||
343 | (HWE_MASK(RXEMemParityErr) | \ | ||
344 | HWE_MASK(TXEMemParityErr) | \ | ||
345 | (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \ | ||
346 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \ | ||
347 | QLOGIC_IB_HWE_PCIE1PLLFAILED | \ | ||
348 | QLOGIC_IB_HWE_PCIE0PLLFAILED | \ | ||
349 | QLOGIC_IB_HWE_PCIEPOISONEDTLP | \ | ||
350 | QLOGIC_IB_HWE_PCIECPLTIMEOUT | \ | ||
351 | QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \ | ||
352 | QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \ | ||
353 | QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \ | ||
354 | HWE_MASK(PowerOnBISTFailed) | \ | ||
355 | QLOGIC_IB_HWE_COREPLL_FBSLIP | \ | ||
356 | QLOGIC_IB_HWE_COREPLL_RFSLIP | \ | ||
357 | QLOGIC_IB_HWE_SERDESPLLFAILED | \ | ||
358 | HWE_MASK(IBCBusToSPCParityErr) | \ | ||
359 | HWE_MASK(IBCBusFromSPCParityErr) | \ | ||
360 | QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \ | ||
361 | QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \ | ||
362 | QLOGIC_IB_HWE_SDMAMEMREADERR | \ | ||
363 | QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \ | ||
364 | QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \ | ||
365 | QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \ | ||
366 | QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \ | ||
367 | QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \ | ||
368 | QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \ | ||
369 | QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \ | ||
370 | QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \ | ||
371 | QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR) | ||
372 | |||
373 | #define IB_E_BITSEXTANT \ | ||
374 | (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \ | ||
375 | ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
376 | ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \ | ||
377 | ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \ | ||
378 | ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \ | ||
379 | ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \ | ||
380 | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \ | ||
381 | ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \ | ||
382 | ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \ | ||
383 | ERR_MASK(SendSpecialTriggerErr) | \ | ||
384 | ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \ | ||
385 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \ | ||
386 | ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
387 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
388 | ERR_MASK(SendPioArmLaunchErr) | \ | ||
389 | ERR_MASK(SendUnexpectedPktNumErr) | \ | ||
390 | ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \ | ||
391 | ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \ | ||
392 | ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ | ||
393 | ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ | ||
394 | ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ | ||
395 | ERR_MASK(SDmaUnexpDataErr) | \ | ||
396 | ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \ | ||
397 | ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \ | ||
398 | ERR_MASK(SDmaDescAddrMisalignErr) | \ | ||
399 | ERR_MASK(InvalidEEPCmd)) | ||
400 | |||
401 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | ||
402 | #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL | ||
403 | #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0 | ||
404 | #define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL | ||
405 | #define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL | ||
406 | #define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL | ||
407 | #define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL | ||
408 | #define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL | ||
409 | #define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL | ||
410 | #define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL | ||
411 | #define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL | ||
412 | #define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL | ||
413 | #define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL | ||
414 | /* specific to this chip */ | ||
415 | #define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL | ||
416 | #define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL | ||
417 | #define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL | ||
418 | #define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL | ||
419 | #define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL | ||
420 | #define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL | ||
421 | #define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL | ||
422 | #define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL | ||
423 | #define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL | ||
424 | #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL | ||
425 | #define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL | ||
426 | #define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL | ||
427 | |||
428 | #define IBA7220_IBCC_LINKCMD_SHIFT 19 | ||
429 | |||
430 | /* kr_ibcddrctrl bits */ | ||
431 | #define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL | ||
432 | #define IBA7220_IBC_DLIDLMC_SHIFT 32 | ||
433 | |||
434 | #define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \ | ||
435 | SYM_RMASK(IBCDDRCtrl, HRTBT_ENB)) | ||
436 | #define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB) | ||
437 | |||
438 | #define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8) | ||
439 | #define IBA7220_IBC_LREV_MASK 1 | ||
440 | #define IBA7220_IBC_LREV_SHIFT 8 | ||
441 | #define IBA7220_IBC_RXPOL_MASK 1 | ||
442 | #define IBA7220_IBC_RXPOL_SHIFT 7 | ||
443 | #define IBA7220_IBC_WIDTH_SHIFT 5 | ||
444 | #define IBA7220_IBC_WIDTH_MASK 0x3 | ||
445 | #define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT) | ||
446 | #define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT) | ||
447 | #define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT) | ||
448 | #define IBA7220_IBC_SPEED_AUTONEG (1 << 1) | ||
449 | #define IBA7220_IBC_SPEED_SDR (1 << 2) | ||
450 | #define IBA7220_IBC_SPEED_DDR (1 << 3) | ||
451 | #define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1) | ||
452 | #define IBA7220_IBC_IBTA_1_2_MASK (1) | ||
453 | |||
454 | /* kr_ibcddrstatus */ | ||
455 | /* link latency shift is 0, don't bother defining */ | ||
456 | #define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff | ||
457 | |||
458 | /* kr_extstatus bits */ | ||
459 | #define QLOGIC_IB_EXTS_FREQSEL 0x2 | ||
460 | #define QLOGIC_IB_EXTS_SERDESSEL 0x4 | ||
461 | #define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000 | ||
462 | #define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000 | ||
463 | |||
464 | /* kr_xgxsconfig bits */ | ||
465 | #define QLOGIC_IB_XGXS_RESET 0x5ULL | ||
466 | #define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63) | ||
467 | |||
468 | /* kr_rcvpktledcnt */ | ||
469 | #define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */ | ||
470 | #define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */ | ||
471 | |||
472 | #define _QIB_GPIO_SDA_NUM 1 | ||
473 | #define _QIB_GPIO_SCL_NUM 0 | ||
474 | #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */ | ||
475 | #define QIB_TWSI_TEMP_DEV 0x98 | ||
476 | |||
477 | /* HW counter clock is at 4nsec */ | ||
478 | #define QIB_7220_PSXMITWAIT_CHECK_RATE 4000 | ||
479 | |||
480 | #define IBA7220_R_INTRAVAIL_SHIFT 17 | ||
481 | #define IBA7220_R_PKEY_DIS_SHIFT 34 | ||
482 | #define IBA7220_R_TAILUPD_SHIFT 35 | ||
483 | #define IBA7220_R_CTXTCFG_SHIFT 36 | ||
484 | |||
485 | #define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ | ||
486 | |||
487 | /* | ||
488 | * The size bits give us 2^N, in KB units. 0 marks an entry as invalid, | ||
489 | * and 7 is reserved. We currently use only 2KB and 4KB. | ||
490 | */ | ||
491 | #define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */ | ||
492 | #define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */ | ||
493 | #define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */ | ||
494 | #define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ | ||
495 | #define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ | ||
496 | #define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ | ||
497 | |||
498 | #define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */ | ||
499 | |||
500 | /* packet rate matching delay multiplier */ | ||
501 | static u8 rate_to_delay[2][2] = { | ||
502 | /* 1x, 4x */ | ||
503 | { 8, 2 }, /* SDR */ | ||
504 | { 4, 1 } /* DDR */ | ||
505 | }; | ||
506 | |||
507 | static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { | ||
508 | [IB_RATE_2_5_GBPS] = 8, | ||
509 | [IB_RATE_5_GBPS] = 4, | ||
510 | [IB_RATE_10_GBPS] = 2, | ||
511 | [IB_RATE_20_GBPS] = 1 | ||
512 | }; | ||
513 | |||
514 | #define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive) | ||
515 | #define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive) | ||
516 | |||
517 | /* link training states, from IBC */ | ||
518 | #define IB_7220_LT_STATE_DISABLED 0x00 | ||
519 | #define IB_7220_LT_STATE_LINKUP 0x01 | ||
520 | #define IB_7220_LT_STATE_POLLACTIVE 0x02 | ||
521 | #define IB_7220_LT_STATE_POLLQUIET 0x03 | ||
522 | #define IB_7220_LT_STATE_SLEEPDELAY 0x04 | ||
523 | #define IB_7220_LT_STATE_SLEEPQUIET 0x05 | ||
524 | #define IB_7220_LT_STATE_CFGDEBOUNCE 0x08 | ||
525 | #define IB_7220_LT_STATE_CFGRCVFCFG 0x09 | ||
526 | #define IB_7220_LT_STATE_CFGWAITRMT 0x0a | ||
527 | #define IB_7220_LT_STATE_CFGIDLE 0x0b | ||
528 | #define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c | ||
529 | #define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e | ||
530 | #define IB_7220_LT_STATE_RECOVERIDLE 0x0f | ||
531 | |||
532 | /* link state machine states from IBC */ | ||
533 | #define IB_7220_L_STATE_DOWN 0x0 | ||
534 | #define IB_7220_L_STATE_INIT 0x1 | ||
535 | #define IB_7220_L_STATE_ARM 0x2 | ||
536 | #define IB_7220_L_STATE_ACTIVE 0x3 | ||
537 | #define IB_7220_L_STATE_ACT_DEFER 0x4 | ||
538 | |||
539 | static const u8 qib_7220_physportstate[0x20] = { | ||
540 | [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, | ||
541 | [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, | ||
542 | [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, | ||
543 | [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, | ||
544 | [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, | ||
545 | [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, | ||
546 | [IB_7220_LT_STATE_CFGDEBOUNCE] = | ||
547 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
548 | [IB_7220_LT_STATE_CFGRCVFCFG] = | ||
549 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
550 | [IB_7220_LT_STATE_CFGWAITRMT] = | ||
551 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
552 | [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
553 | [IB_7220_LT_STATE_RECOVERRETRAIN] = | ||
554 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
555 | [IB_7220_LT_STATE_RECOVERWAITRMT] = | ||
556 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
557 | [IB_7220_LT_STATE_RECOVERIDLE] = | ||
558 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
559 | [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
560 | [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
561 | [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
562 | [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
563 | [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
564 | [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
565 | [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
566 | [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN | ||
567 | }; | ||
568 | |||
569 | int qib_special_trigger; | ||
570 | module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO); | ||
571 | MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch"); | ||
572 | |||
573 | #define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr) | ||
574 | #define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr) | ||
575 | |||
576 | #define SYM_MASK_BIT(regname, fldname, bit) ((u64) \ | ||
577 | (1ULL << (SYM_LSB(regname, fldname) + (bit)))) | ||
578 | |||
579 | #define TXEMEMPARITYERR_PIOBUF \ | ||
580 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0) | ||
581 | #define TXEMEMPARITYERR_PIOPBC \ | ||
582 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1) | ||
583 | #define TXEMEMPARITYERR_PIOLAUNCHFIFO \ | ||
584 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2) | ||
585 | |||
586 | #define RXEMEMPARITYERR_RCVBUF \ | ||
587 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0) | ||
588 | #define RXEMEMPARITYERR_LOOKUPQ \ | ||
589 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1) | ||
590 | #define RXEMEMPARITYERR_EXPTID \ | ||
591 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2) | ||
592 | #define RXEMEMPARITYERR_EAGERTID \ | ||
593 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3) | ||
594 | #define RXEMEMPARITYERR_FLAGBUF \ | ||
595 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4) | ||
596 | #define RXEMEMPARITYERR_DATAINFO \ | ||
597 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5) | ||
598 | #define RXEMEMPARITYERR_HDRINFO \ | ||
599 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6) | ||
600 | |||
601 | /* 7220 specific hardware errors... */ | ||
602 | static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = { | ||
603 | /* generic hardware errors */ | ||
604 | QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"), | ||
605 | QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"), | ||
606 | |||
607 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF, | ||
608 | "TXE PIOBUF Memory Parity"), | ||
609 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC, | ||
610 | "TXE PIOPBC Memory Parity"), | ||
611 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO, | ||
612 | "TXE PIOLAUNCHFIFO Memory Parity"), | ||
613 | |||
614 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF, | ||
615 | "RXE RCVBUF Memory Parity"), | ||
616 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ, | ||
617 | "RXE LOOKUPQ Memory Parity"), | ||
618 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID, | ||
619 | "RXE EAGERTID Memory Parity"), | ||
620 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID, | ||
621 | "RXE EXPTID Memory Parity"), | ||
622 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF, | ||
623 | "RXE FLAGBUF Memory Parity"), | ||
624 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO, | ||
625 | "RXE DATAINFO Memory Parity"), | ||
626 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO, | ||
627 | "RXE HDRINFO Memory Parity"), | ||
628 | |||
629 | /* chip-specific hardware errors */ | ||
630 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP, | ||
631 | "PCIe Poisoned TLP"), | ||
632 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT, | ||
633 | "PCIe completion timeout"), | ||
634 | /* | ||
635 | * In practice, it's unlikely that we'll see PCIe PLL, bus parity, | ||
636 | * or memory parity failures, because most likely we | ||
637 | * won't be able to talk to the core of the chip. Nonetheless, we | ||
638 | * might see them, if they are in parts of the PCIe core that aren't | ||
639 | * essential. | ||
640 | */ | ||
641 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED, | ||
642 | "PCIePLL1"), | ||
643 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED, | ||
644 | "PCIePLL0"), | ||
645 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH, | ||
646 | "PCIe XTLH core parity"), | ||
647 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM, | ||
648 | "PCIe ADM TX core parity"), | ||
649 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM, | ||
650 | "PCIe ADM RX core parity"), | ||
651 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED, | ||
652 | "SerDes PLL"), | ||
653 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR, | ||
654 | "PCIe cpl data queue"), | ||
655 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR, | ||
656 | "PCIe cpl header queue"), | ||
657 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR, | ||
658 | "Send DMA memory read"), | ||
659 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED, | ||
660 | "uC PLL clock not locked"), | ||
661 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT, | ||
662 | "PCIe serdes Q0 no clock"), | ||
663 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT, | ||
664 | "PCIe serdes Q1 no clock"), | ||
665 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT, | ||
666 | "PCIe serdes Q2 no clock"), | ||
667 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT, | ||
668 | "PCIe serdes Q3 no clock"), | ||
669 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR, | ||
670 | "DDS RXEQ memory parity"), | ||
671 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR, | ||
672 | "IB uC memory parity"), | ||
673 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR, | ||
674 | "PCIe uC oct0 memory parity"), | ||
675 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR, | ||
676 | "PCIe uC oct1 memory parity"), | ||
677 | }; | ||
678 | |||
679 | #define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID) | ||
680 | |||
681 | #define QLOGIC_IB_E_PKTERRS (\ | ||
682 | ERR_MASK(SendPktLenErr) | \ | ||
683 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
684 | ERR_MASK(RcvVCRCErr) | \ | ||
685 | ERR_MASK(RcvICRCErr) | \ | ||
686 | ERR_MASK(RcvShortPktLenErr) | \ | ||
687 | ERR_MASK(RcvEBPErr)) | ||
688 | |||
689 | /* Convenience for decoding Send DMA errors */ | ||
690 | #define QLOGIC_IB_E_SDMAERRS ( \ | ||
691 | ERR_MASK(SDmaGenMismatchErr) | \ | ||
692 | ERR_MASK(SDmaOutOfBoundErr) | \ | ||
693 | ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ | ||
694 | ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ | ||
695 | ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ | ||
696 | ERR_MASK(SDmaUnexpDataErr) | \ | ||
697 | ERR_MASK(SDmaDescAddrMisalignErr) | \ | ||
698 | ERR_MASK(SDmaDisabledErr) | \ | ||
699 | ERR_MASK(SendBufMisuseErr)) | ||
700 | |||
701 | /* These are all rcv-related errors which we want to count for stats */ | ||
702 | #define E_SUM_PKTERRS \ | ||
703 | (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \ | ||
704 | ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \ | ||
705 | ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \ | ||
706 | ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
707 | ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \ | ||
708 | ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr)) | ||
709 | |||
710 | /* These are all send-related errors which we want to count for stats */ | ||
711 | #define E_SUM_ERRS \ | ||
712 | (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \ | ||
713 | ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
714 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \ | ||
715 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ | ||
716 | ERR_MASK(InvalidAddrErr)) | ||
717 | |||
718 | /* | ||
719 | * This is similar to E_SUM_ERRS, but we can't ignore armlaunch or | ||
720 | * errors unrelated to freeze and cancelling buffers. We can't ignore | ||
721 | * armlaunch because we could get more while still cleaning up, and we | ||
722 | * need to cancel those as they happen. | ||
723 | */ | ||
724 | #define E_SPKT_ERRS_IGNORE \ | ||
725 | (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
726 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \ | ||
727 | ERR_MASK(SendPktLenErr)) | ||
728 | |||
729 | /* | ||
730 | * These are errors that can occur when the link changes state while | ||
731 | * a packet is being sent or received. This doesn't cover things | ||
732 | * like EBP or VCRC that can be the result of the sender having the | ||
733 | * link change state, so we receive a "known bad" packet. | ||
734 | */ | ||
735 | #define E_SUM_LINK_PKTERRS \ | ||
736 | (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
737 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ | ||
738 | ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
739 | ERR_MASK(RcvUnexpectedCharErr)) | ||
740 | |||
741 | static void autoneg_7220_work(struct work_struct *); | ||
742 | static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *); | ||
743 | |||
744 | /* | ||
745 | * Called when we might have an error that is specific to a particular | ||
746 | * PIO buffer, and may need to cancel that buffer so it can be re-used; | ||
747 | * here we don't need to force the update of pioavail. | ||
748 | */ | ||
749 | static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd) | ||
750 | { | ||
751 | unsigned long sbuf[3]; | ||
752 | struct qib_devdata *dd = ppd->dd; | ||
753 | |||
754 | /* | ||
755 | * It's possible that sendbuffererror could have bits set; might | ||
756 | * have already done this as a result of hardware error handling. | ||
757 | */ | ||
758 | /* read these before writing errorclear */ | ||
759 | sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); | ||
760 | sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); | ||
761 | sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); | ||
762 | |||
763 | if (sbuf[0] || sbuf[1] || sbuf[2]) | ||
764 | qib_disarm_piobufs_set(dd, sbuf, | ||
765 | dd->piobcnt2k + dd->piobcnt4k); | ||
766 | } | ||
767 | |||
768 | static void qib_7220_txe_recover(struct qib_devdata *dd) | ||
769 | { | ||
770 | qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n"); | ||
771 | qib_disarm_7220_senderrbufs(dd->pport); | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * This is called with interrupts disabled and sdma_lock held. | ||
776 | */ | ||
777 | static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) | ||
778 | { | ||
779 | struct qib_devdata *dd = ppd->dd; | ||
780 | u64 set_sendctrl = 0; | ||
781 | u64 clr_sendctrl = 0; | ||
782 | |||
783 | if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) | ||
784 | set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); | ||
785 | else | ||
786 | clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); | ||
787 | |||
788 | if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) | ||
789 | set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); | ||
790 | else | ||
791 | clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); | ||
792 | |||
793 | if (op & QIB_SDMA_SENDCTRL_OP_HALT) | ||
794 | set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); | ||
795 | else | ||
796 | clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); | ||
797 | |||
798 | spin_lock(&dd->sendctrl_lock); | ||
799 | |||
800 | dd->sendctrl |= set_sendctrl; | ||
801 | dd->sendctrl &= ~clr_sendctrl; | ||
802 | |||
803 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
804 | qib_write_kreg(dd, kr_scratch, 0); | ||
805 | |||
806 | spin_unlock(&dd->sendctrl_lock); | ||
807 | } | ||
808 | |||
809 | static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd, | ||
810 | u64 err, char *buf, size_t blen) | ||
811 | { | ||
812 | static const struct { | ||
813 | u64 err; | ||
814 | const char *msg; | ||
815 | } errs[] = { | ||
816 | { ERR_MASK(SDmaGenMismatchErr), | ||
817 | "SDmaGenMismatch" }, | ||
818 | { ERR_MASK(SDmaOutOfBoundErr), | ||
819 | "SDmaOutOfBound" }, | ||
820 | { ERR_MASK(SDmaTailOutOfBoundErr), | ||
821 | "SDmaTailOutOfBound" }, | ||
822 | { ERR_MASK(SDmaBaseErr), | ||
823 | "SDmaBase" }, | ||
824 | { ERR_MASK(SDma1stDescErr), | ||
825 | "SDma1stDesc" }, | ||
826 | { ERR_MASK(SDmaRpyTagErr), | ||
827 | "SDmaRpyTag" }, | ||
828 | { ERR_MASK(SDmaDwEnErr), | ||
829 | "SDmaDwEn" }, | ||
830 | { ERR_MASK(SDmaMissingDwErr), | ||
831 | "SDmaMissingDw" }, | ||
832 | { ERR_MASK(SDmaUnexpDataErr), | ||
833 | "SDmaUnexpData" }, | ||
834 | { ERR_MASK(SDmaDescAddrMisalignErr), | ||
835 | "SDmaDescAddrMisalign" }, | ||
836 | { ERR_MASK(SendBufMisuseErr), | ||
837 | "SendBufMisuse" }, | ||
838 | { ERR_MASK(SDmaDisabledErr), | ||
839 | "SDmaDisabled" }, | ||
840 | }; | ||
841 | int i; | ||
842 | size_t bidx = 0; | ||
843 | |||
844 | for (i = 0; i < ARRAY_SIZE(errs); i++) { | ||
845 | if (err & errs[i].err) | ||
846 | bidx += scnprintf(buf + bidx, blen - bidx, | ||
847 | "%s ", errs[i].msg); | ||
848 | } | ||
849 | } | ||
850 | |||
851 | /* | ||
852 | * This is called as part of link down clean up; disarm and flush | ||
853 | * all send buffers so that SMP packets can be sent. | ||
854 | */ | ||
855 | static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd) | ||
856 | { | ||
857 | /* This will trigger the Abort interrupt */ | ||
858 | sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH | | ||
859 | QIB_SENDCTRL_AVAIL_BLIP); | ||
860 | ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */ | ||
861 | } | ||
862 | |||
863 | static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd) | ||
864 | { | ||
865 | /* | ||
866 | * Set SendDmaLenGen twice: first with the MSB of the generation | ||
867 | * count clear, then with it set, to enable generation checking | ||
868 | * and load the internal generation counter. | ||
869 | */ | ||
870 | qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt); | ||
871 | qib_write_kreg(ppd->dd, kr_senddmalengen, | ||
872 | ppd->sdma_descq_cnt | | ||
873 | (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB)); | ||
874 | } | ||
875 | |||
876 | static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd) | ||
877 | { | ||
878 | qib_sdma_7220_setlengen(ppd); | ||
879 | qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */ | ||
880 | ppd->sdma_head_dma[0] = 0; | ||
881 | } | ||
882 | |||
883 | #define DISABLES_SDMA ( \ | ||
884 | ERR_MASK(SDmaDisabledErr) | \ | ||
885 | ERR_MASK(SDmaBaseErr) | \ | ||
886 | ERR_MASK(SDmaTailOutOfBoundErr) | \ | ||
887 | ERR_MASK(SDmaOutOfBoundErr) | \ | ||
888 | ERR_MASK(SDma1stDescErr) | \ | ||
889 | ERR_MASK(SDmaRpyTagErr) | \ | ||
890 | ERR_MASK(SDmaGenMismatchErr) | \ | ||
891 | ERR_MASK(SDmaDescAddrMisalignErr) | \ | ||
892 | ERR_MASK(SDmaMissingDwErr) | \ | ||
893 | ERR_MASK(SDmaDwEnErr)) | ||
894 | |||
895 | static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs) | ||
896 | { | ||
897 | unsigned long flags; | ||
898 | struct qib_devdata *dd = ppd->dd; | ||
899 | char *msg; | ||
900 | |||
901 | errs &= QLOGIC_IB_E_SDMAERRS; | ||
902 | |||
903 | msg = dd->cspec->sdmamsgbuf; | ||
904 | qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); | ||
905 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
906 | |||
907 | if (errs & ERR_MASK(SendBufMisuseErr)) { | ||
908 | unsigned long sbuf[3]; | ||
909 | |||
910 | sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); | ||
911 | sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); | ||
912 | sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); | ||
913 | |||
914 | qib_dev_err(ppd->dd, | ||
915 | "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n", | ||
916 | ppd->dd->unit, ppd->port, sbuf[2], sbuf[1], | ||
917 | sbuf[0]); | ||
918 | } | ||
919 | |||
920 | if (errs & ERR_MASK(SDmaUnexpDataErr)) | ||
921 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit, | ||
922 | ppd->port); | ||
923 | |||
924 | switch (ppd->sdma_state.current_state) { | ||
925 | case qib_sdma_state_s00_hw_down: | ||
926 | /* not expecting any interrupts */ | ||
927 | break; | ||
928 | |||
929 | case qib_sdma_state_s10_hw_start_up_wait: | ||
930 | /* handled in intr path */ | ||
931 | break; | ||
932 | |||
933 | case qib_sdma_state_s20_idle: | ||
934 | /* not expecting any interrupts */ | ||
935 | break; | ||
936 | |||
937 | case qib_sdma_state_s30_sw_clean_up_wait: | ||
938 | /* not expecting any interrupts */ | ||
939 | break; | ||
940 | |||
941 | case qib_sdma_state_s40_hw_clean_up_wait: | ||
942 | if (errs & ERR_MASK(SDmaDisabledErr)) | ||
943 | __qib_sdma_process_event(ppd, | ||
944 | qib_sdma_event_e50_hw_cleaned); | ||
945 | break; | ||
946 | |||
947 | case qib_sdma_state_s50_hw_halt_wait: | ||
948 | /* handled in intr path */ | ||
949 | break; | ||
950 | |||
951 | case qib_sdma_state_s99_running: | ||
952 | if (errs & DISABLES_SDMA) | ||
953 | __qib_sdma_process_event(ppd, | ||
954 | qib_sdma_event_e7220_err_halted); | ||
955 | break; | ||
956 | } | ||
957 | |||
958 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
959 | } | ||
960 | |||
961 | /* | ||
962 | * Decode the error status into strings, deciding whether to always | ||
963 | * print it or not depending on "normal packet errors" vs everything | ||
964 | * else. Return 1 if "real" errors, otherwise 0 if only packet | ||
965 | * errors, so caller can decide what to print with the string. | ||
966 | */ | ||
967 | static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen, | ||
968 | u64 err) | ||
969 | { | ||
970 | int iserr = 1; | ||
971 | |||
972 | *buf = '\0'; | ||
973 | if (err & QLOGIC_IB_E_PKTERRS) { | ||
974 | if (!(err & ~QLOGIC_IB_E_PKTERRS)) | ||
975 | iserr = 0; | ||
976 | if ((err & ERR_MASK(RcvICRCErr)) && | ||
977 | !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr)))) | ||
978 | strlcat(buf, "CRC ", blen); | ||
979 | if (!iserr) | ||
980 | goto done; | ||
981 | } | ||
982 | if (err & ERR_MASK(RcvHdrLenErr)) | ||
983 | strlcat(buf, "rhdrlen ", blen); | ||
984 | if (err & ERR_MASK(RcvBadTidErr)) | ||
985 | strlcat(buf, "rbadtid ", blen); | ||
986 | if (err & ERR_MASK(RcvBadVersionErr)) | ||
987 | strlcat(buf, "rbadversion ", blen); | ||
988 | if (err & ERR_MASK(RcvHdrErr)) | ||
989 | strlcat(buf, "rhdr ", blen); | ||
990 | if (err & ERR_MASK(SendSpecialTriggerErr)) | ||
991 | strlcat(buf, "sendspecialtrigger ", blen); | ||
992 | if (err & ERR_MASK(RcvLongPktLenErr)) | ||
993 | strlcat(buf, "rlongpktlen ", blen); | ||
994 | if (err & ERR_MASK(RcvMaxPktLenErr)) | ||
995 | strlcat(buf, "rmaxpktlen ", blen); | ||
996 | if (err & ERR_MASK(RcvMinPktLenErr)) | ||
997 | strlcat(buf, "rminpktlen ", blen); | ||
998 | if (err & ERR_MASK(SendMinPktLenErr)) | ||
999 | strlcat(buf, "sminpktlen ", blen); | ||
1000 | if (err & ERR_MASK(RcvFormatErr)) | ||
1001 | strlcat(buf, "rformaterr ", blen); | ||
1002 | if (err & ERR_MASK(RcvUnsupportedVLErr)) | ||
1003 | strlcat(buf, "runsupvl ", blen); | ||
1004 | if (err & ERR_MASK(RcvUnexpectedCharErr)) | ||
1005 | strlcat(buf, "runexpchar ", blen); | ||
1006 | if (err & ERR_MASK(RcvIBFlowErr)) | ||
1007 | strlcat(buf, "ribflow ", blen); | ||
1008 | if (err & ERR_MASK(SendUnderRunErr)) | ||
1009 | strlcat(buf, "sunderrun ", blen); | ||
1010 | if (err & ERR_MASK(SendPioArmLaunchErr)) | ||
1011 | strlcat(buf, "spioarmlaunch ", blen); | ||
1012 | if (err & ERR_MASK(SendUnexpectedPktNumErr)) | ||
1013 | strlcat(buf, "sunexperrpktnum ", blen); | ||
1014 | if (err & ERR_MASK(SendDroppedSmpPktErr)) | ||
1015 | strlcat(buf, "sdroppedsmppkt ", blen); | ||
1016 | if (err & ERR_MASK(SendMaxPktLenErr)) | ||
1017 | strlcat(buf, "smaxpktlen ", blen); | ||
1018 | if (err & ERR_MASK(SendUnsupportedVLErr)) | ||
1019 | strlcat(buf, "sunsupVL ", blen); | ||
1020 | if (err & ERR_MASK(InvalidAddrErr)) | ||
1021 | strlcat(buf, "invalidaddr ", blen); | ||
1022 | if (err & ERR_MASK(RcvEgrFullErr)) | ||
1023 | strlcat(buf, "rcvegrfull ", blen); | ||
1024 | if (err & ERR_MASK(RcvHdrFullErr)) | ||
1025 | strlcat(buf, "rcvhdrfull ", blen); | ||
1026 | if (err & ERR_MASK(IBStatusChanged)) | ||
1027 | strlcat(buf, "ibcstatuschg ", blen); | ||
1028 | if (err & ERR_MASK(RcvIBLostLinkErr)) | ||
1029 | strlcat(buf, "riblostlink ", blen); | ||
1030 | if (err & ERR_MASK(HardwareErr)) | ||
1031 | strlcat(buf, "hardware ", blen); | ||
1032 | if (err & ERR_MASK(ResetNegated)) | ||
1033 | strlcat(buf, "reset ", blen); | ||
1034 | if (err & QLOGIC_IB_E_SDMAERRS) | ||
1035 | qib_decode_7220_sdma_errs(dd->pport, err, buf, blen); | ||
1036 | if (err & ERR_MASK(InvalidEEPCmd)) | ||
1037 | strlcat(buf, "invalideepromcmd ", blen); | ||
1038 | done: | ||
1039 | return iserr; | ||
1040 | } | ||
1041 | |||
1042 | static void reenable_7220_chase(unsigned long opaque) | ||
1043 | { | ||
1044 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
1045 | ppd->cpspec->chase_timer.expires = 0; | ||
1046 | qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | ||
1047 | QLOGIC_IB_IBCC_LINKINITCMD_POLL); | ||
1048 | } | ||
1049 | |||
1050 | static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst) | ||
1051 | { | ||
1052 | u8 ibclt; | ||
1053 | u64 tnow; | ||
1054 | |||
1055 | ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState); | ||
1056 | |||
1057 | /* | ||
1058 | * Detect and handle the state chase issue, where we can | ||
1059 | * get stuck if we are unlucky on timing on both sides of | ||
1060 | * the link. If we are, we disable, set a timer, and | ||
1061 | * then re-enable. | ||
1062 | */ | ||
1063 | switch (ibclt) { | ||
1064 | case IB_7220_LT_STATE_CFGRCVFCFG: | ||
1065 | case IB_7220_LT_STATE_CFGWAITRMT: | ||
1066 | case IB_7220_LT_STATE_TXREVLANES: | ||
1067 | case IB_7220_LT_STATE_CFGENH: | ||
1068 | tnow = get_jiffies_64(); | ||
1069 | if (ppd->cpspec->chase_end && | ||
1070 | time_after64(tnow, ppd->cpspec->chase_end)) { | ||
1071 | ppd->cpspec->chase_end = 0; | ||
1072 | qib_set_ib_7220_lstate(ppd, | ||
1073 | QLOGIC_IB_IBCC_LINKCMD_DOWN, | ||
1074 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1075 | ppd->cpspec->chase_timer.expires = jiffies + | ||
1076 | QIB_CHASE_DIS_TIME; | ||
1077 | add_timer(&ppd->cpspec->chase_timer); | ||
1078 | } else if (!ppd->cpspec->chase_end) | ||
1079 | ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; | ||
1080 | break; | ||
1081 | |||
1082 | default: | ||
1083 | ppd->cpspec->chase_end = 0; | ||
1084 | break; | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | static void handle_7220_errors(struct qib_devdata *dd, u64 errs) | ||
1089 | { | ||
1090 | char *msg; | ||
1091 | u64 ignore_this_time = 0; | ||
1092 | u64 iserr = 0; | ||
1093 | int log_idx; | ||
1094 | struct qib_pportdata *ppd = dd->pport; | ||
1095 | u64 mask; | ||
1096 | |||
1097 | /* don't report errors that are masked */ | ||
1098 | errs &= dd->cspec->errormask; | ||
1099 | msg = dd->cspec->emsgbuf; | ||
1100 | |||
1101 | /* do these first, they are most important */ | ||
1102 | if (errs & ERR_MASK(HardwareErr)) | ||
1103 | qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); | ||
1104 | else | ||
1105 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
1106 | if (errs & dd->eep_st_masks[log_idx].errs_to_log) | ||
1107 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
1108 | |||
1109 | if (errs & QLOGIC_IB_E_SDMAERRS) | ||
1110 | sdma_7220_errors(ppd, errs); | ||
1111 | |||
1112 | if (errs & ~IB_E_BITSEXTANT) | ||
1113 | qib_dev_err(dd, "error interrupt with unknown errors " | ||
1114 | "%llx set\n", (unsigned long long) | ||
1115 | (errs & ~IB_E_BITSEXTANT)); | ||
1116 | |||
1117 | if (errs & E_SUM_ERRS) { | ||
1118 | qib_disarm_7220_senderrbufs(ppd); | ||
1119 | if ((errs & E_SUM_LINK_PKTERRS) && | ||
1120 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1121 | /* | ||
1122 | * This can happen when trying to bring the link | ||
1123 | * up, but the IB link changes state at the "wrong" | ||
1124 | * time. The IB logic then complains that the packet | ||
1125 | * isn't valid. We don't want to confuse people, so | ||
1126 | * we just don't print them, except at debug | ||
1127 | */ | ||
1128 | ignore_this_time = errs & E_SUM_LINK_PKTERRS; | ||
1129 | } | ||
1130 | } else if ((errs & E_SUM_LINK_PKTERRS) && | ||
1131 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1132 | /* | ||
1133 | * This can happen when SMA is trying to bring the link | ||
1134 | * up, but the IB link changes state at the "wrong" time. | ||
1135 | * The IB logic then complains that the packet isn't | ||
1136 | * valid. We don't want to confuse people, so we just | ||
1137 | * don't print them, except at debug | ||
1138 | */ | ||
1139 | ignore_this_time = errs & E_SUM_LINK_PKTERRS; | ||
1140 | } | ||
1141 | |||
1142 | qib_write_kreg(dd, kr_errclear, errs); | ||
1143 | |||
1144 | errs &= ~ignore_this_time; | ||
1145 | if (!errs) | ||
1146 | goto done; | ||
1147 | |||
1148 | /* | ||
1149 | * The ones we mask off are handled specially below | ||
1150 | * or above. Also mask SDMADISABLED by default as it | ||
1151 | * is too chatty. | ||
1152 | */ | ||
1153 | mask = ERR_MASK(IBStatusChanged) | | ||
1154 | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | | ||
1155 | ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); | ||
1156 | |||
1157 | qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); | ||
1158 | |||
1159 | if (errs & E_SUM_PKTERRS) | ||
1160 | qib_stats.sps_rcverrs++; | ||
1161 | if (errs & E_SUM_ERRS) | ||
1162 | qib_stats.sps_txerrs++; | ||
1163 | iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS | | ||
1164 | ERR_MASK(SDmaDisabledErr)); | ||
1165 | |||
1166 | if (errs & ERR_MASK(IBStatusChanged)) { | ||
1167 | u64 ibcs; | ||
1168 | |||
1169 | ibcs = qib_read_kreg64(dd, kr_ibcstatus); | ||
1170 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
1171 | handle_7220_chase(ppd, ibcs); | ||
1172 | |||
1173 | /* Update our picture of width and speed from chip */ | ||
1174 | ppd->link_width_active = | ||
1175 | ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ? | ||
1176 | IB_WIDTH_4X : IB_WIDTH_1X; | ||
1177 | ppd->link_speed_active = | ||
1178 | ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ? | ||
1179 | QIB_IB_DDR : QIB_IB_SDR; | ||
1180 | |||
1181 | /* | ||
1182 | * Since going into a recovery state causes the link state | ||
1183 | * to go down and since recovery is transitory, it is better | ||
1184 | * if we "miss" ever seeing the link training state go into | ||
1185 | * recovery (i.e., ignore this transition for link state | ||
1186 | * special handling purposes) without updating lastibcstat. | ||
1187 | */ | ||
1188 | if (qib_7220_phys_portstate(ibcs) != | ||
1189 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER) | ||
1190 | qib_handle_e_ibstatuschanged(ppd, ibcs); | ||
1191 | } | ||
1192 | |||
1193 | if (errs & ERR_MASK(ResetNegated)) { | ||
1194 | qib_dev_err(dd, "Got reset, requires re-init " | ||
1195 | "(unload and reload driver)\n"); | ||
1196 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | ||
1197 | /* mark as having had error */ | ||
1198 | *dd->devstatusp |= QIB_STATUS_HWERROR; | ||
1199 | *dd->pport->statusp &= ~QIB_STATUS_IB_CONF; | ||
1200 | } | ||
1201 | |||
1202 | if (*msg && iserr) | ||
1203 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | ||
1204 | |||
1205 | if (ppd->state_wanted & ppd->lflags) | ||
1206 | wake_up_interruptible(&ppd->state_wait); | ||
1207 | |||
1208 | /* | ||
1209 | * If there were hdrq or egrfull errors, wake up any processes | ||
1210 | * waiting in poll. We used to try to check which contexts had | ||
1211 | * the overflow, but given the cost of that and the chip reads | ||
1212 | * to support it, it's better to just wake everybody up if we | ||
1213 | * get an overflow; waiters can poll again if it's not them. | ||
1214 | */ | ||
1215 | if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { | ||
1216 | qib_handle_urcv(dd, ~0U); | ||
1217 | if (errs & ERR_MASK(RcvEgrFullErr)) | ||
1218 | qib_stats.sps_buffull++; | ||
1219 | else | ||
1220 | qib_stats.sps_hdrfull++; | ||
1221 | } | ||
1222 | done: | ||
1223 | return; | ||
1224 | } | ||
1225 | |||
1226 | /* enable/disable chip from delivering interrupts */ | ||
1227 | static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable) | ||
1228 | { | ||
1229 | if (enable) { | ||
1230 | if (dd->flags & QIB_BADINTR) | ||
1231 | return; | ||
1232 | qib_write_kreg(dd, kr_intmask, ~0ULL); | ||
1233 | /* force re-interrupt of any pending interrupts. */ | ||
1234 | qib_write_kreg(dd, kr_intclear, 0ULL); | ||
1235 | } else | ||
1236 | qib_write_kreg(dd, kr_intmask, 0ULL); | ||
1237 | } | ||
1238 | |||
1239 | /* | ||
1240 | * Try to cleanup as much as possible for anything that might have gone | ||
1241 | * wrong while in freeze mode, such as pio buffers being written by user | ||
1242 | * processes (causing armlaunch), send errors due to going into freeze mode, | ||
1243 | * etc., and try to avoid causing extra interrupts while doing so. | ||
1244 | * Forcibly update the in-memory pioavail register copies after cleanup | ||
1245 | * because the chip won't do it while in freeze mode (the register values | ||
1246 | * themselves are kept correct). | ||
1247 | * Make sure that we don't lose any important interrupts by using the chip | ||
1248 | * feature that says that writing 0 to a bit in *clear that is set in | ||
1249 | * *status will cause an interrupt to be generated again (if allowed by | ||
1250 | * the *mask value). | ||
1251 | * This is in chip-specific code because of all of the register accesses, | ||
1252 | * even though the details are similar on most chips. | ||
1253 | */ | ||
1254 | static void qib_7220_clear_freeze(struct qib_devdata *dd) | ||
1255 | { | ||
1256 | /* disable error interrupts, to avoid confusion */ | ||
1257 | qib_write_kreg(dd, kr_errmask, 0ULL); | ||
1258 | |||
1259 | 	/* also disable interrupts; errormask is sometimes overwritten */ | ||
1260 | qib_7220_set_intr_state(dd, 0); | ||
1261 | |||
1262 | qib_cancel_sends(dd->pport); | ||
1263 | |||
1264 | /* clear the freeze, and be sure chip saw it */ | ||
1265 | qib_write_kreg(dd, kr_control, dd->control); | ||
1266 | qib_read_kreg32(dd, kr_scratch); | ||
1267 | |||
1268 | /* force in-memory update now we are out of freeze */ | ||
1269 | qib_force_pio_avail_update(dd); | ||
1270 | |||
1271 | /* | ||
1272 | * force new interrupt if any hwerr, error or interrupt bits are | ||
1273 | * still set, and clear "safe" send packet errors related to freeze | ||
1274 | * and cancelling sends. Re-enable error interrupts before possible | ||
1275 | * force of re-interrupt on pending interrupts. | ||
1276 | */ | ||
1277 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | ||
1278 | qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); | ||
1279 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
1280 | qib_7220_set_intr_state(dd, 1); | ||
1281 | } | ||
1282 | |||
1283 | /** | ||
1284 | * qib_7220_handle_hwerrors - display hardware errors. | ||
1285 | * @dd: the qlogic_ib device | ||
1286 | * @msg: the output buffer | ||
1287 | * @msgl: the size of the output buffer | ||
1288 | * | ||
1289 |  * Most hardware errors are catastrophic, but for right now, | ||
1290 |  * we'll print them and continue. We reuse the same message buffer | ||
1291 |  * as handle_7220_errors() (the regular error path) to avoid | ||
1292 |  * excessive stack usage. | ||
1293 | */ | ||
1294 | static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, | ||
1295 | size_t msgl) | ||
1296 | { | ||
1297 | u64 hwerrs; | ||
1298 | u32 bits, ctrl; | ||
1299 | int isfatal = 0; | ||
1300 | char *bitsmsg; | ||
1301 | int log_idx; | ||
1302 | |||
1303 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | ||
1304 | if (!hwerrs) | ||
1305 | goto bail; | ||
1306 | if (hwerrs == ~0ULL) { | ||
1307 | qib_dev_err(dd, "Read of hardware error status failed " | ||
1308 | "(all bits set); ignoring\n"); | ||
1309 | goto bail; | ||
1310 | } | ||
1311 | qib_stats.sps_hwerrs++; | ||
1312 | |||
1313 | /* | ||
1314 | * Always clear the error status register, except MEMBISTFAIL, | ||
1315 | * regardless of whether we continue or stop using the chip. | ||
1316 | * We want that set so we know it failed, even across driver reload. | ||
1317 | * We'll still ignore it in the hwerrmask. We do this partly for | ||
1318 | * diagnostics, but also for support. | ||
1319 | */ | ||
1320 | qib_write_kreg(dd, kr_hwerrclear, | ||
1321 | hwerrs & ~HWE_MASK(PowerOnBISTFailed)); | ||
1322 | |||
1323 | hwerrs &= dd->cspec->hwerrmask; | ||
1324 | |||
1325 | /* We log some errors to EEPROM, check if we have any of those. */ | ||
1326 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
1327 | if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log) | ||
1328 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
1329 | if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC | | ||
1330 | RXE_PARITY)) | ||
1331 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | ||
1332 | "(cleared)\n", (unsigned long long) hwerrs); | ||
1333 | |||
1334 | if (hwerrs & ~IB_HWE_BITSEXTANT) | ||
1335 | qib_dev_err(dd, "hwerror interrupt with unknown errors " | ||
1336 | "%llx set\n", (unsigned long long) | ||
1337 | (hwerrs & ~IB_HWE_BITSEXTANT)); | ||
1338 | |||
1339 | if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) | ||
1340 | qib_sd7220_clr_ibpar(dd); | ||
1341 | |||
1342 | ctrl = qib_read_kreg32(dd, kr_control); | ||
1343 | if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) { | ||
1344 | /* | ||
1345 | 		 * Parity errors in send memory are recoverable by h/w; | ||
1346 | * just do housekeeping, exit freeze mode and continue. | ||
1347 | */ | ||
1348 | if (hwerrs & (TXEMEMPARITYERR_PIOBUF | | ||
1349 | TXEMEMPARITYERR_PIOPBC)) { | ||
1350 | qib_7220_txe_recover(dd); | ||
1351 | hwerrs &= ~(TXEMEMPARITYERR_PIOBUF | | ||
1352 | TXEMEMPARITYERR_PIOPBC); | ||
1353 | } | ||
1354 | if (hwerrs) | ||
1355 | isfatal = 1; | ||
1356 | else | ||
1357 | qib_7220_clear_freeze(dd); | ||
1358 | } | ||
1359 | |||
1360 | *msg = '\0'; | ||
1361 | |||
1362 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | ||
1363 | isfatal = 1; | ||
1364 | strlcat(msg, "[Memory BIST test failed, " | ||
1365 | "InfiniPath hardware unusable]", msgl); | ||
1366 | /* ignore from now on, so disable until driver reloaded */ | ||
1367 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | ||
1368 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1369 | } | ||
1370 | |||
1371 | qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs, | ||
1372 | ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl); | ||
1373 | |||
1374 | bitsmsg = dd->cspec->bitsmsgbuf; | ||
1375 | if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << | ||
1376 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) { | ||
1377 | bits = (u32) ((hwerrs >> | ||
1378 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & | ||
1379 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); | ||
1380 | snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, | ||
1381 | "[PCIe Mem Parity Errs %x] ", bits); | ||
1382 | strlcat(msg, bitsmsg, msgl); | ||
1383 | } | ||
1384 | |||
1385 | #define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \ | ||
1386 | QLOGIC_IB_HWE_COREPLL_RFSLIP) | ||
1387 | |||
1388 | if (hwerrs & _QIB_PLL_FAIL) { | ||
1389 | isfatal = 1; | ||
1390 | snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, | ||
1391 | "[PLL failed (%llx), InfiniPath hardware unusable]", | ||
1392 | (unsigned long long) hwerrs & _QIB_PLL_FAIL); | ||
1393 | strlcat(msg, bitsmsg, msgl); | ||
1394 | /* ignore from now on, so disable until driver reloaded */ | ||
1395 | dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL); | ||
1396 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1397 | } | ||
1398 | |||
1399 | if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) { | ||
1400 | /* | ||
1401 | 		 * If it occurs, it is left masked since the external | ||
1402 | * interface is unused. | ||
1403 | */ | ||
1404 | dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED; | ||
1405 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1406 | } | ||
1407 | |||
1408 | qib_dev_err(dd, "%s hardware error\n", msg); | ||
1409 | |||
1410 | if (isfatal && !dd->diag_client) { | ||
1411 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | ||
1412 | " usable, SN %.16s\n", dd->serial); | ||
1413 | /* | ||
1414 | * For /sys status file and user programs to print; if no | ||
1415 | * trailing brace is copied, we'll know it was truncated. | ||
1416 | */ | ||
1417 | if (dd->freezemsg) | ||
1418 | snprintf(dd->freezemsg, dd->freezelen, | ||
1419 | "{%s}", msg); | ||
1420 | qib_disable_after_error(dd); | ||
1421 | } | ||
1422 | bail:; | ||
1423 | } | ||
1424 | |||
1425 | /** | ||
1426 | * qib_7220_init_hwerrors - enable hardware errors | ||
1427 | * @dd: the qlogic_ib device | ||
1428 | * | ||
1429 | * now that we have finished initializing everything that might reasonably | ||
1430 |  * cause a hardware error, and cleared those error bits as they occur, | ||
1431 | * we can enable hardware errors in the mask (potentially enabling | ||
1432 | * freeze mode), and enable hardware errors as errors (along with | ||
1433 | * everything else) in errormask | ||
1434 | */ | ||
1435 | static void qib_7220_init_hwerrors(struct qib_devdata *dd) | ||
1436 | { | ||
1437 | u64 val; | ||
1438 | u64 extsval; | ||
1439 | |||
1440 | extsval = qib_read_kreg64(dd, kr_extstatus); | ||
1441 | |||
1442 | if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST | | ||
1443 | QLOGIC_IB_EXTS_MEMBIST_DISABLED))) | ||
1444 | qib_dev_err(dd, "MemBIST did not complete!\n"); | ||
1445 | if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED) | ||
1446 | qib_devinfo(dd->pcidev, "MemBIST is disabled.\n"); | ||
1447 | |||
1448 | 	val = ~0ULL; /* default to all hwerrors becoming interrupts */ | ||
1449 | |||
1450 | val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR; | ||
1451 | dd->cspec->hwerrmask = val; | ||
1452 | |||
1453 | qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); | ||
1454 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1455 | |||
1456 | /* clear all */ | ||
1457 | qib_write_kreg(dd, kr_errclear, ~0ULL); | ||
1458 | /* enable errors that are masked, at least this first time. */ | ||
1459 | qib_write_kreg(dd, kr_errmask, ~0ULL); | ||
1460 | dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); | ||
1461 | /* clear any interrupts up to this point (ints still not enabled) */ | ||
1462 | qib_write_kreg(dd, kr_intclear, ~0ULL); | ||
1463 | } | ||
1464 | |||
1465 | /* | ||
1466 | * Disable and enable the armlaunch error. Used for PIO bandwidth testing | ||
1467 | * on chips that are count-based, rather than trigger-based. There is no | ||
1468 | * reference counting, but that's also fine, given the intended use. | ||
1469 |  * Only chip-specific because it's all register accesses. | ||
1470 | */ | ||
1471 | static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable) | ||
1472 | { | ||
1473 | if (enable) { | ||
1474 | qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr)); | ||
1475 | dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr); | ||
1476 | } else | ||
1477 | dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr); | ||
1478 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
1479 | } | ||
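
/*
 * Editor's usage sketch (hypothetical test harness, not driver code):
 * per the comment above, a PIO bandwidth test would bracket its send
 * loop with these calls so deliberate buffer re-arms don't flood the
 * error path with armlaunch reports.
 */
static inline void example_pio_bw_test(struct qib_devdata *dd)
{
	qib_set_7220_armlaunch(dd, 0);	/* mask armlaunch errors */
	/* ... hammer PIO send buffers here ... */
	qib_set_7220_armlaunch(dd, 1);	/* clear any error, re-enable */
}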
1480 | |||
1481 | /* | ||
1482 | * Formerly took parameter <which> in pre-shifted, | ||
1483 | * pre-merged form with LinkCmd and LinkInitCmd | ||
1484 | * together, and assuming the zero was NOP. | ||
1485 | */ | ||
1486 | static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd, | ||
1487 | u16 linitcmd) | ||
1488 | { | ||
1489 | u64 mod_wd; | ||
1490 | struct qib_devdata *dd = ppd->dd; | ||
1491 | unsigned long flags; | ||
1492 | |||
1493 | if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { | ||
1494 | /* | ||
1495 | * If we are told to disable, note that so link-recovery | ||
1496 | * code does not attempt to bring us back up. | ||
1497 | */ | ||
1498 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1499 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | ||
1500 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1501 | } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { | ||
1502 | /* | ||
1503 | * Any other linkinitcmd will lead to LINKDOWN and then | ||
1504 | * to INIT (if all is well), so clear flag to let | ||
1505 | * link-recovery code attempt to bring us back up. | ||
1506 | */ | ||
1507 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1508 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
1509 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1510 | } | ||
1511 | |||
1512 | mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) | | ||
1513 | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
1514 | |||
1515 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd); | ||
1516 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
1517 | qib_write_kreg(dd, kr_scratch, 0); | ||
1518 | } | ||
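
/*
 * Usage note (editor's): callers now pass the two commands separately,
 * e.g. the shutdown path later in this file does
 * qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
 * and the shifting/merging above replaces the old pre-merged <which>.
 */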
1519 | |||
1520 | /* | ||
1521 | * All detailed interaction with the SerDes has been moved to qib_sd7220.c | ||
1522 | * | ||
1523 | * The portion of IBA7220-specific bringup_serdes() that actually deals with | ||
1524 | * registers and memory within the SerDes itself is qib_sd7220_init(). | ||
1525 | */ | ||
1526 | |||
1527 | /** | ||
1528 | * qib_7220_bringup_serdes - bring up the serdes | ||
1529 | * @ppd: physical port on the qlogic_ib device | ||
1530 | */ | ||
1531 | static int qib_7220_bringup_serdes(struct qib_pportdata *ppd) | ||
1532 | { | ||
1533 | struct qib_devdata *dd = ppd->dd; | ||
1534 | u64 val, prev_val, guid, ibc; | ||
1535 | int ret = 0; | ||
1536 | |||
1537 | /* Put IBC in reset, sends disabled */ | ||
1538 | dd->control &= ~QLOGIC_IB_C_LINKENABLE; | ||
1539 | qib_write_kreg(dd, kr_control, 0ULL); | ||
1540 | |||
1541 | if (qib_compat_ddr_negotiate) { | ||
1542 | ppd->cpspec->ibdeltainprog = 1; | ||
1543 | ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr); | ||
1544 | ppd->cpspec->iblnkerrsnap = | ||
1545 | read_7220_creg32(dd, cr_iblinkerrrecov); | ||
1546 | } | ||
1547 | |||
1548 | /* flowcontrolwatermark is in units of KBytes */ | ||
1549 | ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark); | ||
1550 | /* | ||
1551 | 	 * How often flowctrl is sent, more or less in usecs; balance against | ||
1552 | * watermark value, so that in theory senders always get a flow | ||
1553 | * control update in time to not let the IB link go idle. | ||
1554 | */ | ||
1555 | ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod); | ||
1556 | /* max error tolerance */ | ||
1557 | ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold); | ||
1558 | /* use "real" buffer space for */ | ||
1559 | ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale); | ||
1560 | /* IB credit flow control. */ | ||
1561 | ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold); | ||
1562 | /* | ||
1563 | * set initial max size pkt IBC will send, including ICRC; it's the | ||
1564 | * PIO buffer size in dwords, less 1; also see qib_set_mtu() | ||
1565 | */ | ||
1566 | ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen); | ||
1567 | ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */ | ||
1568 | |||
1569 | /* initially come up waiting for TS1, without sending anything. */ | ||
1570 | val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | ||
1571 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
1572 | qib_write_kreg(dd, kr_ibcctrl, val); | ||
1573 | |||
1574 | if (!ppd->cpspec->ibcddrctrl) { | ||
1575 | /* not on re-init after reset */ | ||
1576 | ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl); | ||
1577 | |||
1578 | if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR)) | ||
1579 | ppd->cpspec->ibcddrctrl |= | ||
1580 | IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
1581 | IBA7220_IBC_IBTA_1_2_MASK; | ||
1582 | else | ||
1583 | ppd->cpspec->ibcddrctrl |= | ||
1584 | ppd->link_speed_enabled == QIB_IB_DDR ? | ||
1585 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
1586 | if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == | ||
1587 | (IB_WIDTH_1X | IB_WIDTH_4X)) | ||
1588 | ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG; | ||
1589 | else | ||
1590 | ppd->cpspec->ibcddrctrl |= | ||
1591 | ppd->link_width_enabled == IB_WIDTH_4X ? | ||
1592 | IBA7220_IBC_WIDTH_4X_ONLY : | ||
1593 | IBA7220_IBC_WIDTH_1X_ONLY; | ||
1594 | |||
1595 | /* always enable these on driver reload, not sticky */ | ||
1596 | ppd->cpspec->ibcddrctrl |= | ||
1597 | IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT; | ||
1598 | ppd->cpspec->ibcddrctrl |= | ||
1599 | IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; | ||
1600 | |||
1601 | /* enable automatic lane reversal detection for receive */ | ||
1602 | ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED; | ||
1603 | } else | ||
1604 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
1605 | qib_write_kreg(dd, kr_scratch, 0); | ||
1606 | |||
1607 | qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); | ||
1608 | qib_write_kreg(dd, kr_scratch, 0); | ||
1609 | |||
1610 | 	qib_write_kreg(dd, kr_ncmodectrl, 0ULL); | ||
1611 | qib_write_kreg(dd, kr_scratch, 0); | ||
1612 | |||
1613 | ret = qib_sd7220_init(dd); | ||
1614 | |||
1615 | val = qib_read_kreg64(dd, kr_xgxs_cfg); | ||
1616 | prev_val = val; | ||
1617 | val |= QLOGIC_IB_XGXS_FC_SAFE; | ||
1618 | if (val != prev_val) { | ||
1619 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
1620 | qib_read_kreg32(dd, kr_scratch); | ||
1621 | } | ||
1622 | if (val & QLOGIC_IB_XGXS_RESET) | ||
1623 | val &= ~QLOGIC_IB_XGXS_RESET; | ||
1624 | if (val != prev_val) | ||
1625 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
1626 | |||
1627 | /* first time through, set port guid */ | ||
1628 | if (!ppd->guid) | ||
1629 | ppd->guid = dd->base_guid; | ||
1630 | guid = be64_to_cpu(ppd->guid); | ||
1631 | |||
1632 | qib_write_kreg(dd, kr_hrtbt_guid, guid); | ||
1633 | if (!ret) { | ||
1634 | dd->control |= QLOGIC_IB_C_LINKENABLE; | ||
1635 | qib_write_kreg(dd, kr_control, dd->control); | ||
1636 | } else | ||
1637 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
1638 | qib_write_kreg(dd, kr_scratch, 0); | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | /** | ||
1643 | * qib_7220_quiet_serdes - set serdes to txidle | ||
1644 | * @ppd: physical port of the qlogic_ib device | ||
1645 | * Called when driver is being unloaded | ||
1646 | */ | ||
1647 | static void qib_7220_quiet_serdes(struct qib_pportdata *ppd) | ||
1648 | { | ||
1649 | u64 val; | ||
1650 | struct qib_devdata *dd = ppd->dd; | ||
1651 | unsigned long flags; | ||
1652 | |||
1653 | /* disable IBC */ | ||
1654 | dd->control &= ~QLOGIC_IB_C_LINKENABLE; | ||
1655 | qib_write_kreg(dd, kr_control, | ||
1656 | dd->control | QLOGIC_IB_C_FREEZEMODE); | ||
1657 | |||
1658 | ppd->cpspec->chase_end = 0; | ||
1659 | if (ppd->cpspec->chase_timer.data) /* if initted */ | ||
1660 | del_timer_sync(&ppd->cpspec->chase_timer); | ||
1661 | |||
1662 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || | ||
1663 | ppd->cpspec->ibdeltainprog) { | ||
1664 | u64 diagc; | ||
1665 | |||
1666 | /* enable counter writes */ | ||
1667 | diagc = qib_read_kreg64(dd, kr_hwdiagctrl); | ||
1668 | qib_write_kreg(dd, kr_hwdiagctrl, | ||
1669 | diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); | ||
1670 | |||
1671 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { | ||
1672 | val = read_7220_creg32(dd, cr_ibsymbolerr); | ||
1673 | if (ppd->cpspec->ibdeltainprog) | ||
1674 | val -= val - ppd->cpspec->ibsymsnap; | ||
1675 | val -= ppd->cpspec->ibsymdelta; | ||
1676 | write_7220_creg(dd, cr_ibsymbolerr, val); | ||
1677 | } | ||
1678 | if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { | ||
1679 | val = read_7220_creg32(dd, cr_iblinkerrrecov); | ||
1680 | if (ppd->cpspec->ibdeltainprog) | ||
1681 | val -= val - ppd->cpspec->iblnkerrsnap; | ||
1682 | val -= ppd->cpspec->iblnkerrdelta; | ||
1683 | write_7220_creg(dd, cr_iblinkerrrecov, val); | ||
1684 | } | ||
1685 | |||
1686 | /* and disable counter writes */ | ||
1687 | qib_write_kreg(dd, kr_hwdiagctrl, diagc); | ||
1688 | } | ||
1689 | qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1690 | |||
1691 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1692 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | ||
1693 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1694 | wake_up(&ppd->cpspec->autoneg_wait); | ||
1695 | cancel_delayed_work(&ppd->cpspec->autoneg_work); | ||
1696 | flush_scheduled_work(); | ||
1697 | |||
1698 | shutdown_7220_relock_poll(ppd->dd); | ||
1699 | val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg); | ||
1700 | val |= QLOGIC_IB_XGXS_RESET; | ||
1701 | qib_write_kreg(ppd->dd, kr_xgxs_cfg, val); | ||
1702 | } | ||
1703 | |||
1704 | /** | ||
1705 | * qib_setup_7220_setextled - set the state of the two external LEDs | ||
1706 |  * @ppd: the qlogic_ib port | ||
1707 | * @on: whether the link is up or not | ||
1708 | * | ||
1709 |  * When on is true, the exact combination of LEDs lit is determined | ||
1710 |  * by looking at the ibcstatus. | ||
1711 | * | ||
1712 | * These LEDs indicate the physical and logical state of IB link. | ||
1713 | * For this chip (at least with recommended board pinouts), LED1 | ||
1714 | * is Yellow (logical state) and LED2 is Green (physical state), | ||
1715 |  * is Yellow (logical state) and LED2 is Green (physical state). | ||
1716 | * Note: We try to match the Mellanox HCA LED behavior as best | ||
1717 | * we can. Green indicates physical link state is OK (something is | ||
1718 | * plugged in, and we can train). | ||
1719 | * Amber indicates the link is logically up (ACTIVE). | ||
1720 | * Mellanox further blinks the amber LED to indicate data packet | ||
1721 | * activity, but we have no hardware support for that, so it would | ||
1722 | * require waking up every 10-20 msecs and checking the counters | ||
1723 | * on the chip, and then turning the LED off if appropriate. That's | ||
1724 | * visible overhead, so not something we will do. | ||
1725 | * | ||
1726 | */ | ||
1727 | static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on) | ||
1728 | { | ||
1729 | struct qib_devdata *dd = ppd->dd; | ||
1730 | u64 extctl, ledblink = 0, val, lst, ltst; | ||
1731 | unsigned long flags; | ||
1732 | |||
1733 | /* | ||
1734 | * The diags use the LED to indicate diag info, so we leave | ||
1735 | * the external LED alone when the diags are running. | ||
1736 | */ | ||
1737 | if (dd->diag_client) | ||
1738 | return; | ||
1739 | |||
1740 | if (ppd->led_override) { | ||
1741 | ltst = (ppd->led_override & QIB_LED_PHYS) ? | ||
1742 | IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED, | ||
1743 | lst = (ppd->led_override & QIB_LED_LOG) ? | ||
1744 | IB_PORT_ACTIVE : IB_PORT_DOWN; | ||
1745 | } else if (on) { | ||
1746 | val = qib_read_kreg64(dd, kr_ibcstatus); | ||
1747 | ltst = qib_7220_phys_portstate(val); | ||
1748 | lst = qib_7220_iblink_state(val); | ||
1749 | } else { | ||
1750 | ltst = 0; | ||
1751 | lst = 0; | ||
1752 | } | ||
1753 | |||
1754 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
1755 | extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) | | ||
1756 | SYM_MASK(EXTCtrl, LEDPriPortYellowOn)); | ||
1757 | if (ltst == IB_PHYSPORTSTATE_LINKUP) { | ||
1758 | extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn); | ||
1759 | /* | ||
1760 | * counts are in chip clock (4ns) periods. | ||
1761 | 		 * This is about 1/15 sec (66.6 ms) on, | ||
1762 | 		 * 3/16 sec (187.5 ms) off, with packets rcvd. | ||
1763 | */ | ||
1764 | ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT) | ||
1765 | | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT); | ||
1766 | } | ||
1767 | if (lst == IB_PORT_ACTIVE) | ||
1768 | extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn); | ||
1769 | dd->cspec->extctrl = extctl; | ||
1770 | qib_write_kreg(dd, kr_extctrl, extctl); | ||
1771 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
1772 | |||
1773 | if (ledblink) /* blink the LED on packet receive */ | ||
1774 | qib_write_kreg(dd, kr_rcvpktledcnt, ledblink); | ||
1775 | } | ||
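
/*
 * A minimal sketch (editor's, assuming the 4 ns chip clock used
 * above): pack LED on/off times, given in milliseconds, into the
 * rcvpktledcnt layout. on_ms = 66 and off_ms = 187 approximately
 * reproduce the constants in qib_setup_7220_setextled().
 */
static inline u64 iba7220_ledblink_word(u32 on_ms, u32 off_ms)
{
	u64 on_cycles = (u64)on_ms * 1000000 / 4;   /* ms -> ns -> 4 ns ticks */
	u64 off_cycles = (u64)off_ms * 1000000 / 4;

	return (on_cycles << IBA7220_LEDBLINK_ON_SHIFT) |
	       (off_cycles << IBA7220_LEDBLINK_OFF_SHIFT);
}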
1776 | |||
1777 | static void qib_7220_free_irq(struct qib_devdata *dd) | ||
1778 | { | ||
1779 | if (dd->cspec->irq) { | ||
1780 | free_irq(dd->cspec->irq, dd); | ||
1781 | dd->cspec->irq = 0; | ||
1782 | } | ||
1783 | qib_nomsi(dd); | ||
1784 | } | ||
1785 | |||
1786 | /* | ||
1787 |  * qib_setup_7220_cleanup - clean up any chip-specific stuff | ||
1788 | * @dd: the qlogic_ib device | ||
1789 | * | ||
1790 | * This is called during driver unload. | ||
1791 | * | ||
1792 | */ | ||
1793 | static void qib_setup_7220_cleanup(struct qib_devdata *dd) | ||
1794 | { | ||
1795 | qib_7220_free_irq(dd); | ||
1796 | kfree(dd->cspec->cntrs); | ||
1797 | kfree(dd->cspec->portcntrs); | ||
1798 | } | ||
1799 | |||
1800 | /* | ||
1801 | * This is only called for SDmaInt. | ||
1802 | * SDmaDisabled is handled on the error path. | ||
1803 | */ | ||
1804 | static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat) | ||
1805 | { | ||
1806 | unsigned long flags; | ||
1807 | |||
1808 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
1809 | |||
1810 | switch (ppd->sdma_state.current_state) { | ||
1811 | case qib_sdma_state_s00_hw_down: | ||
1812 | break; | ||
1813 | |||
1814 | case qib_sdma_state_s10_hw_start_up_wait: | ||
1815 | __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); | ||
1816 | break; | ||
1817 | |||
1818 | case qib_sdma_state_s20_idle: | ||
1819 | break; | ||
1820 | |||
1821 | case qib_sdma_state_s30_sw_clean_up_wait: | ||
1822 | break; | ||
1823 | |||
1824 | case qib_sdma_state_s40_hw_clean_up_wait: | ||
1825 | break; | ||
1826 | |||
1827 | case qib_sdma_state_s50_hw_halt_wait: | ||
1828 | __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); | ||
1829 | break; | ||
1830 | |||
1831 | case qib_sdma_state_s99_running: | ||
1832 | /* too chatty to print here */ | ||
1833 | __qib_sdma_intr(ppd); | ||
1834 | break; | ||
1835 | } | ||
1836 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
1837 | } | ||
1838 | |||
1839 | static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint) | ||
1840 | { | ||
1841 | unsigned long flags; | ||
1842 | |||
1843 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
1844 | if (needint) { | ||
1845 | if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | ||
1846 | goto done; | ||
1847 | /* | ||
1848 | * blip the availupd off, next write will be on, so | ||
1849 | * we ensure an avail update, regardless of threshold or | ||
1850 | * buffers becoming free, whenever we want an interrupt | ||
1851 | */ | ||
1852 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl & | ||
1853 | ~SYM_MASK(SendCtrl, SendBufAvailUpd)); | ||
1854 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
1855 | dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); | ||
1856 | } else | ||
1857 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); | ||
1858 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
1859 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
1860 | done: | ||
1861 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
1862 | } | ||
1863 | |||
1864 | /* | ||
1865 | * Handle errors and unusual events first, separate function | ||
1866 | * to improve cache hits for fast path interrupt handling. | ||
1867 | */ | ||
1868 | static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat) | ||
1869 | { | ||
1870 | if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT)) | ||
1871 | qib_dev_err(dd, | ||
1872 | "interrupt with unknown interrupts %Lx set\n", | ||
1873 | istat & ~QLOGIC_IB_I_BITSEXTANT); | ||
1874 | |||
1875 | if (istat & QLOGIC_IB_I_GPIO) { | ||
1876 | u32 gpiostatus; | ||
1877 | |||
1878 | /* | ||
1879 | * Boards for this chip currently don't use GPIO interrupts, | ||
1880 | * so clear by writing GPIOstatus to GPIOclear, and complain | ||
1881 | 		 * to alert the developer. To avoid endless repeats, clear | ||
1882 | * the bits in the mask, since there is some kind of | ||
1883 | * programming error or chip problem. | ||
1884 | */ | ||
1885 | gpiostatus = qib_read_kreg32(dd, kr_gpio_status); | ||
1886 | /* | ||
1887 | * In theory, writing GPIOstatus to GPIOclear could | ||
1888 | * have a bad side-effect on some diagnostic that wanted | ||
1889 | * to poll for a status-change, but the various shadows | ||
1890 | * make that problematic at best. Diags will just suppress | ||
1891 | * all GPIO interrupts during such tests. | ||
1892 | */ | ||
1893 | qib_write_kreg(dd, kr_gpio_clear, gpiostatus); | ||
1894 | |||
1895 | if (gpiostatus) { | ||
1896 | const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); | ||
1897 | u32 gpio_irq = mask & gpiostatus; | ||
1898 | |||
1899 | 			/* | ||
1900 | 			 * A bit set in both status and the (chip) Mask | ||
1901 | 			 * register would cause an interrupt. Since we are | ||
1902 | 			 * not expecting any, report it. Also check that | ||
1903 | 			 * the chip reflects our shadow, report issues, | ||
1904 | 			 * and refresh from the shadow. | ||
1905 | 			 * | ||
1906 | 			 * Then clear any troublemakers here, and update | ||
1907 | 			 * the chip's mask from the (corrected) shadow so | ||
1908 | 			 * the offending bits cannot re-interrupt. | ||
1909 | 			 */ | ||
1910 | dd->cspec->gpio_mask &= ~gpio_irq; | ||
1911 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
1912 | } | ||
1913 | } | ||
1914 | |||
1915 | if (istat & QLOGIC_IB_I_ERROR) { | ||
1916 | u64 estat; | ||
1917 | |||
1918 | qib_stats.sps_errints++; | ||
1919 | estat = qib_read_kreg64(dd, kr_errstatus); | ||
1920 | if (!estat) | ||
1921 | qib_devinfo(dd->pcidev, "error interrupt (%Lx), " | ||
1922 | "but no error bits set!\n", istat); | ||
1923 | else | ||
1924 | handle_7220_errors(dd, estat); | ||
1925 | } | ||
1926 | } | ||
1927 | |||
1928 | static irqreturn_t qib_7220intr(int irq, void *data) | ||
1929 | { | ||
1930 | struct qib_devdata *dd = data; | ||
1931 | irqreturn_t ret; | ||
1932 | u64 istat; | ||
1933 | u64 ctxtrbits; | ||
1934 | u64 rmask; | ||
1935 | unsigned i; | ||
1936 | |||
1937 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { | ||
1938 | /* | ||
1939 | * This return value is not great, but we do not want the | ||
1940 | * interrupt core code to remove our interrupt handler | ||
1941 | * because we don't appear to be handling an interrupt | ||
1942 | * during a chip reset. | ||
1943 | */ | ||
1944 | ret = IRQ_HANDLED; | ||
1945 | goto bail; | ||
1946 | } | ||
1947 | |||
1948 | istat = qib_read_kreg64(dd, kr_intstatus); | ||
1949 | |||
1950 | if (unlikely(!istat)) { | ||
1951 | ret = IRQ_NONE; /* not our interrupt, or already handled */ | ||
1952 | goto bail; | ||
1953 | } | ||
1954 | if (unlikely(istat == -1)) { | ||
1955 | qib_bad_intrstatus(dd); | ||
1956 | /* don't know if it was our interrupt or not */ | ||
1957 | ret = IRQ_NONE; | ||
1958 | goto bail; | ||
1959 | } | ||
1960 | |||
1961 | qib_stats.sps_ints++; | ||
1962 | if (dd->int_counter != (u32) -1) | ||
1963 | dd->int_counter++; | ||
1964 | |||
1965 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | | ||
1966 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) | ||
1967 | unlikely_7220_intr(dd, istat); | ||
1968 | |||
1969 | /* | ||
1970 | * Clear the interrupt bits we found set, relatively early, so we | ||
1971 | 	 * "know" the chip will have seen this by the time we process | ||
1972 | * the queue, and will re-interrupt if necessary. The processor | ||
1973 | * itself won't take the interrupt again until we return. | ||
1974 | */ | ||
1975 | qib_write_kreg(dd, kr_intclear, istat); | ||
1976 | |||
1977 | /* | ||
1978 | * Handle kernel receive queues before checking for pio buffers | ||
1979 | * available since receives can overflow; piobuf waiters can afford | ||
1980 | * a few extra cycles, since they were waiting anyway. | ||
1981 | */ | ||
1982 | ctxtrbits = istat & | ||
1983 | ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1984 | (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT)); | ||
1985 | if (ctxtrbits) { | ||
1986 | rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1987 | (1ULL << QLOGIC_IB_I_RCVURG_SHIFT); | ||
1988 | for (i = 0; i < dd->first_user_ctxt; i++) { | ||
1989 | if (ctxtrbits & rmask) { | ||
1990 | ctxtrbits &= ~rmask; | ||
1991 | qib_kreceive(dd->rcd[i], NULL, NULL); | ||
1992 | } | ||
1993 | rmask <<= 1; | ||
1994 | } | ||
1995 | if (ctxtrbits) { | ||
1996 | ctxtrbits = | ||
1997 | (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1998 | (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT); | ||
1999 | qib_handle_urcv(dd, ctxtrbits); | ||
2000 | } | ||
2001 | } | ||
2002 | |||
2003 | /* only call for SDmaInt */ | ||
2004 | if (istat & QLOGIC_IB_I_SDMAINT) | ||
2005 | sdma_7220_intr(dd->pport, istat); | ||
2006 | |||
2007 | if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) | ||
2008 | qib_ib_piobufavail(dd); | ||
2009 | |||
2010 | ret = IRQ_HANDLED; | ||
2011 | bail: | ||
2012 | return ret; | ||
2013 | } | ||
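
/*
 * Editor's note on the context scan in qib_7220intr(): context i owns
 * bit (QLOGIC_IB_I_RCVAVAIL_SHIFT + i) and bit
 * (QLOGIC_IB_I_RCVURG_SHIFT + i) of istat, so the two-bit rmask,
 * shifted once per kernel context, tests both conditions for a
 * context with a single AND; whatever remains afterward belongs to
 * user contexts and is folded down for qib_handle_urcv().
 */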
2014 | |||
2015 | /* | ||
2016 | * Set up our chip-specific interrupt handler. | ||
2017 | * The interrupt type has already been setup, so | ||
2018 | * we just need to do the registration and error checking. | ||
2019 | * If we are using MSI interrupts, we may fall back to | ||
2020 | * INTx later, if the interrupt handler doesn't get called | ||
2021 | * within 1/2 second (see verify_interrupt()). | ||
2022 | */ | ||
2023 | static void qib_setup_7220_interrupt(struct qib_devdata *dd) | ||
2024 | { | ||
2025 | if (!dd->cspec->irq) | ||
2026 | qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " | ||
2027 | "work\n"); | ||
2028 | else { | ||
2029 | int ret = request_irq(dd->cspec->irq, qib_7220intr, | ||
2030 | dd->msi_lo ? 0 : IRQF_SHARED, | ||
2031 | QIB_DRV_NAME, dd); | ||
2032 | |||
2033 | if (ret) | ||
2034 | qib_dev_err(dd, "Couldn't setup %s interrupt " | ||
2035 | "(irq=%d): %d\n", dd->msi_lo ? | ||
2036 | "MSI" : "INTx", dd->cspec->irq, ret); | ||
2037 | } | ||
2038 | } | ||
2039 | |||
2040 | /** | ||
2041 | * qib_7220_boardname - fill in the board name | ||
2042 | * @dd: the qlogic_ib device | ||
2043 | * | ||
2044 | * info is based on the board revision register | ||
2045 | */ | ||
2046 | static void qib_7220_boardname(struct qib_devdata *dd) | ||
2047 | { | ||
2048 | char *n; | ||
2049 | u32 boardid, namelen; | ||
2050 | |||
2051 | boardid = SYM_FIELD(dd->revision, Revision, | ||
2052 | BoardID); | ||
2053 | |||
2054 | switch (boardid) { | ||
2055 | case 1: | ||
2056 | n = "InfiniPath_QLE7240"; | ||
2057 | break; | ||
2058 | case 2: | ||
2059 | n = "InfiniPath_QLE7280"; | ||
2060 | break; | ||
2061 | default: | ||
2062 | qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid); | ||
2063 | n = "Unknown_InfiniPath_7220"; | ||
2064 | break; | ||
2065 | } | ||
2066 | |||
2067 | namelen = strlen(n) + 1; | ||
2068 | dd->boardname = kmalloc(namelen, GFP_KERNEL); | ||
2069 | if (!dd->boardname) | ||
2070 | qib_dev_err(dd, "Failed allocation for board name: %s\n", n); | ||
2071 | else | ||
2072 | snprintf(dd->boardname, namelen, "%s", n); | ||
2073 | |||
2074 | if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2) | ||
2075 | qib_dev_err(dd, "Unsupported InfiniPath hardware " | ||
2076 | "revision %u.%u!\n", | ||
2077 | dd->majrev, dd->minrev); | ||
2078 | |||
2079 | snprintf(dd->boardversion, sizeof(dd->boardversion), | ||
2080 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | ||
2081 | QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, | ||
2082 | (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), | ||
2083 | dd->majrev, dd->minrev, | ||
2084 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | ||
2085 | } | ||
2086 | |||
2087 | /* | ||
2088 | * This routine sleeps, so it can only be called from user context, not | ||
2089 | * from interrupt context. | ||
2090 | */ | ||
2091 | static int qib_setup_7220_reset(struct qib_devdata *dd) | ||
2092 | { | ||
2093 | u64 val; | ||
2094 | int i; | ||
2095 | int ret; | ||
2096 | u16 cmdval; | ||
2097 | u8 int_line, clinesz; | ||
2098 | unsigned long flags; | ||
2099 | |||
2100 | qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); | ||
2101 | |||
2102 | /* Use dev_err so it shows up in logs, etc. */ | ||
2103 | qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); | ||
2104 | |||
2105 | /* no interrupts till re-initted */ | ||
2106 | qib_7220_set_intr_state(dd, 0); | ||
2107 | |||
2108 | dd->pport->cpspec->ibdeltainprog = 0; | ||
2109 | dd->pport->cpspec->ibsymdelta = 0; | ||
2110 | dd->pport->cpspec->iblnkerrdelta = 0; | ||
2111 | |||
2112 | /* | ||
2113 | * Keep chip from being accessed until we are ready. Use | ||
2114 | * writeq() directly, to allow the write even though QIB_PRESENT | ||
2115 | 	 * isn't set. | ||
2116 | */ | ||
2117 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); | ||
2118 | dd->int_counter = 0; /* so we check interrupts work again */ | ||
2119 | val = dd->control | QLOGIC_IB_C_RESET; | ||
2120 | writeq(val, &dd->kregbase[kr_control]); | ||
2121 | mb(); /* prevent compiler reordering around actual reset */ | ||
2122 | |||
2123 | for (i = 1; i <= 5; i++) { | ||
2124 | /* | ||
2125 | * Allow MBIST, etc. to complete; longer on each retry. | ||
2126 | * We sometimes get machine checks from bus timeout if no | ||
2127 | * response, so for now, make it *really* long. | ||
2128 | */ | ||
2129 | msleep(1000 + (1 + i) * 2000); | ||
2130 | |||
2131 | qib_pcie_reenable(dd, cmdval, int_line, clinesz); | ||
2132 | |||
2133 | /* | ||
2134 | * Use readq directly, so we don't need to mark it as PRESENT | ||
2135 | * until we get a successful indication that all is well. | ||
2136 | */ | ||
2137 | val = readq(&dd->kregbase[kr_revision]); | ||
2138 | if (val == dd->revision) { | ||
2139 | dd->flags |= QIB_PRESENT; /* it's back */ | ||
2140 | ret = qib_reinit_intr(dd); | ||
2141 | goto bail; | ||
2142 | } | ||
2143 | } | ||
2144 | ret = 0; /* failed */ | ||
2145 | |||
2146 | bail: | ||
2147 | if (ret) { | ||
2148 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) | ||
2149 | qib_dev_err(dd, "Reset failed to setup PCIe or " | ||
2150 | "interrupts; continuing anyway\n"); | ||
2151 | |||
2152 | /* hold IBC in reset, no sends, etc till later */ | ||
2153 | qib_write_kreg(dd, kr_control, 0ULL); | ||
2154 | |||
2155 | /* clear the reset error, init error/hwerror mask */ | ||
2156 | qib_7220_init_hwerrors(dd); | ||
2157 | |||
2158 | /* do setup similar to speed or link-width changes */ | ||
2159 | if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) | ||
2160 | dd->cspec->presets_needed = 1; | ||
2161 | spin_lock_irqsave(&dd->pport->lflags_lock, flags); | ||
2162 | dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
2163 | dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
2164 | spin_unlock_irqrestore(&dd->pport->lflags_lock, flags); | ||
2165 | } | ||
2166 | |||
2167 | return ret; | ||
2168 | } | ||
2169 | |||
2170 | /** | ||
2171 | * qib_7220_put_tid - write a TID to the chip | ||
2172 | * @dd: the qlogic_ib device | ||
2173 | * @tidptr: pointer to the expected TID (in chip) to update | ||
2174 |  * @type: 0 for eager, 1 for expected | ||
2175 | * @pa: physical address of in memory buffer; tidinvalid if freeing | ||
2176 | */ | ||
2177 | static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | ||
2178 | u32 type, unsigned long pa) | ||
2179 | { | ||
2180 | if (pa != dd->tidinvalid) { | ||
2181 | u64 chippa = pa >> IBA7220_TID_PA_SHIFT; | ||
2182 | |||
2183 | /* paranoia checks */ | ||
2184 | if (pa != (chippa << IBA7220_TID_PA_SHIFT)) { | ||
2185 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | ||
2186 | pa); | ||
2187 | return; | ||
2188 | } | ||
2189 | if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { | ||
2190 | qib_dev_err(dd, "Physical page address 0x%lx " | ||
2191 | "larger than supported\n", pa); | ||
2192 | return; | ||
2193 | } | ||
2194 | |||
2195 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
2196 | chippa |= dd->tidtemplate; | ||
2197 | else /* for now, always full 4KB page */ | ||
2198 | chippa |= IBA7220_TID_SZ_4K; | ||
2199 | pa = chippa; | ||
2200 | } | ||
2201 | writeq(pa, tidptr); | ||
2202 | mmiowb(); | ||
2203 | } | ||
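
/*
 * A minimal sketch (editor's, not driver API) of the TID encoding
 * qib_7220_put_tid() performs: the physical address is stored
 * right-shifted by IBA7220_TID_PA_SHIFT (hence the 2KB alignment
 * check), with the size/template bits OR'd into the freed low bits.
 * ~0ULL is an arbitrary sentinel for this sketch.
 */
static inline u64 iba7220_encode_tid(unsigned long pa, u64 template)
{
	u64 chippa = pa >> IBA7220_TID_PA_SHIFT;

	if (pa != (chippa << IBA7220_TID_PA_SHIFT) ||	/* not 2KB aligned */
	    chippa >= (1UL << IBA7220_TID_SZ_SHIFT))	/* address too large */
		return ~0ULL;
	return chippa | template;
}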
2204 | |||
2205 | /** | ||
2206 | * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager | ||
2207 | * @dd: the qlogic_ib device | ||
2208 |  * @rcd: the ctxt data | ||
2209 | * | ||
2210 | * clear all TID entries for a ctxt, expected and eager. | ||
2211 | * Used from qib_close(). On this chip, TIDs are only 32 bits, | ||
2212 | * not 64, but they are still on 64 bit boundaries, so tidbase | ||
2213 | * is declared as u64 * for the pointer math, even though we write 32 bits | ||
2214 | */ | ||
2215 | static void qib_7220_clear_tids(struct qib_devdata *dd, | ||
2216 | struct qib_ctxtdata *rcd) | ||
2217 | { | ||
2218 | u64 __iomem *tidbase; | ||
2219 | unsigned long tidinv; | ||
2220 | u32 ctxt; | ||
2221 | int i; | ||
2222 | |||
2223 | if (!dd->kregbase || !rcd) | ||
2224 | return; | ||
2225 | |||
2226 | ctxt = rcd->ctxt; | ||
2227 | |||
2228 | tidinv = dd->tidinvalid; | ||
2229 | tidbase = (u64 __iomem *) | ||
2230 | ((char __iomem *)(dd->kregbase) + | ||
2231 | dd->rcvtidbase + | ||
2232 | ctxt * dd->rcvtidcnt * sizeof(*tidbase)); | ||
2233 | |||
2234 | for (i = 0; i < dd->rcvtidcnt; i++) | ||
2235 | qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | ||
2236 | tidinv); | ||
2237 | |||
2238 | tidbase = (u64 __iomem *) | ||
2239 | ((char __iomem *)(dd->kregbase) + | ||
2240 | dd->rcvegrbase + | ||
2241 | rcd->rcvegr_tid_base * sizeof(*tidbase)); | ||
2242 | |||
2243 | for (i = 0; i < rcd->rcvegrcnt; i++) | ||
2244 | qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | ||
2245 | tidinv); | ||
2246 | } | ||
2247 | |||
2248 | /** | ||
2249 | * qib_7220_tidtemplate - setup constants for TID updates | ||
2250 | * @dd: the qlogic_ib device | ||
2251 | * | ||
2252 |  * We set up values we use a lot, to avoid recalculating each time | ||
2253 | */ | ||
2254 | static void qib_7220_tidtemplate(struct qib_devdata *dd) | ||
2255 | { | ||
2256 | if (dd->rcvegrbufsize == 2048) | ||
2257 | dd->tidtemplate = IBA7220_TID_SZ_2K; | ||
2258 | else if (dd->rcvegrbufsize == 4096) | ||
2259 | dd->tidtemplate = IBA7220_TID_SZ_4K; | ||
2260 | dd->tidinvalid = 0; | ||
2261 | } | ||
2262 | |||
2263 | /** | ||
2264 | * qib_init_7220_get_base_info - set chip-specific flags for user code | ||
2265 | * @rcd: the qlogic_ib ctxt | ||
2266 |  * @kinfo: qib_base_info pointer | ||
2267 | * | ||
2268 | * We set the PCIE flag because the lower bandwidth on PCIe vs | ||
2269 |  * HyperTransport can affect some user packet algorithms. | ||
2270 | */ | ||
2271 | static int qib_7220_get_base_info(struct qib_ctxtdata *rcd, | ||
2272 | struct qib_base_info *kinfo) | ||
2273 | { | ||
2274 | kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE | | ||
2275 | QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA; | ||
2276 | |||
2277 | if (rcd->dd->flags & QIB_USE_SPCL_TRIG) | ||
2278 | kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER; | ||
2279 | |||
2280 | return 0; | ||
2281 | } | ||
2282 | |||
2283 | static struct qib_message_header * | ||
2284 | qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) | ||
2285 | { | ||
2286 | u32 offset = qib_hdrget_offset(rhf_addr); | ||
2287 | |||
2288 | return (struct qib_message_header *) | ||
2289 | (rhf_addr - dd->rhf_offset + offset); | ||
2290 | } | ||
2291 | |||
2292 | static void qib_7220_config_ctxts(struct qib_devdata *dd) | ||
2293 | { | ||
2294 | unsigned long flags; | ||
2295 | u32 nchipctxts; | ||
2296 | |||
2297 | nchipctxts = qib_read_kreg32(dd, kr_portcnt); | ||
2298 | dd->cspec->numctxts = nchipctxts; | ||
2299 | if (qib_n_krcv_queues > 1) { | ||
2300 | dd->qpn_mask = 0x3f; | ||
2301 | dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; | ||
2302 | if (dd->first_user_ctxt > nchipctxts) | ||
2303 | dd->first_user_ctxt = nchipctxts; | ||
2304 | } else | ||
2305 | dd->first_user_ctxt = dd->num_pports; | ||
2306 | dd->n_krcv_queues = dd->first_user_ctxt; | ||
2307 | |||
2308 | if (!qib_cfgctxts) { | ||
2309 | int nctxts = dd->first_user_ctxt + num_online_cpus(); | ||
2310 | |||
2311 | if (nctxts <= 5) | ||
2312 | dd->ctxtcnt = 5; | ||
2313 | else if (nctxts <= 9) | ||
2314 | dd->ctxtcnt = 9; | ||
2315 | else if (nctxts <= nchipctxts) | ||
2316 | dd->ctxtcnt = nchipctxts; | ||
2317 | } else if (qib_cfgctxts <= nchipctxts) | ||
2318 | dd->ctxtcnt = qib_cfgctxts; | ||
2319 | if (!dd->ctxtcnt) /* none of the above, set to max */ | ||
2320 | dd->ctxtcnt = nchipctxts; | ||
2321 | |||
2322 | /* | ||
2323 | * Chip can be configured for 5, 9, or 17 ctxts, and choice | ||
2324 | * affects number of eager TIDs per ctxt (1K, 2K, 4K). | ||
2325 | * Lock to be paranoid about later motion, etc. | ||
2326 | */ | ||
2327 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
2328 | if (dd->ctxtcnt > 9) | ||
2329 | dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT; | ||
2330 | else if (dd->ctxtcnt > 5) | ||
2331 | dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT; | ||
2332 | /* else configure for default 5 receive ctxts */ | ||
2333 | if (dd->qpn_mask) | ||
2334 | dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB; | ||
2335 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
2336 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
2337 | |||
2338 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ | ||
2339 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | ||
2340 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT); | ||
2341 | } | ||
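
/*
 * Worked example of the sizing above (editor's): with qib_n_krcv_queues
 * = 2 on a single port and 6 online CPUs, first_user_ctxt = 2 and
 * nctxts = 8, so ctxtcnt becomes 9 and CTXTCFG is written as 1; the
 * 17-context configuration (CTXTCFG = 2) is only chosen when more
 * than 9 contexts are needed and the chip supports them.
 */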
2342 | |||
2343 | static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which) | ||
2344 | { | ||
2345 | int lsb, ret = 0; | ||
2346 | u64 maskr; /* right-justified mask */ | ||
2347 | |||
2348 | switch (which) { | ||
2349 | case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ | ||
2350 | ret = ppd->link_width_enabled; | ||
2351 | goto done; | ||
2352 | |||
2353 | case QIB_IB_CFG_LWID: /* Get currently active Link-width */ | ||
2354 | ret = ppd->link_width_active; | ||
2355 | goto done; | ||
2356 | |||
2357 | case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ | ||
2358 | ret = ppd->link_speed_enabled; | ||
2359 | goto done; | ||
2360 | |||
2361 | case QIB_IB_CFG_SPD: /* Get current Link spd */ | ||
2362 | ret = ppd->link_speed_active; | ||
2363 | goto done; | ||
2364 | |||
2365 | case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ | ||
2366 | lsb = IBA7220_IBC_RXPOL_SHIFT; | ||
2367 | maskr = IBA7220_IBC_RXPOL_MASK; | ||
2368 | break; | ||
2369 | |||
2370 | case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ | ||
2371 | lsb = IBA7220_IBC_LREV_SHIFT; | ||
2372 | maskr = IBA7220_IBC_LREV_MASK; | ||
2373 | break; | ||
2374 | |||
2375 | case QIB_IB_CFG_LINKLATENCY: | ||
2376 | ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus) | ||
2377 | & IBA7220_DDRSTAT_LINKLAT_MASK; | ||
2378 | goto done; | ||
2379 | |||
2380 | case QIB_IB_CFG_OP_VLS: | ||
2381 | ret = ppd->vls_operational; | ||
2382 | goto done; | ||
2383 | |||
2384 | case QIB_IB_CFG_VL_HIGH_CAP: | ||
2385 | ret = 0; | ||
2386 | goto done; | ||
2387 | |||
2388 | case QIB_IB_CFG_VL_LOW_CAP: | ||
2389 | ret = 0; | ||
2390 | goto done; | ||
2391 | |||
2392 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
2393 | ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2394 | OverrunThreshold); | ||
2395 | goto done; | ||
2396 | |||
2397 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
2398 | ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2399 | PhyerrThreshold); | ||
2400 | goto done; | ||
2401 | |||
2402 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
2403 | /* will only take effect when the link state changes */ | ||
2404 | ret = (ppd->cpspec->ibcctrl & | ||
2405 | SYM_MASK(IBCCtrl, LinkDownDefaultState)) ? | ||
2406 | IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; | ||
2407 | goto done; | ||
2408 | |||
2409 | case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | ||
2410 | lsb = IBA7220_IBC_HRTBT_SHIFT; | ||
2411 | maskr = IBA7220_IBC_HRTBT_MASK; | ||
2412 | break; | ||
2413 | |||
2414 | case QIB_IB_CFG_PMA_TICKS: | ||
2415 | /* | ||
2416 | * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs | ||
2417 | * Since the clock is always 250MHz, the value is 1 or 0. | ||
2418 | */ | ||
2419 | ret = (ppd->link_speed_active == QIB_IB_DDR); | ||
2420 | goto done; | ||
2421 | |||
2422 | default: | ||
2423 | ret = -EINVAL; | ||
2424 | goto done; | ||
2425 | } | ||
2426 | ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr); | ||
2427 | done: | ||
2428 | return ret; | ||
2429 | } | ||
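
/*
 * Editor's note: every case above that just sets lsb/maskr falls
 * through to one field extraction; e.g. QIB_IB_CFG_HRTBT reads back
 * (ppd->cpspec->ibcddrctrl >> IBA7220_IBC_HRTBT_SHIFT) &
 * IBA7220_IBC_HRTBT_MASK.
 */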
2430 | |||
2431 | static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) | ||
2432 | { | ||
2433 | struct qib_devdata *dd = ppd->dd; | ||
2434 | u64 maskr; /* right-justified mask */ | ||
2435 | int lsb, ret = 0, setforce = 0; | ||
2436 | u16 lcmd, licmd; | ||
2437 | unsigned long flags; | ||
2438 | |||
2439 | switch (which) { | ||
2440 | case QIB_IB_CFG_LIDLMC: | ||
2441 | /* | ||
2442 | 		 * Set LID and LMC. Combined to avoid possible hazard; | ||
2443 | 		 * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val. | ||
2444 | */ | ||
2445 | lsb = IBA7220_IBC_DLIDLMC_SHIFT; | ||
2446 | maskr = IBA7220_IBC_DLIDLMC_MASK; | ||
2447 | break; | ||
2448 | |||
2449 | case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ | ||
2450 | /* | ||
2451 | * As with speed, only write the actual register if | ||
2452 | * the link is currently down, otherwise takes effect | ||
2453 | * on next link change. | ||
2454 | */ | ||
2455 | ppd->link_width_enabled = val; | ||
2456 | if (!(ppd->lflags & QIBL_LINKDOWN)) | ||
2457 | goto bail; | ||
2458 | /* | ||
2459 | * We set the QIBL_IB_FORCE_NOTIFY bit so updown | ||
2460 | 		 * will get called because we want to update | ||
2461 | * link_width_active, and the change may not take | ||
2462 | * effect for some time (if we are in POLL), so this | ||
2463 | * flag will force the updown routine to be called | ||
2464 | * on the next ibstatuschange down interrupt, even | ||
2465 | 		 * if it's not a down->up transition. | ||
2466 | */ | ||
2467 | val--; /* convert from IB to chip */ | ||
2468 | maskr = IBA7220_IBC_WIDTH_MASK; | ||
2469 | lsb = IBA7220_IBC_WIDTH_SHIFT; | ||
2470 | setforce = 1; | ||
2471 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2472 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
2473 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2474 | break; | ||
2475 | |||
2476 | case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ | ||
2477 | /* | ||
2478 | * If we turn off IB1.2, need to preset SerDes defaults, | ||
2479 | * but not right now. Set a flag for the next time | ||
2480 | * we command the link down. As with width, only write the | ||
2481 | * actual register if the link is currently down, otherwise | ||
2482 | * takes effect on next link change. Since setting is being | ||
2483 | 		 * explicitly requested (via MAD or sysfs), clear autoneg | ||
2484 | * failure status if speed autoneg is enabled. | ||
2485 | */ | ||
2486 | ppd->link_speed_enabled = val; | ||
2487 | if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) && | ||
2488 | !(val & (val - 1))) | ||
2489 | dd->cspec->presets_needed = 1; | ||
2490 | if (!(ppd->lflags & QIBL_LINKDOWN)) | ||
2491 | goto bail; | ||
2492 | /* | ||
2493 | * We set the QIBL_IB_FORCE_NOTIFY bit so updown | ||
2494 | 		 * will get called because we want to update | ||
2495 | * link_speed_active, and the change may not take | ||
2496 | * effect for some time (if we are in POLL), so this | ||
2497 | * flag will force the updown routine to be called | ||
2498 | * on the next ibstatuschange down interrupt, even | ||
2499 | 		 * if it's not a down->up transition. | ||
2500 | */ | ||
2501 | if (val == (QIB_IB_SDR | QIB_IB_DDR)) { | ||
2502 | val = IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2503 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2504 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2505 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
2506 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2507 | } else | ||
2508 | val = val == QIB_IB_DDR ? | ||
2509 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
2510 | maskr = IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2511 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2512 | /* IBTA 1.2 mode + speed bits are contiguous */ | ||
2513 | lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE); | ||
2514 | setforce = 1; | ||
2515 | break; | ||
2516 | |||
2517 | case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ | ||
2518 | lsb = IBA7220_IBC_RXPOL_SHIFT; | ||
2519 | maskr = IBA7220_IBC_RXPOL_MASK; | ||
2520 | break; | ||
2521 | |||
2522 | case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ | ||
2523 | lsb = IBA7220_IBC_LREV_SHIFT; | ||
2524 | maskr = IBA7220_IBC_LREV_MASK; | ||
2525 | break; | ||
2526 | |||
2527 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
2528 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2529 | OverrunThreshold); | ||
2530 | if (maskr != val) { | ||
2531 | ppd->cpspec->ibcctrl &= | ||
2532 | ~SYM_MASK(IBCCtrl, OverrunThreshold); | ||
2533 | ppd->cpspec->ibcctrl |= (u64) val << | ||
2534 | SYM_LSB(IBCCtrl, OverrunThreshold); | ||
2535 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2536 | qib_write_kreg(dd, kr_scratch, 0); | ||
2537 | } | ||
2538 | goto bail; | ||
2539 | |||
2540 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
2541 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2542 | PhyerrThreshold); | ||
2543 | if (maskr != val) { | ||
2544 | ppd->cpspec->ibcctrl &= | ||
2545 | ~SYM_MASK(IBCCtrl, PhyerrThreshold); | ||
2546 | ppd->cpspec->ibcctrl |= (u64) val << | ||
2547 | SYM_LSB(IBCCtrl, PhyerrThreshold); | ||
2548 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2549 | qib_write_kreg(dd, kr_scratch, 0); | ||
2550 | } | ||
2551 | goto bail; | ||
2552 | |||
2553 | case QIB_IB_CFG_PKEYS: /* update pkeys */ | ||
2554 | maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | | ||
2555 | ((u64) ppd->pkeys[2] << 32) | | ||
2556 | ((u64) ppd->pkeys[3] << 48); | ||
2557 | qib_write_kreg(dd, kr_partitionkey, maskr); | ||
2558 | goto bail; | ||
2559 | |||
2560 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
2561 | /* will only take effect when the link state changes */ | ||
2562 | if (val == IB_LINKINITCMD_POLL) | ||
2563 | ppd->cpspec->ibcctrl &= | ||
2564 | ~SYM_MASK(IBCCtrl, LinkDownDefaultState); | ||
2565 | else /* SLEEP */ | ||
2566 | ppd->cpspec->ibcctrl |= | ||
2567 | SYM_MASK(IBCCtrl, LinkDownDefaultState); | ||
2568 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2569 | qib_write_kreg(dd, kr_scratch, 0); | ||
2570 | goto bail; | ||
2571 | |||
2572 | case QIB_IB_CFG_MTU: /* update the MTU in IBC */ | ||
2573 | /* | ||
2574 | * Update our housekeeping variables, and set IBC max | ||
2575 | * size, same as init code; max IBC is max we allow in | ||
2576 | 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords. | ||
2577 | * Set even if it's unchanged, print debug message only | ||
2578 | * on changes. | ||
2579 | */ | ||
2580 | val = (ppd->ibmaxlen >> 2) + 1; | ||
2581 | ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen); | ||
2582 | ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen); | ||
2583 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2584 | qib_write_kreg(dd, kr_scratch, 0); | ||
2585 | goto bail; | ||
2586 | |||
2587 | case QIB_IB_CFG_LSTATE: /* set the IB link state */ | ||
2588 | switch (val & 0xffff0000) { | ||
2589 | case IB_LINKCMD_DOWN: | ||
2590 | lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; | ||
2591 | if (!ppd->cpspec->ibdeltainprog && | ||
2592 | qib_compat_ddr_negotiate) { | ||
2593 | ppd->cpspec->ibdeltainprog = 1; | ||
2594 | ppd->cpspec->ibsymsnap = | ||
2595 | read_7220_creg32(dd, cr_ibsymbolerr); | ||
2596 | ppd->cpspec->iblnkerrsnap = | ||
2597 | read_7220_creg32(dd, cr_iblinkerrrecov); | ||
2598 | } | ||
2599 | break; | ||
2600 | |||
2601 | case IB_LINKCMD_ARMED: | ||
2602 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; | ||
2603 | break; | ||
2604 | |||
2605 | case IB_LINKCMD_ACTIVE: | ||
2606 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; | ||
2607 | break; | ||
2608 | |||
2609 | default: | ||
2610 | ret = -EINVAL; | ||
2611 | qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); | ||
2612 | goto bail; | ||
2613 | } | ||
2614 | switch (val & 0xffff) { | ||
2615 | case IB_LINKINITCMD_NOP: | ||
2616 | licmd = 0; | ||
2617 | break; | ||
2618 | |||
2619 | case IB_LINKINITCMD_POLL: | ||
2620 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; | ||
2621 | break; | ||
2622 | |||
2623 | case IB_LINKINITCMD_SLEEP: | ||
2624 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; | ||
2625 | break; | ||
2626 | |||
2627 | case IB_LINKINITCMD_DISABLE: | ||
2628 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; | ||
2629 | ppd->cpspec->chase_end = 0; | ||
2630 | /* | ||
2631 | 		 * Stop the state chase counter and timer, if running; | ||
2632 | 		 * wait for the pending timer, but don't clear .data (ppd)! | ||
2633 | */ | ||
2634 | if (ppd->cpspec->chase_timer.expires) { | ||
2635 | del_timer_sync(&ppd->cpspec->chase_timer); | ||
2636 | ppd->cpspec->chase_timer.expires = 0; | ||
2637 | } | ||
2638 | break; | ||
2639 | |||
2640 | default: | ||
2641 | ret = -EINVAL; | ||
2642 | qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", | ||
2643 | val & 0xffff); | ||
2644 | goto bail; | ||
2645 | } | ||
2646 | qib_set_ib_7220_lstate(ppd, lcmd, licmd); | ||
2647 | goto bail; | ||
2648 | |||
2649 | case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ | ||
2650 | if (val > IBA7220_IBC_HRTBT_MASK) { | ||
2651 | ret = -EINVAL; | ||
2652 | goto bail; | ||
2653 | } | ||
2654 | lsb = IBA7220_IBC_HRTBT_SHIFT; | ||
2655 | maskr = IBA7220_IBC_HRTBT_MASK; | ||
2656 | break; | ||
2657 | |||
2658 | default: | ||
2659 | ret = -EINVAL; | ||
2660 | goto bail; | ||
2661 | } | ||
2662 | ppd->cpspec->ibcddrctrl &= ~(maskr << lsb); | ||
2663 | ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb); | ||
2664 | qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); | ||
2665 | qib_write_kreg(dd, kr_scratch, 0); | ||
2666 | if (setforce) { | ||
2667 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2668 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
2669 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2670 | } | ||
2671 | bail: | ||
2672 | return ret; | ||
2673 | } | ||
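
/*
 * A minimal sketch (editor's helper, not driver API) of the
 * read-modify-write the tail of qib_7220_set_ib_cfg() applies for
 * every maskr/lsb case: clear the field, then insert the
 * right-justified value.
 */
static inline u64 ibcddr_field_set(u64 reg, u64 maskr, int lsb, u64 val)
{
	reg &= ~(maskr << lsb);
	reg |= (val & maskr) << lsb;
	return reg;
}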
2674 | |||
2675 | static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) | ||
2676 | { | ||
2677 | int ret = 0; | ||
2678 | u64 val, ddr; | ||
2679 | |||
2680 | if (!strncmp(what, "ibc", 3)) { | ||
2681 | ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); | ||
2682 | val = 0; /* disable heart beat, so link will come up */ | ||
2683 | qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", | ||
2684 | ppd->dd->unit, ppd->port); | ||
2685 | } else if (!strncmp(what, "off", 3)) { | ||
2686 | ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); | ||
2687 | /* enable heart beat again */ | ||
2688 | val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; | ||
2689 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | ||
2690 | "(normal)\n", ppd->dd->unit, ppd->port); | ||
2691 | } else | ||
2692 | ret = -EINVAL; | ||
2693 | if (!ret) { | ||
2694 | qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2695 | ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK | ||
2696 | << IBA7220_IBC_HRTBT_SHIFT); | ||
2697 | ppd->cpspec->ibcddrctrl = ddr | val; | ||
2698 | qib_write_kreg(ppd->dd, kr_ibcddrctrl, | ||
2699 | ppd->cpspec->ibcddrctrl); | ||
2700 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
2701 | } | ||
2702 | return ret; | ||
2703 | } | ||
2704 | |||
2705 | static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, | ||
2706 | u32 updegr, u32 egrhd) | ||
2707 | { | ||
2708 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | ||
2709 | if (updegr) | ||
2710 | qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); | ||
2711 | } | ||
2712 | |||
2713 | static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd) | ||
2714 | { | ||
2715 | u32 head, tail; | ||
2716 | |||
2717 | head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); | ||
2718 | if (rcd->rcvhdrtail_kvaddr) | ||
2719 | tail = qib_get_rcvhdrtail(rcd); | ||
2720 | else | ||
2721 | tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); | ||
2722 | return head == tail; | ||
2723 | } | ||
2724 | |||
2725 | /* | ||
2726 | * Modify the RCVCTRL register in chip-specific way. This | ||
2727 | * is a function because bit positions and (future) register | ||
2728 | * location are chip-specific, but the needed operations are | ||
2729 | * generic. <op> is a bit-mask because we often want to | ||
2730 | * do multiple modifications. | ||
2731 | */ | ||
2732 | static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op, | ||
2733 | int ctxt) | ||
2734 | { | ||
2735 | struct qib_devdata *dd = ppd->dd; | ||
2736 | u64 mask, val; | ||
2737 | unsigned long flags; | ||
2738 | |||
2739 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
2740 | if (op & QIB_RCVCTRL_TAILUPD_ENB) | ||
2741 | dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT); | ||
2742 | if (op & QIB_RCVCTRL_TAILUPD_DIS) | ||
2743 | dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT); | ||
2744 | if (op & QIB_RCVCTRL_PKEY_ENB) | ||
2745 | dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT); | ||
2746 | if (op & QIB_RCVCTRL_PKEY_DIS) | ||
2747 | dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT); | ||
2748 | if (ctxt < 0) | ||
2749 | mask = (1ULL << dd->ctxtcnt) - 1; | ||
2750 | else | ||
2751 | mask = (1ULL << ctxt); | ||
2752 | if (op & QIB_RCVCTRL_CTXT_ENB) { | ||
2753 | /* always done for specific ctxt */ | ||
2754 | dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable)); | ||
2755 | if (!(dd->flags & QIB_NODMA_RTAIL)) | ||
2756 | dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT; | ||
2757 | /* Write these registers before the context is enabled. */ | ||
2758 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, | ||
2759 | dd->rcd[ctxt]->rcvhdrqtailaddr_phys); | ||
2760 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, | ||
2761 | dd->rcd[ctxt]->rcvhdrq_phys); | ||
2762 | dd->rcd[ctxt]->seq_cnt = 1; | ||
2763 | } | ||
2764 | if (op & QIB_RCVCTRL_CTXT_DIS) | ||
2765 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable)); | ||
2766 | if (op & QIB_RCVCTRL_INTRAVAIL_ENB) | ||
2767 | dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT); | ||
2768 | if (op & QIB_RCVCTRL_INTRAVAIL_DIS) | ||
2769 | dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT); | ||
2770 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
2771 | if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) { | ||
2772 | /* arm rcv interrupt */ | ||
2773 | val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) | | ||
2774 | dd->rhdrhead_intr_off; | ||
2775 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
2776 | } | ||
2777 | if (op & QIB_RCVCTRL_CTXT_ENB) { | ||
2778 | /* | ||
2779 | * Init the context registers also; if we were | ||
2780 | * disabled, tail and head should both be zero | ||
2781 | * already from the enable, but since we don't | ||
2782 | * know, we have to do it explicitly. | ||
2783 | */ | ||
2784 | val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); | ||
2785 | qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); | ||
2786 | |||
2787 | val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); | ||
2788 | dd->rcd[ctxt]->head = val; | ||
2789 | /* If kctxt, interrupt on next receive. */ | ||
2790 | if (ctxt < dd->first_user_ctxt) | ||
2791 | val |= dd->rhdrhead_intr_off; | ||
2792 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
2793 | } | ||
2794 | if (op & QIB_RCVCTRL_CTXT_DIS) { | ||
2795 | if (ctxt >= 0) { | ||
2796 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0); | ||
2797 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0); | ||
2798 | } else { | ||
2799 | unsigned i; | ||
2800 | |||
2801 | for (i = 0; i < dd->cfgctxts; i++) { | ||
2802 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, | ||
2803 | i, 0); | ||
2804 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0); | ||
2805 | } | ||
2806 | } | ||
2807 | } | ||
2808 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
2809 | } | ||
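/*
 * Illustrative sketch (editor's addition, not part of the original
 * driver): a typical caller ORs op flags to enable a context and its
 * availability interrupt in one call; the ctxt value is hypothetical.
 *
 *	rcvctrl_7220_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 *
 * Passing ctxt < 0 makes the mask (1ULL << ctxtcnt) - 1, so the
 * mask-based operations (e.g. INTRAVAIL) apply to all contexts.
 */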
2810 | |||
2811 | /* | ||
2812 | * Modify the SENDCTRL register in chip-specific way. This | ||
2813 | * is a function because there may be multiple such registers with | ||
2814 | * slightly different layouts. To start, we assume the | ||
2815 | * "canonical" register layout of the first chips. | ||
2816 | * Chip requires no back-to-back sendctrl writes, so write the | ||
2817 | * scratch register after writing sendctrl. | ||
2818 | */ | ||
2819 | static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op) | ||
2820 | { | ||
2821 | struct qib_devdata *dd = ppd->dd; | ||
2822 | u64 tmp_dd_sendctrl; | ||
2823 | unsigned long flags; | ||
2824 | |||
2825 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
2826 | |||
2827 | /* First the ones that are "sticky", saved in shadow */ | ||
2828 | if (op & QIB_SENDCTRL_CLEAR) | ||
2829 | dd->sendctrl = 0; | ||
2830 | if (op & QIB_SENDCTRL_SEND_DIS) | ||
2831 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable); | ||
2832 | else if (op & QIB_SENDCTRL_SEND_ENB) { | ||
2833 | dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable); | ||
2834 | if (dd->flags & QIB_USE_SPCL_TRIG) | ||
2835 | dd->sendctrl |= SYM_MASK(SendCtrl, | ||
2836 | SSpecialTriggerEn); | ||
2837 | } | ||
2838 | if (op & QIB_SENDCTRL_AVAIL_DIS) | ||
2839 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
2840 | else if (op & QIB_SENDCTRL_AVAIL_ENB) | ||
2841 | dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
2842 | |||
2843 | if (op & QIB_SENDCTRL_DISARM_ALL) { | ||
2844 | u32 i, last; | ||
2845 | |||
2846 | tmp_dd_sendctrl = dd->sendctrl; | ||
2847 | /* | ||
2848 | * disarm any that are not yet launched, disabling sends | ||
2849 | * and updates until done. | ||
2850 | */ | ||
2851 | last = dd->piobcnt2k + dd->piobcnt4k; | ||
2852 | tmp_dd_sendctrl &= | ||
2853 | ~(SYM_MASK(SendCtrl, SPioEnable) | | ||
2854 | SYM_MASK(SendCtrl, SendBufAvailUpd)); | ||
2855 | for (i = 0; i < last; i++) { | ||
2856 | qib_write_kreg(dd, kr_sendctrl, | ||
2857 | tmp_dd_sendctrl | | ||
2858 | SYM_MASK(SendCtrl, Disarm) | i); | ||
2859 | qib_write_kreg(dd, kr_scratch, 0); | ||
2860 | } | ||
2861 | } | ||
2862 | |||
2863 | tmp_dd_sendctrl = dd->sendctrl; | ||
2864 | |||
2865 | if (op & QIB_SENDCTRL_FLUSH) | ||
2866 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort); | ||
2867 | if (op & QIB_SENDCTRL_DISARM) | ||
2868 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | | ||
2869 | ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) << | ||
2870 | SYM_LSB(SendCtrl, DisarmPIOBuf)); | ||
2871 | if ((op & QIB_SENDCTRL_AVAIL_BLIP) && | ||
2872 | (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | ||
2873 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
2874 | |||
2875 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); | ||
2876 | qib_write_kreg(dd, kr_scratch, 0); | ||
2877 | |||
2878 | if (op & QIB_SENDCTRL_AVAIL_BLIP) { | ||
2879 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
2880 | qib_write_kreg(dd, kr_scratch, 0); | ||
2881 | } | ||
2882 | |||
2883 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
2884 | |||
2885 | if (op & QIB_SENDCTRL_FLUSH) { | ||
2886 | u32 v; | ||
2887 | /* | ||
2888 | * ensure writes have hit chip, then do a few | ||
2889 | * more reads, to allow DMA of pioavail registers | ||
2890 | * to occur, so in-memory copy is in sync with | ||
2891 | * the chip. Not always safe to sleep. | ||
2892 | */ | ||
2893 | v = qib_read_kreg32(dd, kr_scratch); | ||
2894 | qib_write_kreg(dd, kr_scratch, v); | ||
2895 | v = qib_read_kreg32(dd, kr_scratch); | ||
2896 | qib_write_kreg(dd, kr_scratch, v); | ||
2897 | qib_read_kreg32(dd, kr_scratch); | ||
2898 | } | ||
2899 | } | ||
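/*
 * Editor's sketch of the idiom described in the comment above: the
 * chip tolerates no back-to-back sendctrl writes, so each write is
 * immediately flushed with a scratch write, always while holding
 * dd->sendctrl_lock:
 *
 *	spin_lock_irqsave(&dd->sendctrl_lock, flags);
 *	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
 *	qib_write_kreg(dd, kr_scratch, 0);
 *	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
 */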
2900 | |||
2901 | /** | ||
2902 | * qib_portcntr_7220 - read a per-port counter | ||
2903 | * @ppd: the qlogic_ib per-port data | ||
2904 | * @reg: the counter to snapshot | ||
2905 | */ | ||
2906 | static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg) | ||
2907 | { | ||
2908 | u64 ret = 0ULL; | ||
2909 | struct qib_devdata *dd = ppd->dd; | ||
2910 | u16 creg; | ||
2911 | /* 0xffff for unimplemented or synthesized counters */ | ||
2912 | static const u16 xlator[] = { | ||
2913 | [QIBPORTCNTR_PKTSEND] = cr_pktsend, | ||
2914 | [QIBPORTCNTR_WORDSEND] = cr_wordsend, | ||
2915 | [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount, | ||
2916 | [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount, | ||
2917 | [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount, | ||
2918 | [QIBPORTCNTR_SENDSTALL] = cr_sendstall, | ||
2919 | [QIBPORTCNTR_PKTRCV] = cr_pktrcv, | ||
2920 | [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount, | ||
2921 | [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount, | ||
2922 | [QIBPORTCNTR_RCVEBP] = cr_rcvebp, | ||
2923 | [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl, | ||
2924 | [QIBPORTCNTR_WORDRCV] = cr_wordrcv, | ||
2925 | [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt, | ||
2926 | [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr, | ||
2927 | [QIBPORTCNTR_RXVLERR] = cr_rxvlerr, | ||
2928 | [QIBPORTCNTR_ERRICRC] = cr_erricrc, | ||
2929 | [QIBPORTCNTR_ERRVCRC] = cr_errvcrc, | ||
2930 | [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc, | ||
2931 | [QIBPORTCNTR_BADFORMAT] = cr_badformat, | ||
2932 | [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen, | ||
2933 | [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr, | ||
2934 | [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen, | ||
2935 | [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl, | ||
2936 | [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl, | ||
2937 | [QIBPORTCNTR_ERRLINK] = cr_errlink, | ||
2938 | [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown, | ||
2939 | [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov, | ||
2940 | [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr, | ||
2941 | [QIBPORTCNTR_PSINTERVAL] = cr_psinterval, | ||
2942 | [QIBPORTCNTR_PSSTART] = cr_psstart, | ||
2943 | [QIBPORTCNTR_PSSTAT] = cr_psstat, | ||
2944 | [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt, | ||
2945 | [QIBPORTCNTR_ERRPKEY] = cr_errpkey, | ||
2946 | [QIBPORTCNTR_KHDROVFL] = 0xffff, | ||
2947 | }; | ||
2948 | |||
2949 | if (reg >= ARRAY_SIZE(xlator)) { | ||
2950 | qib_devinfo(ppd->dd->pcidev, | ||
2951 | "Unimplemented portcounter %u\n", reg); | ||
2952 | goto done; | ||
2953 | } | ||
2954 | creg = xlator[reg]; | ||
2955 | |||
2956 | if (reg == QIBPORTCNTR_KHDROVFL) { | ||
2957 | int i; | ||
2958 | |||
2959 | /* sum over all kernel contexts */ | ||
2960 | for (i = 0; i < dd->first_user_ctxt; i++) | ||
2961 | ret += read_7220_creg32(dd, cr_portovfl + i); | ||
2962 | } | ||
2963 | if (creg == 0xffff) | ||
2964 | goto done; | ||
2965 | |||
2966 | /* | ||
2967 | * Only fast-incrementing counters are 64-bit; use 32-bit reads to | ||
2968 | * avoid two independent reads when on Opteron. | ||
2969 | */ | ||
2970 | if ((creg == cr_wordsend || creg == cr_wordrcv || | ||
2971 | creg == cr_pktsend || creg == cr_pktrcv)) | ||
2972 | ret = read_7220_creg(dd, creg); | ||
2973 | else | ||
2974 | ret = read_7220_creg32(dd, creg); | ||
2975 | if (creg == cr_ibsymbolerr) { | ||
2976 | if (dd->pport->cpspec->ibdeltainprog) | ||
2977 | ret -= ret - ppd->cpspec->ibsymsnap; | ||
2978 | ret -= dd->pport->cpspec->ibsymdelta; | ||
2979 | } else if (creg == cr_iblinkerrrecov) { | ||
2980 | if (dd->pport->cpspec->ibdeltainprog) | ||
2981 | ret -= ret - ppd->cpspec->iblnkerrsnap; | ||
2982 | ret -= dd->pport->cpspec->iblnkerrdelta; | ||
2983 | } | ||
2984 | done: | ||
2985 | return ret; | ||
2986 | } | ||
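/*
 * Illustrative usage (editor's example): callers pass a generic
 * QIBPORTCNTR_* value and get the translated chip counter back, e.g.
 *
 *	u64 symerrs = qib_portcntr_7220(ppd, QIBPORTCNTR_IBSYMBOLERR);
 *
 * While ibdeltainprog is set (during DDR autonegotiation), the
 * "ret -= ret - snap" adjustment above reports the pre-negotiation
 * snapshot instead of the live count, so errors provoked by the
 * non-compliant speed negotiation are not charged to the link.
 * QIBPORTCNTR_KHDROVFL has no hardware register (0xffff in xlator);
 * it is synthesized by summing cr_portovfl over the kernel contexts.
 */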
2987 | |||
2988 | /* | ||
2989 | * Device counter names (not port-specific), one line per stat, | ||
2990 | * single string. Used by utilities like ipathstats to print the stats | ||
2991 | * in a way which works for different versions of drivers, without changing | ||
2992 | * the utility. Names need to be 12 chars or less (w/o newline), for proper | ||
2993 | * display by utility. | ||
2994 | * Non-error counters are first. | ||
2995 | * Start of "error" conters is indicated by a leading "E " on the first | ||
2996 | * "error" counter, and doesn't count in label length. | ||
2997 | * The EgrOvfl list needs to be last so we truncate them at the configured | ||
2998 | * context count for the device. | ||
2999 | * cntr7220indices contains the corresponding register indices. | ||
3000 | */ | ||
3001 | static const char cntr7220names[] = | ||
3002 | "Interrupts\n" | ||
3003 | "HostBusStall\n" | ||
3004 | "E RxTIDFull\n" | ||
3005 | "RxTIDInvalid\n" | ||
3006 | "Ctxt0EgrOvfl\n" | ||
3007 | "Ctxt1EgrOvfl\n" | ||
3008 | "Ctxt2EgrOvfl\n" | ||
3009 | "Ctxt3EgrOvfl\n" | ||
3010 | "Ctxt4EgrOvfl\n" | ||
3011 | "Ctxt5EgrOvfl\n" | ||
3012 | "Ctxt6EgrOvfl\n" | ||
3013 | "Ctxt7EgrOvfl\n" | ||
3014 | "Ctxt8EgrOvfl\n" | ||
3015 | "Ctxt9EgrOvfl\n" | ||
3016 | "Ctx10EgrOvfl\n" | ||
3017 | "Ctx11EgrOvfl\n" | ||
3018 | "Ctx12EgrOvfl\n" | ||
3019 | "Ctx13EgrOvfl\n" | ||
3020 | "Ctx14EgrOvfl\n" | ||
3021 | "Ctx15EgrOvfl\n" | ||
3022 | "Ctx16EgrOvfl\n"; | ||
3023 | |||
3024 | static const size_t cntr7220indices[] = { | ||
3025 | cr_lbint, | ||
3026 | cr_lbflowstall, | ||
3027 | cr_errtidfull, | ||
3028 | cr_errtidvalid, | ||
3029 | cr_portovfl + 0, | ||
3030 | cr_portovfl + 1, | ||
3031 | cr_portovfl + 2, | ||
3032 | cr_portovfl + 3, | ||
3033 | cr_portovfl + 4, | ||
3034 | cr_portovfl + 5, | ||
3035 | cr_portovfl + 6, | ||
3036 | cr_portovfl + 7, | ||
3037 | cr_portovfl + 8, | ||
3038 | cr_portovfl + 9, | ||
3039 | cr_portovfl + 10, | ||
3040 | cr_portovfl + 11, | ||
3041 | cr_portovfl + 12, | ||
3042 | cr_portovfl + 13, | ||
3043 | cr_portovfl + 14, | ||
3044 | cr_portovfl + 15, | ||
3045 | cr_portovfl + 16, | ||
3046 | }; | ||
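/*
 * Editor's note, with a worked pairing: the two arrays above are
 * parallel, so line i of cntr7220names labels cntr7220indices[i]:
 * "Interrupts" -> cr_lbint, "HostBusStall" -> cr_lbflowstall, and
 * "Ctxt0EgrOvfl" through "Ctx16EgrOvfl" -> cr_portovfl + 0..16.
 * init_7220_cntrnames() truncates the list at the configured context
 * count, which is why the EgrOvfl entries must come last.
 */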
3047 | |||
3048 | /* | ||
3049 | * Same as cntr7220names and cntr7220indices, but for port-specific counters. | ||
3050 | * portcntr7220indices is somewhat complicated by some registers needing | ||
3051 | * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG | ||
3052 | */ | ||
3053 | static const char portcntr7220names[] = | ||
3054 | "TxPkt\n" | ||
3055 | "TxFlowPkt\n" | ||
3056 | "TxWords\n" | ||
3057 | "RxPkt\n" | ||
3058 | "RxFlowPkt\n" | ||
3059 | "RxWords\n" | ||
3060 | "TxFlowStall\n" | ||
3061 | "TxDmaDesc\n" /* 7220 and 7322-only */ | ||
3062 | "E RxDlidFltr\n" /* 7220 and 7322-only */ | ||
3063 | "IBStatusChng\n" | ||
3064 | "IBLinkDown\n" | ||
3065 | "IBLnkRecov\n" | ||
3066 | "IBRxLinkErr\n" | ||
3067 | "IBSymbolErr\n" | ||
3068 | "RxLLIErr\n" | ||
3069 | "RxBadFormat\n" | ||
3070 | "RxBadLen\n" | ||
3071 | "RxBufOvrfl\n" | ||
3072 | "RxEBP\n" | ||
3073 | "RxFlowCtlErr\n" | ||
3074 | "RxICRCerr\n" | ||
3075 | "RxLPCRCerr\n" | ||
3076 | "RxVCRCerr\n" | ||
3077 | "RxInvalLen\n" | ||
3078 | "RxInvalPKey\n" | ||
3079 | "RxPktDropped\n" | ||
3080 | "TxBadLength\n" | ||
3081 | "TxDropped\n" | ||
3082 | "TxInvalLen\n" | ||
3083 | "TxUnderrun\n" | ||
3084 | "TxUnsupVL\n" | ||
3085 | "RxLclPhyErr\n" /* 7220 and 7322-only */ | ||
3086 | "RxVL15Drop\n" /* 7220 and 7322-only */ | ||
3087 | "RxVlErr\n" /* 7220 and 7322-only */ | ||
3088 | "XcessBufOvfl\n" /* 7220 and 7322-only */ | ||
3089 | ; | ||
3090 | |||
3091 | #define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */ | ||
3092 | static const size_t portcntr7220indices[] = { | ||
3093 | QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, | ||
3094 | cr_pktsendflow, | ||
3095 | QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, | ||
3096 | QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, | ||
3097 | cr_pktrcvflowctrl, | ||
3098 | QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, | ||
3099 | QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, | ||
3100 | cr_txsdmadesc, | ||
3101 | cr_rxdlidfltr, | ||
3102 | cr_ibstatuschange, | ||
3103 | QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, | ||
3104 | QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, | ||
3105 | QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, | ||
3106 | QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, | ||
3107 | QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, | ||
3108 | QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, | ||
3109 | QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, | ||
3110 | QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, | ||
3111 | QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, | ||
3112 | cr_rcvflowctrl_err, | ||
3113 | QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, | ||
3114 | QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, | ||
3115 | QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, | ||
3116 | QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, | ||
3117 | QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, | ||
3118 | QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, | ||
3119 | cr_invalidslen, | ||
3120 | cr_senddropped, | ||
3121 | cr_errslen, | ||
3122 | cr_sendunderrun, | ||
3123 | cr_txunsupvl, | ||
3124 | QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, | ||
3125 | QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, | ||
3126 | QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, | ||
3127 | QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, | ||
3128 | }; | ||
3129 | |||
3130 | /* do all the setup to make the counter reads efficient later */ | ||
3131 | static void init_7220_cntrnames(struct qib_devdata *dd) | ||
3132 | { | ||
3133 | int i, j = 0; | ||
3134 | char *s; | ||
3135 | |||
3136 | for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts; | ||
3137 | i++) { | ||
3138 | /* we always have at least one counter before the egrovfl */ | ||
3139 | if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) | ||
3140 | j = 1; | ||
3141 | s = strchr(s + 1, '\n'); | ||
3142 | if (s && j) | ||
3143 | j++; | ||
3144 | } | ||
3145 | dd->cspec->ncntrs = i; | ||
3146 | if (!s) | ||
3147 | /* full list; size is without terminating null */ | ||
3148 | dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1; | ||
3149 | else | ||
3150 | dd->cspec->cntrnamelen = 1 + s - cntr7220names; | ||
3151 | dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs | ||
3152 | * sizeof(u64), GFP_KERNEL); | ||
3153 | if (!dd->cspec->cntrs) | ||
3154 | qib_dev_err(dd, "Failed allocation for counters\n"); | ||
3155 | |||
3156 | for (i = 0, s = (char *)portcntr7220names; s; i++) | ||
3157 | s = strchr(s + 1, '\n'); | ||
3158 | dd->cspec->nportcntrs = i - 1; | ||
3159 | dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1; | ||
3160 | dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs | ||
3161 | * sizeof(u64), GFP_KERNEL); | ||
3162 | if (!dd->cspec->portcntrs) | ||
3163 | qib_dev_err(dd, "Failed allocation for portcounters\n"); | ||
3164 | } | ||
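/*
 * Worked example (editor's addition; the context count is
 * hypothetical): with dd->cfgctxts == 5, the first scan counts the
 * four fixed counters plus the EgrOvfl lines for contexts 0..4, so
 * ncntrs == 9 and cntrnamelen ends just past "Ctxt4EgrOvfl\n"; the
 * remaining EgrOvfl names and cr_portovfl entries are never reported.
 */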
3165 | |||
3166 | static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep, | ||
3167 | u64 **cntrp) | ||
3168 | { | ||
3169 | u32 ret; | ||
3170 | |||
3171 | if (!dd->cspec->cntrs) { | ||
3172 | ret = 0; | ||
3173 | goto done; | ||
3174 | } | ||
3175 | |||
3176 | if (namep) { | ||
3177 | *namep = (char *)cntr7220names; | ||
3178 | ret = dd->cspec->cntrnamelen; | ||
3179 | if (pos >= ret) | ||
3180 | ret = 0; /* final read after getting everything */ | ||
3181 | } else { | ||
3182 | u64 *cntr = dd->cspec->cntrs; | ||
3183 | int i; | ||
3184 | |||
3185 | ret = dd->cspec->ncntrs * sizeof(u64); | ||
3186 | if (!cntr || pos >= ret) { | ||
3187 | /* everything read, or couldn't get memory */ | ||
3188 | ret = 0; | ||
3189 | goto done; | ||
3190 | } | ||
3191 | |||
3192 | *cntrp = cntr; | ||
3193 | for (i = 0; i < dd->cspec->ncntrs; i++) | ||
3194 | *cntr++ = read_7220_creg32(dd, cntr7220indices[i]); | ||
3195 | } | ||
3196 | done: | ||
3197 | return ret; | ||
3198 | } | ||
3199 | |||
3200 | static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, | ||
3201 | char **namep, u64 **cntrp) | ||
3202 | { | ||
3203 | u32 ret; | ||
3204 | |||
3205 | if (!dd->cspec->portcntrs) { | ||
3206 | ret = 0; | ||
3207 | goto done; | ||
3208 | } | ||
3209 | if (namep) { | ||
3210 | *namep = (char *)portcntr7220names; | ||
3211 | ret = dd->cspec->portcntrnamelen; | ||
3212 | if (pos >= ret) | ||
3213 | ret = 0; /* final read after getting everything */ | ||
3214 | } else { | ||
3215 | u64 *cntr = dd->cspec->portcntrs; | ||
3216 | struct qib_pportdata *ppd = &dd->pport[port]; | ||
3217 | int i; | ||
3218 | |||
3219 | ret = dd->cspec->nportcntrs * sizeof(u64); | ||
3220 | if (!cntr || pos >= ret) { | ||
3221 | /* everything read, or couldn't get memory */ | ||
3222 | ret = 0; | ||
3223 | goto done; | ||
3224 | } | ||
3225 | *cntrp = cntr; | ||
3226 | for (i = 0; i < dd->cspec->nportcntrs; i++) { | ||
3227 | if (portcntr7220indices[i] & _PORT_VIRT_FLAG) | ||
3228 | *cntr++ = qib_portcntr_7220(ppd, | ||
3229 | portcntr7220indices[i] & | ||
3230 | ~_PORT_VIRT_FLAG); | ||
3231 | else | ||
3232 | *cntr++ = read_7220_creg32(dd, | ||
3233 | portcntr7220indices[i]); | ||
3234 | } | ||
3235 | } | ||
3236 | done: | ||
3237 | return ret; | ||
3238 | } | ||
3239 | |||
3240 | /** | ||
3241 | * qib_get_7220_faststats - get word counters from chip before they overflow | ||
3242 | * @opaque: contains a pointer to the qlogic_ib device qib_devdata | ||
3243 | * | ||
3244 | * This needs more work; in particular, a decision on whether we | ||
3245 | * really need traffic_wds maintained the way it is. | ||
3246 | * Called from add_timer. | ||
3247 | */ | ||
3248 | static void qib_get_7220_faststats(unsigned long opaque) | ||
3249 | { | ||
3250 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | ||
3251 | struct qib_pportdata *ppd = dd->pport; | ||
3252 | unsigned long flags; | ||
3253 | u64 traffic_wds; | ||
3254 | |||
3255 | /* | ||
3256 | * don't access the chip while running diags, or memory diags can | ||
3257 | * fail | ||
3258 | */ | ||
3259 | if (!(dd->flags & QIB_INITTED) || dd->diag_client) | ||
3260 | /* but re-arm the timer, for diags case; won't hurt other */ | ||
3261 | goto done; | ||
3262 | |||
3263 | /* | ||
3264 | * We now try to maintain an activity timer, based on traffic | ||
3265 | * exceeding a threshold, so we need to check the word-counts | ||
3266 | * even if they are 64-bit. | ||
3267 | */ | ||
3268 | traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) + | ||
3269 | qib_portcntr_7220(ppd, cr_wordrcv); | ||
3270 | spin_lock_irqsave(&dd->eep_st_lock, flags); | ||
3271 | traffic_wds -= dd->traffic_wds; | ||
3272 | dd->traffic_wds += traffic_wds; | ||
3273 | if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) | ||
3274 | atomic_add(5, &dd->active_time); /* S/B #define */ | ||
3275 | spin_unlock_irqrestore(&dd->eep_st_lock, flags); | ||
3276 | done: | ||
3277 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | ||
3278 | } | ||
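/*
 * Editor's sketch of the delta logic above: traffic_wds first holds
 * the absolute send+recv word count; subtracting the shadow leaves
 * only the words moved since the last tick, and the shadow is then
 * advanced to the current absolute value:
 *
 *	delta = now - dd->traffic_wds;
 *	dd->traffic_wds += delta;	(shadow becomes "now")
 *
 * Only the delta is compared against QIB_TRAFFIC_ACTIVE_THRESHOLD.
 */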
3279 | |||
3280 | /* | ||
3281 | * If we are using MSI, try to fall back to INTx. | ||
3282 | */ | ||
3283 | static int qib_7220_intr_fallback(struct qib_devdata *dd) | ||
3284 | { | ||
3285 | if (!dd->msi_lo) | ||
3286 | return 0; | ||
3287 | |||
3288 | qib_devinfo(dd->pcidev, "MSI interrupt not detected," | ||
3289 | " trying INTx interrupts\n"); | ||
3290 | qib_7220_free_irq(dd); | ||
3291 | qib_enable_intx(dd->pcidev); | ||
3292 | /* | ||
3293 | * Some newer kernels require free_irq before disable_msi, | ||
3294 | * and irq can be changed during disable and INTx enable | ||
3295 | * and we therefore need to use the pcidev->irq value, | ||
3296 | * not our saved MSI value. | ||
3297 | */ | ||
3298 | dd->cspec->irq = dd->pcidev->irq; | ||
3299 | qib_setup_7220_interrupt(dd); | ||
3300 | return 1; | ||
3301 | } | ||
3302 | |||
3303 | /* | ||
3304 | * Reset the XGXS (between serdes and IBC). Slightly less intrusive | ||
3305 | * than resetting the IBC or external link state, and useful in some | ||
3306 | * cases to cause some retraining. To do this right, we reset IBC | ||
3307 | * as well. | ||
3308 | */ | ||
3309 | static void qib_7220_xgxs_reset(struct qib_pportdata *ppd) | ||
3310 | { | ||
3311 | u64 val, prev_val; | ||
3312 | struct qib_devdata *dd = ppd->dd; | ||
3313 | |||
3314 | prev_val = qib_read_kreg64(dd, kr_xgxs_cfg); | ||
3315 | val = prev_val | QLOGIC_IB_XGXS_RESET; | ||
3316 | prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */ | ||
3317 | qib_write_kreg(dd, kr_control, | ||
3318 | dd->control & ~QLOGIC_IB_C_LINKENABLE); | ||
3319 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
3320 | qib_read_kreg32(dd, kr_scratch); | ||
3321 | qib_write_kreg(dd, kr_xgxs_cfg, prev_val); | ||
3322 | qib_write_kreg(dd, kr_control, dd->control); | ||
3323 | } | ||
3324 | |||
3325 | /* | ||
3326 | * For this chip, we want to use the same buffer every time | ||
3327 | * when we are trying to bring the link up (they are always VL15 | ||
3328 | * packets). At that link state the packet should always go out immediately | ||
3329 | * (or at least be discarded at the tx interface if the link is down). | ||
3330 | * If it doesn't, and the buffer isn't available, that means some other | ||
3331 | * sender has gotten ahead of us, and is preventing our packet from going | ||
3332 | * out. In that case, we flush all packets, and try again. If that still | ||
3333 | * fails, we fail the request, and hope things work the next time around. | ||
3334 | * | ||
3335 | * We don't need very complicated heuristics on whether the packet had | ||
3336 | * time to go out or not, since even at SDR 1X, it goes out in very short | ||
3337 | * time periods, covered by the chip reads done here and as part of the | ||
3338 | * flush. | ||
3339 | */ | ||
3340 | static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum) | ||
3341 | { | ||
3342 | u32 __iomem *buf; | ||
3343 | u32 lbuf = ppd->dd->cspec->lastbuf_for_pio; | ||
3344 | int do_cleanup; | ||
3345 | unsigned long flags; | ||
3346 | |||
3347 | /* | ||
3348 | * always blip to get avail list updated, since it's almost | ||
3349 | * always needed, and is fairly cheap. | ||
3350 | */ | ||
3351 | sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
3352 | qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ | ||
3353 | buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); | ||
3354 | if (buf) | ||
3355 | goto done; | ||
3356 | |||
3357 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
3358 | if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle && | ||
3359 | ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) { | ||
3360 | __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down); | ||
3361 | do_cleanup = 0; | ||
3362 | } else { | ||
3363 | do_cleanup = 1; | ||
3364 | qib_7220_sdma_hw_clean_up(ppd); | ||
3365 | } | ||
3366 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
3367 | |||
3368 | if (do_cleanup) { | ||
3369 | qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ | ||
3370 | buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); | ||
3371 | } | ||
3372 | done: | ||
3373 | return buf; | ||
3374 | } | ||
3375 | |||
3376 | /* | ||
3377 | * This code for non-IBTA-compliant IB speed negotiation is only known to | ||
3378 | * work for the SDR to DDR transition, and only between an HCA and a switch | ||
3379 | * with recent firmware. It is based on observed heuristics, rather than | ||
3380 | * actual knowledge of the non-compliant speed negotiation. | ||
3381 | * It has a number of hard-coded fields, since the hope is to rewrite this | ||
3382 | * when a spec is available on how the negotiation is intended to work. | ||
3383 | */ | ||
3384 | static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr, | ||
3385 | u32 dcnt, u32 *data) | ||
3386 | { | ||
3387 | int i; | ||
3388 | u64 pbc; | ||
3389 | u32 __iomem *piobuf; | ||
3390 | u32 pnum; | ||
3391 | struct qib_devdata *dd = ppd->dd; | ||
3392 | |||
3393 | i = 0; | ||
3394 | pbc = 7 + dcnt + 1; /* 7 dword header, dcnt data dwords, 1 icrc */ | ||
3395 | pbc |= PBC_7220_VL15_SEND; | ||
3396 | while (!(piobuf = get_7220_link_buf(ppd, &pnum))) { | ||
3397 | if (i++ > 5) | ||
3398 | return; | ||
3399 | udelay(2); | ||
3400 | } | ||
3401 | sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum)); | ||
3402 | writeq(pbc, piobuf); | ||
3403 | qib_flush_wc(); | ||
3404 | qib_pio_copy(piobuf + 2, hdr, 7); | ||
3405 | qib_pio_copy(piobuf + 9, data, dcnt); | ||
3406 | if (dd->flags & QIB_USE_SPCL_TRIG) { | ||
3407 | u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; | ||
3408 | |||
3409 | qib_flush_wc(); | ||
3410 | __raw_writel(0xaebecede, piobuf + spcl_off); | ||
3411 | } | ||
3412 | qib_flush_wc(); | ||
3413 | qib_sendbuf_done(dd, pnum); | ||
3414 | } | ||
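/*
 * Worked example (editor's addition): the PBC length field counts
 * the packet in dwords: 7 for the header, dcnt for the payload and
 * 1 for the ICRC. With dcnt == 0x40, as autoneg_7220_send() below
 * uses, that is 7 + 64 + 1 = 72 dwords, ORed with
 * PBC_7220_VL15_SEND so the packet is sent as VL15.
 */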
3415 | |||
3416 | /* | ||
3417 | * _start packet gets sent twice at start, _done gets sent twice at end | ||
3418 | */ | ||
3419 | static void autoneg_7220_send(struct qib_pportdata *ppd, int which) | ||
3420 | { | ||
3421 | struct qib_devdata *dd = ppd->dd; | ||
3422 | static u32 swapped; | ||
3423 | u32 dw, i, hcnt, dcnt, *data; | ||
3424 | static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; | ||
3425 | static u32 madpayload_start[0x40] = { | ||
3426 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
3427 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
3428 | 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ | ||
3429 | }; | ||
3430 | static u32 madpayload_done[0x40] = { | ||
3431 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
3432 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
3433 | 0x40000001, 0x1388, 0x15e, /* rest 0's */ | ||
3434 | }; | ||
3435 | |||
3436 | dcnt = ARRAY_SIZE(madpayload_start); | ||
3437 | hcnt = ARRAY_SIZE(hdr); | ||
3438 | if (!swapped) { | ||
3439 | /* for maintainability, do it at runtime */ | ||
3440 | for (i = 0; i < hcnt; i++) { | ||
3441 | dw = (__force u32) cpu_to_be32(hdr[i]); | ||
3442 | hdr[i] = dw; | ||
3443 | } | ||
3444 | for (i = 0; i < dcnt; i++) { | ||
3445 | dw = (__force u32) cpu_to_be32(madpayload_start[i]); | ||
3446 | madpayload_start[i] = dw; | ||
3447 | dw = (__force u32) cpu_to_be32(madpayload_done[i]); | ||
3448 | madpayload_done[i] = dw; | ||
3449 | } | ||
3450 | swapped = 1; | ||
3451 | } | ||
3452 | |||
3453 | data = which ? madpayload_done : madpayload_start; | ||
3454 | |||
3455 | autoneg_7220_sendpkt(ppd, hdr, dcnt, data); | ||
3456 | qib_read_kreg64(dd, kr_scratch); | ||
3457 | udelay(2); | ||
3458 | autoneg_7220_sendpkt(ppd, hdr, dcnt, data); | ||
3459 | qib_read_kreg64(dd, kr_scratch); | ||
3460 | udelay(2); | ||
3461 | } | ||
3462 | |||
3463 | /* | ||
3464 | * Do the absolute minimum to cause an IB speed change, and make it | ||
3465 | * ready, but don't actually trigger the change. The caller will | ||
3466 | * do that when ready (if link is in Polling training state, it will | ||
3467 | * happen immediately, otherwise when link next goes down) | ||
3468 | * | ||
3469 | * This routine should only be used as part of the DDR autonegotiation | ||
3470 | * code for devices that are not compliant with IB 1.2 (or code that | ||
3471 | * fixes things up for same). | ||
3472 | * | ||
3473 | * When the link has gone down and autoneg is enabled, or autoneg has | ||
3474 | * failed and we give up until next time, we set both speeds, and | ||
3475 | * then we want IBTA enabled as well as "use max enabled speed". | ||
3476 | */ | ||
3477 | static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) | ||
3478 | { | ||
3479 | ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
3480 | IBA7220_IBC_IBTA_1_2_MASK); | ||
3481 | |||
3482 | if (speed == (QIB_IB_SDR | QIB_IB_DDR)) | ||
3483 | ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
3484 | IBA7220_IBC_IBTA_1_2_MASK; | ||
3485 | else | ||
3486 | ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ? | ||
3487 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
3488 | |||
3489 | qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); | ||
3490 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
3491 | } | ||
3492 | |||
3493 | /* | ||
3494 | * This routine is only used when we are not talking to another | ||
3495 | * IB 1.2-compliant device that we think can do DDR. | ||
3496 | * (This includes all existing switch chips as of Oct 2007.) | ||
3497 | * 1.2-compliant devices go directly to DDR prior to reaching INIT | ||
3498 | */ | ||
3499 | static void try_7220_autoneg(struct qib_pportdata *ppd) | ||
3500 | { | ||
3501 | unsigned long flags; | ||
3502 | |||
3503 | /* | ||
3504 | * Required for older non-IB1.2 DDR switches. Newer | ||
3505 | * non-IB-compliant switches don't need it, but so far, | ||
3506 | * aren't bothered by it either. "Magic constant" | ||
3507 | */ | ||
3508 | qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07); | ||
3509 | |||
3510 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3511 | ppd->lflags |= QIBL_IB_AUTONEG_INPROG; | ||
3512 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3513 | autoneg_7220_send(ppd, 0); | ||
3514 | set_7220_ibspeed_fast(ppd, QIB_IB_DDR); | ||
3515 | |||
3516 | toggle_7220_rclkrls(ppd->dd); | ||
3517 | /* 2 msec is minimum length of a poll cycle */ | ||
3518 | schedule_delayed_work(&ppd->cpspec->autoneg_work, | ||
3519 | msecs_to_jiffies(2)); | ||
3520 | } | ||
3521 | |||
3522 | /* | ||
3523 | * Handle the empirically determined mechanism for auto-negotiation | ||
3524 | * of DDR speed with switches. | ||
3525 | */ | ||
3526 | static void autoneg_7220_work(struct work_struct *work) | ||
3527 | { | ||
3528 | struct qib_pportdata *ppd; | ||
3529 | struct qib_devdata *dd; | ||
3530 | u64 startms; | ||
3531 | u32 i; | ||
3532 | unsigned long flags; | ||
3533 | |||
3534 | ppd = &container_of(work, struct qib_chippport_specific, | ||
3535 | autoneg_work.work)->pportdata; | ||
3536 | dd = ppd->dd; | ||
3537 | |||
3538 | startms = jiffies_to_msecs(jiffies); | ||
3539 | |||
3540 | /* | ||
3541 | * Busy-wait for this first part; it should be at most a | ||
3542 | * few hundred usec, since we scheduled ourselves for 2 msec. | ||
3543 | */ | ||
3544 | for (i = 0; i < 25; i++) { | ||
3545 | if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState) | ||
3546 | == IB_7220_LT_STATE_POLLQUIET) { | ||
3547 | qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); | ||
3548 | break; | ||
3549 | } | ||
3550 | udelay(100); | ||
3551 | } | ||
3552 | |||
3553 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
3554 | goto done; /* we got there early or told to stop */ | ||
3555 | |||
3556 | /* we expect this to timeout */ | ||
3557 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
3558 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
3559 | msecs_to_jiffies(90))) | ||
3560 | goto done; | ||
3561 | |||
3562 | toggle_7220_rclkrls(dd); | ||
3563 | |||
3564 | /* we expect this to timeout */ | ||
3565 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
3566 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
3567 | msecs_to_jiffies(1700))) | ||
3568 | goto done; | ||
3569 | |||
3570 | set_7220_ibspeed_fast(ppd, QIB_IB_SDR); | ||
3571 | toggle_7220_rclkrls(dd); | ||
3572 | |||
3573 | /* | ||
3574 | * Wait up to 250 msec for link to train and get to INIT at DDR; | ||
3575 | * this should terminate early. | ||
3576 | */ | ||
3577 | wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
3578 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
3579 | msecs_to_jiffies(250)); | ||
3580 | done: | ||
3581 | if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { | ||
3582 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3583 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | ||
3584 | if (dd->cspec->autoneg_tries == AUTONEG_TRIES) { | ||
3585 | ppd->lflags |= QIBL_IB_AUTONEG_FAILED; | ||
3586 | dd->cspec->autoneg_tries = 0; | ||
3587 | } | ||
3588 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3589 | set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
3590 | } | ||
3591 | } | ||
3592 | |||
3593 | static u32 qib_7220_iblink_state(u64 ibcs) | ||
3594 | { | ||
3595 | u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState); | ||
3596 | |||
3597 | switch (state) { | ||
3598 | case IB_7220_L_STATE_INIT: | ||
3599 | state = IB_PORT_INIT; | ||
3600 | break; | ||
3601 | case IB_7220_L_STATE_ARM: | ||
3602 | state = IB_PORT_ARMED; | ||
3603 | break; | ||
3604 | case IB_7220_L_STATE_ACTIVE: | ||
3605 | /* fall through */ | ||
3606 | case IB_7220_L_STATE_ACT_DEFER: | ||
3607 | state = IB_PORT_ACTIVE; | ||
3608 | break; | ||
3609 | default: /* fall through */ | ||
3610 | case IB_7220_L_STATE_DOWN: | ||
3611 | state = IB_PORT_DOWN; | ||
3612 | break; | ||
3613 | } | ||
3614 | return state; | ||
3615 | } | ||
3616 | |||
3617 | /* returns the IBTA port state, rather than the IBC link training state */ | ||
3618 | static u8 qib_7220_phys_portstate(u64 ibcs) | ||
3619 | { | ||
3620 | u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState); | ||
3621 | return qib_7220_physportstate[state]; | ||
3622 | } | ||
3623 | |||
3624 | static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | ||
3625 | { | ||
3626 | int ret = 0, symadj = 0; | ||
3627 | struct qib_devdata *dd = ppd->dd; | ||
3628 | unsigned long flags; | ||
3629 | |||
3630 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3631 | ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; | ||
3632 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3633 | |||
3634 | if (!ibup) { | ||
3635 | /* | ||
3636 | * When the link goes down we don't want AEQ running, so it | ||
3637 | * won't interfere with IBC training, etc., and we need | ||
3638 | * to go back to the static SerDes preset values. | ||
3639 | */ | ||
3640 | if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | ||
3641 | QIBL_IB_AUTONEG_INPROG))) | ||
3642 | set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
3643 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
3644 | qib_sd7220_presets(dd); | ||
3645 | qib_cancel_sends(ppd); /* initial disarm, etc. */ | ||
3646 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
3647 | if (__qib_sdma_running(ppd)) | ||
3648 | __qib_sdma_process_event(ppd, | ||
3649 | qib_sdma_event_e70_go_idle); | ||
3650 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
3651 | } | ||
3652 | /* this might be better in qib_sd7220_presets() */ | ||
3653 | set_7220_relock_poll(dd, ibup); | ||
3654 | } else { | ||
3655 | if (qib_compat_ddr_negotiate && | ||
3656 | !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | ||
3657 | QIBL_IB_AUTONEG_INPROG)) && | ||
3658 | ppd->link_speed_active == QIB_IB_SDR && | ||
3659 | (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) == | ||
3660 | (QIB_IB_DDR | QIB_IB_SDR) && | ||
3661 | dd->cspec->autoneg_tries < AUTONEG_TRIES) { | ||
3662 | /* we are SDR, and DDR auto-negotiation enabled */ | ||
3663 | ++dd->cspec->autoneg_tries; | ||
3664 | if (!ppd->cpspec->ibdeltainprog) { | ||
3665 | ppd->cpspec->ibdeltainprog = 1; | ||
3666 | ppd->cpspec->ibsymsnap = read_7220_creg32(dd, | ||
3667 | cr_ibsymbolerr); | ||
3668 | ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd, | ||
3669 | cr_iblinkerrrecov); | ||
3670 | } | ||
3671 | try_7220_autoneg(ppd); | ||
3672 | ret = 1; /* no other IB status change processing */ | ||
3673 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | ||
3674 | ppd->link_speed_active == QIB_IB_SDR) { | ||
3675 | autoneg_7220_send(ppd, 1); | ||
3676 | set_7220_ibspeed_fast(ppd, QIB_IB_DDR); | ||
3677 | udelay(2); | ||
3678 | toggle_7220_rclkrls(dd); | ||
3679 | ret = 1; /* no other IB status change processing */ | ||
3680 | } else { | ||
3681 | if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | ||
3682 | (ppd->link_speed_active & QIB_IB_DDR)) { | ||
3683 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3684 | ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | | ||
3685 | QIBL_IB_AUTONEG_FAILED); | ||
3686 | spin_unlock_irqrestore(&ppd->lflags_lock, | ||
3687 | flags); | ||
3688 | dd->cspec->autoneg_tries = 0; | ||
3689 | /* re-enable SDR, for next link down */ | ||
3690 | set_7220_ibspeed_fast(ppd, | ||
3691 | ppd->link_speed_enabled); | ||
3692 | wake_up(&ppd->cpspec->autoneg_wait); | ||
3693 | symadj = 1; | ||
3694 | } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { | ||
3695 | /* | ||
3696 | * Clear autoneg failure flag, and do setup | ||
3697 | * so we'll try next time link goes down and | ||
3698 | * back to INIT (possibly connected to a | ||
3699 | * different device). | ||
3700 | */ | ||
3701 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3702 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
3703 | spin_unlock_irqrestore(&ppd->lflags_lock, | ||
3704 | flags); | ||
3705 | ppd->cpspec->ibcddrctrl |= | ||
3706 | IBA7220_IBC_IBTA_1_2_MASK; | ||
3707 | qib_write_kreg(dd, kr_ncmodectrl, 0); | ||
3708 | symadj = 1; | ||
3709 | } | ||
3710 | } | ||
3711 | |||
3712 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
3713 | symadj = 1; | ||
3714 | |||
3715 | if (!ret) { | ||
3716 | ppd->delay_mult = rate_to_delay | ||
3717 | [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1] | ||
3718 | [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1]; | ||
3719 | |||
3720 | set_7220_relock_poll(dd, ibup); | ||
3721 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
3722 | /* | ||
3723 | * Unlike 7322, the 7220 needs this, due to lack of | ||
3724 | * interrupt in some cases when we have sdma active | ||
3725 | * when the link goes down. | ||
3726 | */ | ||
3727 | if (ppd->sdma_state.current_state != | ||
3728 | qib_sdma_state_s20_idle) | ||
3729 | __qib_sdma_process_event(ppd, | ||
3730 | qib_sdma_event_e00_go_hw_down); | ||
3731 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
3732 | } | ||
3733 | } | ||
3734 | |||
3735 | if (symadj) { | ||
3736 | if (ppd->cpspec->ibdeltainprog) { | ||
3737 | ppd->cpspec->ibdeltainprog = 0; | ||
3738 | ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd, | ||
3739 | cr_ibsymbolerr) - ppd->cpspec->ibsymsnap; | ||
3740 | ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd, | ||
3741 | cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; | ||
3742 | } | ||
3743 | } else if (!ibup && qib_compat_ddr_negotiate && | ||
3744 | !ppd->cpspec->ibdeltainprog && | ||
3745 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
3746 | ppd->cpspec->ibdeltainprog = 1; | ||
3747 | ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd, | ||
3748 | cr_ibsymbolerr); | ||
3749 | ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd, | ||
3750 | cr_iblinkerrrecov); | ||
3751 | } | ||
3752 | |||
3753 | if (!ret) | ||
3754 | qib_setup_7220_setextled(ppd, ibup); | ||
3755 | return ret; | ||
3756 | } | ||
3757 | |||
3758 | /* | ||
3759 | * Does read/modify/write to appropriate registers to | ||
3760 | * set output and direction bits selected by mask. | ||
3761 | * These are in their canonical positions (e.g. lsb of | ||
3762 | * dir will end up in D48 of extctrl on existing chips). | ||
3763 | * returns contents of GP Inputs. | ||
3764 | */ | ||
3765 | static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) | ||
3766 | { | ||
3767 | u64 read_val, new_out; | ||
3768 | unsigned long flags; | ||
3769 | |||
3770 | if (mask) { | ||
3771 | /* some bits being written, lock access to GPIO */ | ||
3772 | dir &= mask; | ||
3773 | out &= mask; | ||
3774 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
3775 | dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); | ||
3776 | dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); | ||
3777 | new_out = (dd->cspec->gpio_out & ~mask) | out; | ||
3778 | |||
3779 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | ||
3780 | qib_write_kreg(dd, kr_gpio_out, new_out); | ||
3781 | dd->cspec->gpio_out = new_out; | ||
3782 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
3783 | } | ||
3784 | /* | ||
3785 | * It is unlikely that a read at this time would get valid | ||
3786 | * data on a pin whose direction line was set in the same | ||
3787 | * call to this function. We include the read here because | ||
3788 | * that allows us to potentially combine a change on one pin with | ||
3789 | * a read on another, and because the old code did something like | ||
3790 | * this. | ||
3791 | */ | ||
3792 | read_val = qib_read_kreg64(dd, kr_extstatus); | ||
3793 | return SYM_FIELD(read_val, EXTStatus, GPIOIn); | ||
3794 | } | ||
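/*
 * Illustrative usage (editor's example; the pin number is
 * hypothetical): drive GPIO pin 3 high by setting both its output
 * and direction bits, or poll the inputs without modifying anything
 * by passing an empty mask:
 *
 *	pins = gpio_7220_mod(dd, 1 << 3, 1 << 3, 1 << 3);
 *	pins = gpio_7220_mod(dd, 0, 0, 0);
 */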
3795 | |||
3796 | /* | ||
3797 | * Read fundamental info we need to use the chip. These are | ||
3798 | * the registers that describe chip capabilities, and are | ||
3799 | * saved in shadow registers. | ||
3800 | */ | ||
3801 | static void get_7220_chip_params(struct qib_devdata *dd) | ||
3802 | { | ||
3803 | u64 val; | ||
3804 | u32 piobufs; | ||
3805 | int mtu; | ||
3806 | |||
3807 | dd->uregbase = qib_read_kreg32(dd, kr_userregbase); | ||
3808 | |||
3809 | dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); | ||
3810 | dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); | ||
3811 | dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); | ||
3812 | dd->palign = qib_read_kreg32(dd, kr_palign); | ||
3813 | dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); | ||
3814 | dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; | ||
3815 | |||
3816 | val = qib_read_kreg64(dd, kr_sendpiosize); | ||
3817 | dd->piosize2k = val & ~0U; | ||
3818 | dd->piosize4k = val >> 32; | ||
3819 | |||
3820 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | ||
3821 | if (mtu == -1) | ||
3822 | mtu = QIB_DEFAULT_MTU; | ||
3823 | dd->pport->ibmtu = (u32)mtu; | ||
3824 | |||
3825 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | ||
3826 | dd->piobcnt2k = val & ~0U; | ||
3827 | dd->piobcnt4k = val >> 32; | ||
3828 | /* these may be adjusted in init_chip_wc_pat() */ | ||
3829 | dd->pio2kbase = (u32 __iomem *) | ||
3830 | ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); | ||
3831 | if (dd->piobcnt4k) { | ||
3832 | dd->pio4kbase = (u32 __iomem *) | ||
3833 | ((char __iomem *) dd->kregbase + | ||
3834 | (dd->piobufbase >> 32)); | ||
3835 | /* | ||
3836 | * 4K buffers take 2 pages; we use roundup just to be | ||
3837 | * paranoid; we calculate it once here, rather than on | ||
3838 | * every buf allocate. | ||
3839 | */ | ||
3840 | dd->align4k = ALIGN(dd->piosize4k, dd->palign); | ||
3841 | } | ||
3842 | |||
3843 | piobufs = dd->piobcnt4k + dd->piobcnt2k; | ||
3844 | |||
3845 | dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / | ||
3846 | (sizeof(u64) * BITS_PER_BYTE / 2); | ||
3847 | } | ||
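/*
 * Worked example (editor's addition; buffer counts hypothetical):
 * each 64-bit pioavail register carries 2 status bits per send
 * buffer, i.e. 32 buffers per register, so the expression above is
 * simply DIV_ROUND_UP(piobufs, 32). For 192 2k plus 32 4k buffers,
 * pioavregs = 224 / 32 = 7.
 */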
3848 | |||
3849 | /* | ||
3850 | * The chip base addresses in cspec and cpspec have to be set | ||
3851 | * after possible init_chip_wc_pat(), rather than in | ||
3852 | * qib_get_7220_chip_params(), so split out as separate function | ||
3853 | */ | ||
3854 | static void set_7220_baseaddrs(struct qib_devdata *dd) | ||
3855 | { | ||
3856 | u32 cregbase; | ||
3857 | /* init after possible re-map in init_chip_wc_pat() */ | ||
3858 | cregbase = qib_read_kreg32(dd, kr_counterregbase); | ||
3859 | dd->cspec->cregbase = (u64 __iomem *) | ||
3860 | ((char __iomem *) dd->kregbase + cregbase); | ||
3861 | |||
3862 | dd->egrtidbase = (u64 __iomem *) | ||
3863 | ((char __iomem *) dd->kregbase + dd->rcvegrbase); | ||
3864 | } | ||
3865 | |||
3866 | |||
3867 | #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \ | ||
3868 | SYM_MASK(SendCtrl, SPioEnable) | \ | ||
3869 | SYM_MASK(SendCtrl, SSpecialTriggerEn) | \ | ||
3870 | SYM_MASK(SendCtrl, SendBufAvailUpd) | \ | ||
3871 | SYM_MASK(SendCtrl, AvailUpdThld) | \ | ||
3872 | SYM_MASK(SendCtrl, SDmaEnable) | \ | ||
3873 | SYM_MASK(SendCtrl, SDmaIntEnable) | \ | ||
3874 | SYM_MASK(SendCtrl, SDmaHalt) | \ | ||
3875 | SYM_MASK(SendCtrl, SDmaSingleDescriptor)) | ||
3876 | |||
3877 | static int sendctrl_hook(struct qib_devdata *dd, | ||
3878 | const struct diag_observer *op, | ||
3879 | u32 offs, u64 *data, u64 mask, int only_32) | ||
3880 | { | ||
3881 | unsigned long flags; | ||
3882 | unsigned idx = offs / sizeof(u64); | ||
3883 | u64 local_data, all_bits; | ||
3884 | |||
3885 | if (idx != kr_sendctrl) { | ||
3886 | qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n", | ||
3887 | offs, only_32 ? "32" : "64"); | ||
3888 | return 0; | ||
3889 | } | ||
3890 | |||
3891 | all_bits = ~0ULL; | ||
3892 | if (only_32) | ||
3893 | all_bits >>= 32; | ||
3894 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
3895 | if ((mask & all_bits) != all_bits) { | ||
3896 | /* | ||
3897 | * At least some mask bits are zero, so we need | ||
3898 | * to read. The judgement call is whether from | ||
3899 | * reg or shadow. First-cut: read reg, and complain | ||
3900 | * if any bits which should be shadowed are different | ||
3901 | * from their shadowed value. | ||
3902 | */ | ||
3903 | if (only_32) | ||
3904 | local_data = (u64)qib_read_kreg32(dd, idx); | ||
3905 | else | ||
3906 | local_data = qib_read_kreg64(dd, idx); | ||
3907 | qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n", | ||
3908 | (u32)local_data, (u32)dd->sendctrl); | ||
3909 | if ((local_data & SENDCTRL_SHADOWED) != | ||
3910 | (dd->sendctrl & SENDCTRL_SHADOWED)) | ||
3911 | qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n", | ||
3912 | (u32)local_data, (u32) dd->sendctrl); | ||
3913 | *data = (local_data & ~mask) | (*data & mask); | ||
3914 | } | ||
3915 | if (mask) { | ||
3916 | /* | ||
3917 | * At least some mask bits are one, so we need | ||
3918 | * to write, but only shadow some bits. | ||
3919 | */ | ||
3920 | u64 sval, tval; /* Shadowed, transient */ | ||
3921 | |||
3922 | /* | ||
3923 | * New shadow val is bits we don't want to touch, | ||
3924 | * ORed with bits we do, that are intended for shadow. | ||
3925 | */ | ||
3926 | sval = (dd->sendctrl & ~mask); | ||
3927 | sval |= *data & SENDCTRL_SHADOWED & mask; | ||
3928 | dd->sendctrl = sval; | ||
3929 | tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); | ||
3930 | qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n", | ||
3931 | (u32)tval, (u32)sval); | ||
3932 | qib_write_kreg(dd, kr_sendctrl, tval); | ||
3933 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
3934 | } | ||
3935 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
3936 | |||
3937 | return only_32 ? 4 : 8; | ||
3938 | } | ||
3939 | |||
3940 | static const struct diag_observer sendctrl_observer = { | ||
3941 | sendctrl_hook, kr_sendctrl * sizeof(u64), | ||
3942 | kr_sendctrl * sizeof(u64) | ||
3943 | }; | ||
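/*
 * Editor's note: bottom and top of the observed range are both the
 * byte offset of kr_sendctrl, so once qib_late_7220_initreg()
 * registers this observer, any diag read or write of that single
 * register is routed through sendctrl_hook() above, keeping the
 * SENDCTRL_SHADOWED bits of dd->sendctrl coherent with what the
 * diag client sees and writes.
 */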
3944 | |||
3945 | /* | ||
3946 | * Write the final few registers that depend on some of the | ||
3947 | * init setup. Done late in init, just before bringing up | ||
3948 | * the serdes. | ||
3949 | */ | ||
3950 | static int qib_late_7220_initreg(struct qib_devdata *dd) | ||
3951 | { | ||
3952 | int ret = 0; | ||
3953 | u64 val; | ||
3954 | |||
3955 | qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); | ||
3956 | qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); | ||
3957 | qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); | ||
3958 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | ||
3959 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | ||
3960 | if (val != dd->pioavailregs_phys) { | ||
3961 | qib_dev_err(dd, "Catastrophic software error, " | ||
3962 | "SendPIOAvailAddr written as %lx, " | ||
3963 | "read back as %llx\n", | ||
3964 | (unsigned long) dd->pioavailregs_phys, | ||
3965 | (unsigned long long) val); | ||
3966 | ret = -EINVAL; | ||
3967 | } | ||
3968 | qib_register_observer(dd, &sendctrl_observer); | ||
3969 | return ret; | ||
3970 | } | ||
3971 | |||
3972 | static int qib_init_7220_variables(struct qib_devdata *dd) | ||
3973 | { | ||
3974 | struct qib_chippport_specific *cpspec; | ||
3975 | struct qib_pportdata *ppd; | ||
3976 | int ret = 0; | ||
3977 | u32 sbufs, updthresh; | ||
3978 | |||
3979 | cpspec = (struct qib_chippport_specific *)(dd + 1); | ||
3980 | ppd = &cpspec->pportdata; | ||
3981 | dd->pport = ppd; | ||
3982 | dd->num_pports = 1; | ||
3983 | |||
3984 | dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports); | ||
3985 | ppd->cpspec = cpspec; | ||
3986 | |||
3987 | spin_lock_init(&dd->cspec->sdepb_lock); | ||
3988 | spin_lock_init(&dd->cspec->rcvmod_lock); | ||
3989 | spin_lock_init(&dd->cspec->gpio_lock); | ||
3990 | |||
3991 | /* we haven't yet set QIB_PRESENT, so use read directly */ | ||
3992 | dd->revision = readq(&dd->kregbase[kr_revision]); | ||
3993 | |||
3994 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | ||
3995 | qib_dev_err(dd, "Revision register read failure, " | ||
3996 | "giving up initialization\n"); | ||
3997 | ret = -ENODEV; | ||
3998 | goto bail; | ||
3999 | } | ||
4000 | dd->flags |= QIB_PRESENT; /* now register routines work */ | ||
4001 | |||
4002 | dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, | ||
4003 | ChipRevMajor); | ||
4004 | dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, | ||
4005 | ChipRevMinor); | ||
4006 | |||
4007 | get_7220_chip_params(dd); | ||
4008 | qib_7220_boardname(dd); | ||
4009 | |||
4010 | /* | ||
4011 | * GPIO bits for TWSI data and clock, | ||
4012 | * used for serial EEPROM. | ||
4013 | */ | ||
4014 | dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; | ||
4015 | dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; | ||
4016 | dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; | ||
4017 | |||
4018 | dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | | ||
4019 | QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE; | ||
4020 | dd->flags |= qib_special_trigger ? | ||
4021 | QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; | ||
4022 | |||
4023 | /* | ||
4024 | * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. | ||
4025 | * 2 is Some Misc, 3 is reserved for future. | ||
4026 | */ | ||
4027 | dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr); | ||
4028 | |||
4029 | dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr); | ||
4030 | |||
4031 | dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); | ||
4032 | |||
4033 | init_waitqueue_head(&cpspec->autoneg_wait); | ||
4034 | INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); | ||
4035 | |||
4036 | qib_init_pportdata(ppd, dd, 0, 1); | ||
4037 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | ||
4038 | ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; | ||
4039 | |||
4040 | ppd->link_width_enabled = ppd->link_width_supported; | ||
4041 | ppd->link_speed_enabled = ppd->link_speed_supported; | ||
4042 | /* | ||
4043 | * Set the initial values to reasonable default, will be set | ||
4044 | * for real when link is up. | ||
4045 | */ | ||
4046 | ppd->link_width_active = IB_WIDTH_4X; | ||
4047 | ppd->link_speed_active = QIB_IB_SDR; | ||
4048 | ppd->delay_mult = rate_to_delay[0][1]; | ||
4049 | ppd->vls_supported = IB_VL_VL0; | ||
4050 | ppd->vls_operational = ppd->vls_supported; | ||
4051 | |||
4052 | if (!qib_mini_init) | ||
4053 | qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP); | ||
4054 | |||
4055 | init_timer(&ppd->cpspec->chase_timer); | ||
4056 | ppd->cpspec->chase_timer.function = reenable_7220_chase; | ||
4057 | ppd->cpspec->chase_timer.data = (unsigned long)ppd; | ||
4058 | |||
4059 | qib_num_cfg_vls = 1; /* if any 7220's, only one VL */ | ||
4060 | |||
4061 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; | ||
4062 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; | ||
4063 | dd->rhf_offset = | ||
4064 | dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); | ||
4065 | |||
4066 | /* we always allocate at least 2048 bytes for eager buffers */ | ||
4067 | ret = ib_mtu_enum_to_int(qib_ibmtu); | ||
4068 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | ||
4069 | |||
4070 | qib_7220_tidtemplate(dd); | ||
4071 | |||
4072 | /* | ||
4073 | * We can request a receive interrupt for 1 or | ||
4074 | * more packets from current offset. For now, we set this | ||
4075 | * up for a single packet. | ||
4076 | */ | ||
4077 | dd->rhdrhead_intr_off = 1ULL << 32; | ||
4078 | |||
4079 | /* setup the stats timer; the add_timer is done at end of init */ | ||
4080 | init_timer(&dd->stats_timer); | ||
4081 | dd->stats_timer.function = qib_get_7220_faststats; | ||
4082 | dd->stats_timer.data = (unsigned long) dd; | ||
4083 | dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ; | ||
4084 | |||
4085 | /* | ||
4086 | * Control[4] selects the arbitration within the SDMA engine | ||
4087 | * between data fetches and descriptor fetches. | ||
4088 | * qib_sdma_fetch_arb==0 gives data fetches priority. | ||
4089 | */ | ||
4090 | if (qib_sdma_fetch_arb) | ||
4091 | dd->control |= 1 << 4; | ||
4092 | |||
4093 | dd->ureg_align = 0x10000; /* 64KB alignment */ | ||
4094 | |||
4095 | dd->piosize2kmax_dwords = (dd->piosize2k >> 2) - 1; | ||
4096 | qib_7220_config_ctxts(dd); | ||
4097 | qib_set_ctxtcnt(dd); /* needed for PAT setup */ | ||
4098 | |||
4099 | if (qib_wc_pat) { | ||
4100 | ret = init_chip_wc_pat(dd, 0); | ||
4101 | if (ret) | ||
4102 | goto bail; | ||
4103 | } | ||
4104 | set_7220_baseaddrs(dd); /* set chip access pointers now */ | ||
4105 | |||
4106 | ret = 0; | ||
4107 | if (qib_mini_init) | ||
4108 | goto bail; | ||
4109 | |||
4110 | ret = qib_create_ctxts(dd); | ||
4111 | init_7220_cntrnames(dd); | ||
4112 | |||
4113 | /* | ||
4114 | * Use all of the 4KB buffers for kernel SDMA (zero if !SDMA), but | ||
4115 | * reserve the update-threshold amount, or 3, whichever is greater, | ||
4116 | * for other kernel use such as sending SMI, MAD, and ACKs.  If SDMA | ||
4117 | * is not enabled, the kernel instead gets all the 4KB buffers. | ||
4118 | * If the reserve were less than the update threshold, we could wait | ||
4119 | * a long time for an update.  Coded this way because we sometimes | ||
4120 | * change the update threshold for various reasons, and we want this | ||
4121 | * to remain robust. | ||
4122 | */ | ||
4123 | updthresh = 8U; /* update threshold */ | ||
4124 | if (dd->flags & QIB_HAS_SEND_DMA) { | ||
4125 | dd->cspec->sdmabufcnt = dd->piobcnt4k; | ||
4126 | sbufs = updthresh > 3 ? updthresh : 3; | ||
4127 | } else { | ||
4128 | dd->cspec->sdmabufcnt = 0; | ||
4129 | sbufs = dd->piobcnt4k; | ||
4130 | } | ||
4131 | |||
4132 | dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - | ||
4133 | dd->cspec->sdmabufcnt; | ||
4134 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | ||
4135 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | ||
4136 | dd->pbufsctxt = dd->lastctxt_piobuf / | ||
4137 | (dd->cfgctxts - dd->first_user_ctxt); | ||
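/*
 * Worked example (added; all counts hypothetical): with
 * piobcnt2k == 128, piobcnt4k == 64, SDMA enabled, updthresh == 8,
 * cfgctxts == 17 and first_user_ctxt == 1: sdmabufcnt == 64,
 * sbufs == 8, lastctxt_piobuf == 128 + 64 - 64 - 8 == 120,
 * lastbuf_for_pio == 127 after the decrement, and
 * pbufsctxt == 120 / 16 == 7.
 */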
4138 | |||
4139 | /* | ||
4140 | * If we are at 16 user contexts, we will have only 7 sbufs | ||
4141 | * per context, so drop the update threshold to match.  We | ||
4142 | * want to update before we actually run out, at low pbufs/ctxt, | ||
4143 | * so give ourselves some margin. | ||
4144 | */ | ||
4145 | if (dd->pbufsctxt >= 2 && (dd->pbufsctxt - 2) < updthresh) | ||
4146 | updthresh = dd->pbufsctxt - 2; | ||
4147 | |||
4148 | dd->cspec->updthresh_dflt = updthresh; | ||
4149 | dd->cspec->updthresh = updthresh; | ||
4150 | |||
4151 | /* before full enable, no interrupts, no locking needed */ | ||
4152 | dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) | ||
4153 | << SYM_LSB(SendCtrl, AvailUpdThld); | ||
4154 | |||
4155 | dd->psxmitwait_supported = 1; | ||
4156 | dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE; | ||
4157 | bail: | ||
4158 | return ret; | ||
4159 | } | ||
4160 | |||
4161 | static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc, | ||
4162 | u32 *pbufnum) | ||
4163 | { | ||
4164 | u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; | ||
4165 | struct qib_devdata *dd = ppd->dd; | ||
4166 | u32 __iomem *buf; | ||
4167 | |||
4168 | if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) && | ||
4169 | !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE))) | ||
4170 | buf = get_7220_link_buf(ppd, pbufnum); | ||
4171 | else { | ||
4172 | if ((plen + 1) > dd->piosize2kmax_dwords) | ||
4173 | first = dd->piobcnt2k; | ||
4174 | else | ||
4175 | first = 0; | ||
4176 | /* try 4k if all 2k busy, so same last for both sizes */ | ||
4177 | last = dd->cspec->lastbuf_for_pio; | ||
4178 | buf = qib_getsendbuf_range(dd, pbufnum, first, last); | ||
4179 | } | ||
4180 | return buf; | ||
4181 | } | ||
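/*
 * Example (added; hypothetical sizes): with piosize2kmax_dwords == 511,
 * a send of plen == 600 dwords starts the search at first == piobcnt2k
 * (the 4KB buffers); a 100-dword send starts at 0 but may still land in
 * a 4KB buffer, since last is the same for both sizes.
 */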
4182 | |||
4183 | /* these 2 "counters" are really control registers, and are always RW */ | ||
4184 | static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv, | ||
4185 | u32 start) | ||
4186 | { | ||
4187 | write_7220_creg(ppd->dd, cr_psinterval, intv); | ||
4188 | write_7220_creg(ppd->dd, cr_psstart, start); | ||
4189 | } | ||
4190 | |||
4191 | /* | ||
4192 | * NOTE: no real attempt is made to generalize the SDMA stuff. | ||
4193 | * At some point "soon" we will have a new, more generalized | ||
4194 | * SDMA interface, and then we'll clean this up. | ||
4195 | */ | ||
4196 | |||
4197 | /* Must be called with sdma_lock held, or before init finished */ | ||
4198 | static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail) | ||
4199 | { | ||
4200 | /* Commit writes to memory and advance the tail on the chip */ | ||
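/*
 * Note (added): the wmb() orders the descriptor writes into host
 * memory ahead of the MMIO tail update, so the chip never fetches a
 * descriptor the CPU has not finished writing.
 */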
4201 | wmb(); | ||
4202 | ppd->sdma_descq_tail = tail; | ||
4203 | qib_write_kreg(ppd->dd, kr_senddmatail, tail); | ||
4204 | } | ||
4205 | |||
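/* Nothing to do on the 7220; no descriptor-count threshold is programmed */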
4206 | static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) | ||
4207 | { | ||
4208 | } | ||
4209 | |||
4210 | static struct sdma_set_state_action sdma_7220_action_table[] = { | ||
4211 | [qib_sdma_state_s00_hw_down] = { | ||
4212 | .op_enable = 0, | ||
4213 | .op_intenable = 0, | ||
4214 | .op_halt = 0, | ||
4215 | .go_s99_running_tofalse = 1, | ||
4216 | }, | ||
4217 | [qib_sdma_state_s10_hw_start_up_wait] = { | ||
4218 | .op_enable = 1, | ||
4219 | .op_intenable = 1, | ||
4220 | .op_halt = 1, | ||
4221 | }, | ||
4222 | [qib_sdma_state_s20_idle] = { | ||
4223 | .op_enable = 1, | ||
4224 | .op_intenable = 1, | ||
4225 | .op_halt = 1, | ||
4226 | }, | ||
4227 | [qib_sdma_state_s30_sw_clean_up_wait] = { | ||
4228 | .op_enable = 0, | ||
4229 | .op_intenable = 1, | ||
4230 | .op_halt = 0, | ||
4231 | }, | ||
4232 | [qib_sdma_state_s40_hw_clean_up_wait] = { | ||
4233 | .op_enable = 1, | ||
4234 | .op_intenable = 1, | ||
4235 | .op_halt = 1, | ||
4236 | }, | ||
4237 | [qib_sdma_state_s50_hw_halt_wait] = { | ||
4238 | .op_enable = 1, | ||
4239 | .op_intenable = 1, | ||
4240 | .op_halt = 1, | ||
4241 | }, | ||
4242 | [qib_sdma_state_s99_running] = { | ||
4243 | .op_enable = 1, | ||
4244 | .op_intenable = 1, | ||
4245 | .op_halt = 0, | ||
4246 | .go_s99_running_totrue = 1, | ||
4247 | }, | ||
4248 | }; | ||
4249 | |||
4250 | static void qib_7220_sdma_init_early(struct qib_pportdata *ppd) | ||
4251 | { | ||
4252 | ppd->sdma_state.set_state_action = sdma_7220_action_table; | ||
4253 | } | ||
4254 | |||
4255 | static int init_sdma_7220_regs(struct qib_pportdata *ppd) | ||
4256 | { | ||
4257 | struct qib_devdata *dd = ppd->dd; | ||
4258 | unsigned i, n; | ||
4259 | u64 senddmabufmask[3] = { 0 }; | ||
4260 | |||
4261 | /* Set SendDmaBase */ | ||
4262 | qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys); | ||
4263 | qib_sdma_7220_setlengen(ppd); | ||
4264 | qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */ | ||
4265 | /* Set SendDmaHeadAddr */ | ||
4266 | qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys); | ||
4267 | |||
4268 | /* | ||
4269 | * Reserve all the former "kernel" piobufs, using high number range | ||
4270 | * so we get as many 4K buffers as possible | ||
4271 | */ | ||
4272 | n = dd->piobcnt2k + dd->piobcnt4k; | ||
4273 | i = n - dd->cspec->sdmabufcnt; | ||
4274 | |||
4275 | for (; i < n; ++i) { | ||
4276 | unsigned word = i / 64; | ||
4277 | unsigned bit = i & 63; | ||
4278 | |||
4279 | BUG_ON(word >= 3); | ||
4280 | senddmabufmask[word] |= 1ULL << bit; | ||
4281 | } | ||
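/*
 * Illustrative example (added; hypothetical counts): with
 * piobcnt2k + piobcnt4k == 192 and sdmabufcnt == 64, buffers 128..191
 * are reserved; buffer 130 maps to word 130 / 64 == 2 and
 * bit 130 & 63 == 2, i.e. bit 2 of senddmabufmask[2].
 */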
4282 | qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]); | ||
4283 | qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]); | ||
4284 | qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]); | ||
4285 | |||
4286 | ppd->sdma_state.first_sendbuf = i; | ||
4287 | ppd->sdma_state.last_sendbuf = n; | ||
4288 | |||
4289 | return 0; | ||
4290 | } | ||
4291 | |||
4292 | /* sdma_lock must be held */ | ||
4293 | static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd) | ||
4294 | { | ||
4295 | struct qib_devdata *dd = ppd->dd; | ||
4296 | int sane; | ||
4297 | int use_dmahead; | ||
4298 | u16 swhead; | ||
4299 | u16 swtail; | ||
4300 | u16 cnt; | ||
4301 | u16 hwhead; | ||
4302 | |||
4303 | use_dmahead = __qib_sdma_running(ppd) && | ||
4304 | (dd->flags & QIB_HAS_SDMA_TIMEOUT); | ||
4305 | retry: | ||
4306 | hwhead = use_dmahead ? | ||
4307 | (u16)le64_to_cpu(*ppd->sdma_head_dma) : | ||
4308 | (u16)qib_read_kreg32(dd, kr_senddmahead); | ||
4309 | |||
4310 | swhead = ppd->sdma_descq_head; | ||
4311 | swtail = ppd->sdma_descq_tail; | ||
4312 | cnt = ppd->sdma_descq_cnt; | ||
4313 | |||
4314 | if (swhead < swtail) { | ||
4315 | /* not wrapped */ | ||
4316 | sane = (hwhead >= swhead) && (hwhead <= swtail); | ||
4317 | } else if (swhead > swtail) { | ||
4318 | /* wrapped around */ | ||
4319 | sane = ((hwhead >= swhead) && (hwhead < cnt)) || | ||
4320 | (hwhead <= swtail); | ||
4321 | } else { | ||
4322 | /* empty */ | ||
4323 | sane = (hwhead == swhead); | ||
4324 | } | ||
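/*
 * Example (added; hypothetical values): with cnt == 256, swhead == 200
 * and swtail == 50 (wrapped), hwhead is sane only in 200..255 or
 * 0..50; anything else means the head value just read is stale or bogus.
 */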
4325 | |||
4326 | if (unlikely(!sane)) { | ||
4327 | if (use_dmahead) { | ||
4328 | /* try one more time, directly from the register */ | ||
4329 | use_dmahead = 0; | ||
4330 | goto retry; | ||
4331 | } | ||
4332 | /* assume no progress */ | ||
4333 | hwhead = swhead; | ||
4334 | } | ||
4335 | |||
4336 | return hwhead; | ||
4337 | } | ||
4338 | |||
4339 | static int qib_sdma_7220_busy(struct qib_pportdata *ppd) | ||
4340 | { | ||
4341 | u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus); | ||
4342 | |||
4343 | return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) || | ||
4344 | (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) || | ||
4345 | (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) || | ||
4346 | !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty)); | ||
4347 | } | ||
4348 | |||
4349 | /* | ||
4350 | * Compute the amount of delay before sending the next packet if the | ||
4351 | * port's send rate differs from the static rate set for the QP. | ||
4352 | * Since the delay affects this packet but the amount of the delay is | ||
4353 | * based on the length of the previous packet, use the last delay computed | ||
4354 | * and save the delay count for this packet to be used next time | ||
4355 | * we get here. | ||
4356 | */ | ||
4357 | static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen, | ||
4358 | u8 srate, u8 vl) | ||
4359 | { | ||
4360 | u8 snd_mult = ppd->delay_mult; | ||
4361 | u8 rcv_mult = ib_rate_to_delay[srate]; | ||
4362 | u32 ret = ppd->cpspec->last_delay_mult; | ||
4363 | |||
4364 | ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ? | ||
4365 | (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0; | ||
4366 | |||
4367 | /* Indicate VL15, if necessary */ | ||
4368 | if (vl == 15) | ||
4369 | ret |= PBC_7220_VL15_SEND_CTRL; | ||
4370 | return ret; | ||
4371 | } | ||
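/*
 * Worked example (added; the multipliers are hypothetical): a DDR port
 * with snd_mult == 1 sending on a QP whose static rate maps to
 * rcv_mult == 2, with plen == 256 dwords, saves
 * (256 * (2 - 1) + 1) >> 1 == 128 as the delay for the *next* packet,
 * while this packet uses the delay computed on the previous call.
 */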
4372 | |||
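/* No-op: VL15 buffers need no extra initialization on the 7220 */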
4373 | static void qib_7220_initvl15_bufs(struct qib_devdata *dd) | ||
4374 | { | ||
4375 | } | ||
4376 | |||
4377 | static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd) | ||
4378 | { | ||
4379 | if (!rcd->ctxt) { | ||
4380 | rcd->rcvegrcnt = IBA7220_KRCVEGRCNT; | ||
4381 | rcd->rcvegr_tid_base = 0; | ||
4382 | } else { | ||
4383 | rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; | ||
4384 | rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT + | ||
4385 | (rcd->ctxt - 1) * rcd->rcvegrcnt; | ||
4386 | } | ||
4387 | } | ||
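/*
 * Example (added; per-context count is hypothetical): with
 * cspec->rcvegrcnt == 2048, context 2 gets rcvegr_tid_base ==
 * IBA7220_KRCVEGRCNT + (2 - 1) * 2048, directly after context 1's block.
 */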
4388 | |||
4389 | static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start, | ||
4390 | u32 len, u32 which, struct qib_ctxtdata *rcd) | ||
4391 | { | ||
4392 | int i; | ||
4393 | unsigned long flags; | ||
4394 | |||
4395 | switch (which) { | ||
4396 | case TXCHK_CHG_TYPE_KERN: | ||
4397 | /* see if we need to raise avail update threshold */ | ||
4398 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
4399 | for (i = dd->first_user_ctxt; | ||
4400 | dd->cspec->updthresh != dd->cspec->updthresh_dflt | ||
4401 | && i < dd->cfgctxts; i++) | ||
4402 | if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && | ||
4403 | ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) | ||
4404 | < dd->cspec->updthresh_dflt) | ||
4405 | break; | ||
4406 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
4407 | if (i == dd->cfgctxts) { | ||
4408 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
4409 | dd->cspec->updthresh = dd->cspec->updthresh_dflt; | ||
4410 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | ||
4411 | dd->sendctrl |= (dd->cspec->updthresh & | ||
4412 | SYM_RMASK(SendCtrl, AvailUpdThld)) << | ||
4413 | SYM_LSB(SendCtrl, AvailUpdThld); | ||
4414 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4415 | sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
4416 | } | ||
4417 | break; | ||
4418 | case TXCHK_CHG_TYPE_USER: | ||
4419 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
4420 | if (rcd && rcd->subctxt_cnt && ((rcd->piocnt | ||
4421 | / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { | ||
4422 | dd->cspec->updthresh = (rcd->piocnt / | ||
4423 | rcd->subctxt_cnt) - 1; | ||
4424 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | ||
4425 | dd->sendctrl |= (dd->cspec->updthresh & | ||
4426 | SYM_RMASK(SendCtrl, AvailUpdThld)) | ||
4427 | << SYM_LSB(SendCtrl, AvailUpdThld); | ||
4428 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4429 | sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
4430 | } else | ||
4431 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4432 | break; | ||
4433 | } | ||
4434 | } | ||
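/*
 * Summary (added): TXCHK_CHG_TYPE_KERN restores the default
 * avail-update threshold once no active user context needs a lower
 * one; TXCHK_CHG_TYPE_USER lowers it so a subcontext's small buffer
 * share still gets timely avail updates.  Each path blips SendCtrl
 * (QIB_SENDCTRL_AVAIL_BLIP) when it changes the threshold.
 */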
4435 | |||
4436 | static void writescratch(struct qib_devdata *dd, u32 val) | ||
4437 | { | ||
4438 | qib_write_kreg(dd, kr_scratch, val); | ||
4439 | } | ||
4440 | |||
4441 | #define VALID_TS_RD_REG_MASK 0xBF | ||
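/* 0xBF == 0b1011_1111: bit 6 is clear, so register 6 is the absent one */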
4442 | /** | ||
4443 | * qib_7220_tempsense_rd - read register of temp sensor via TWSI | ||
4444 | * @dd: the qlogic_ib device | ||
4445 | * @regnum: register to read from | ||
4446 | * | ||
4447 | * Returns the register contents (0..255), or < 0 on error | ||
4448 | */ | ||
4449 | static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum) | ||
4450 | { | ||
4451 | int ret; | ||
4452 | u8 rdata; | ||
4453 | |||
4454 | if (regnum > 7) { | ||
4455 | ret = -EINVAL; | ||
4456 | goto bail; | ||
4457 | } | ||
4458 | |||
4459 | /* return a bogus value for (the one) register we do not have */ | ||
4460 | if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) { | ||
4461 | ret = 0; | ||
4462 | goto bail; | ||
4463 | } | ||
4464 | |||
4465 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
4466 | if (ret) | ||
4467 | goto bail; | ||
4468 | |||
4469 | ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1); | ||
4470 | if (!ret) | ||
4471 | ret = rdata; | ||
4472 | |||
4473 | mutex_unlock(&dd->eep_lock); | ||
4474 | |||
4475 | /* | ||
4476 | * There are three possibilities here: | ||
4477 | * ret is actual value (0..255) | ||
4478 | * ret is -ENXIO or -EINVAL from twsi code or this file | ||
4479 | * ret is -EINTR from mutex_lock_interruptible. | ||
4480 | */ | ||
4481 | bail: | ||
4482 | return ret; | ||
4483 | } | ||
4484 | |||
4485 | /* Dummy function, as 7220 boards never disable EEPROM Write */ | ||
4486 | static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen) | ||
4487 | { | ||
4488 | return 1; | ||
4489 | } | ||
4490 | |||
4491 | /** | ||
4492 | * qib_init_iba7220_funcs - set up the chip-specific function pointers | ||
4493 | * @pdev: the pci_dev for the qlogic_ib device | ||
4494 | * @ent: pci_device_id struct for this dev | ||
4495 | * | ||
4496 | * This is global, and is called directly at init to set up the | ||
4497 | * chip-specific function pointers for later use. | ||
4498 | */ | ||
4499 | struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, | ||
4500 | const struct pci_device_id *ent) | ||
4501 | { | ||
4502 | struct qib_devdata *dd; | ||
4503 | int ret; | ||
4504 | u32 boardid, minwidth; | ||
4505 | |||
4506 | dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) + | ||
4507 | sizeof(struct qib_chippport_specific)); | ||
4508 | if (IS_ERR(dd)) | ||
4509 | goto bail; | ||
4510 | |||
4511 | dd->f_bringup_serdes = qib_7220_bringup_serdes; | ||
4512 | dd->f_cleanup = qib_setup_7220_cleanup; | ||
4513 | dd->f_clear_tids = qib_7220_clear_tids; | ||
4514 | dd->f_free_irq = qib_7220_free_irq; | ||
4515 | dd->f_get_base_info = qib_7220_get_base_info; | ||
4516 | dd->f_get_msgheader = qib_7220_get_msgheader; | ||
4517 | dd->f_getsendbuf = qib_7220_getsendbuf; | ||
4518 | dd->f_gpio_mod = gpio_7220_mod; | ||
4519 | dd->f_eeprom_wen = qib_7220_eeprom_wen; | ||
4520 | dd->f_hdrqempty = qib_7220_hdrqempty; | ||
4521 | dd->f_ib_updown = qib_7220_ib_updown; | ||
4522 | dd->f_init_ctxt = qib_7220_init_ctxt; | ||
4523 | dd->f_initvl15_bufs = qib_7220_initvl15_bufs; | ||
4524 | dd->f_intr_fallback = qib_7220_intr_fallback; | ||
4525 | dd->f_late_initreg = qib_late_7220_initreg; | ||
4526 | dd->f_setpbc_control = qib_7220_setpbc_control; | ||
4527 | dd->f_portcntr = qib_portcntr_7220; | ||
4528 | dd->f_put_tid = qib_7220_put_tid; | ||
4529 | dd->f_quiet_serdes = qib_7220_quiet_serdes; | ||
4530 | dd->f_rcvctrl = rcvctrl_7220_mod; | ||
4531 | dd->f_read_cntrs = qib_read_7220cntrs; | ||
4532 | dd->f_read_portcntrs = qib_read_7220portcntrs; | ||
4533 | dd->f_reset = qib_setup_7220_reset; | ||
4534 | dd->f_init_sdma_regs = init_sdma_7220_regs; | ||
4535 | dd->f_sdma_busy = qib_sdma_7220_busy; | ||
4536 | dd->f_sdma_gethead = qib_sdma_7220_gethead; | ||
4537 | dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl; | ||
4538 | dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt; | ||
4539 | dd->f_sdma_update_tail = qib_sdma_update_7220_tail; | ||
4540 | dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up; | ||
4541 | dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up; | ||
4542 | dd->f_sdma_init_early = qib_7220_sdma_init_early; | ||
4543 | dd->f_sendctrl = sendctrl_7220_mod; | ||
4544 | dd->f_set_armlaunch = qib_set_7220_armlaunch; | ||
4545 | dd->f_set_cntr_sample = qib_set_cntr_7220_sample; | ||
4546 | dd->f_iblink_state = qib_7220_iblink_state; | ||
4547 | dd->f_ibphys_portstate = qib_7220_phys_portstate; | ||
4548 | dd->f_get_ib_cfg = qib_7220_get_ib_cfg; | ||
4549 | dd->f_set_ib_cfg = qib_7220_set_ib_cfg; | ||
4550 | dd->f_set_ib_loopback = qib_7220_set_loopback; | ||
4551 | dd->f_set_intr_state = qib_7220_set_intr_state; | ||
4552 | dd->f_setextled = qib_setup_7220_setextled; | ||
4553 | dd->f_txchk_change = qib_7220_txchk_change; | ||
4554 | dd->f_update_usrhead = qib_update_7220_usrhead; | ||
4555 | dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr; | ||
4556 | dd->f_xgxs_reset = qib_7220_xgxs_reset; | ||
4557 | dd->f_writescratch = writescratch; | ||
4558 | dd->f_tempsense_rd = qib_7220_tempsense_rd; | ||
4559 | /* | ||
4560 | * Do remaining pcie setup and save pcie values in dd. | ||
4561 | * Any error printing is already done by the init code. | ||
4562 | * On return, we have the chip mapped, but chip registers | ||
4563 | * are not set up until start of qib_init_7220_variables. | ||
4564 | */ | ||
4565 | ret = qib_pcie_ddinit(dd, pdev, ent); | ||
4566 | if (ret < 0) | ||
4567 | goto bail_free; | ||
4568 | |||
4569 | /* initialize chip-specific variables */ | ||
4570 | ret = qib_init_7220_variables(dd); | ||
4571 | if (ret) | ||
4572 | goto bail_cleanup; | ||
4573 | |||
4574 | if (qib_mini_init) | ||
4575 | goto bail; | ||
4576 | |||
4577 | boardid = SYM_FIELD(dd->revision, Revision, | ||
4578 | BoardID); | ||
4579 | switch (boardid) { | ||
4580 | case 0: | ||
4581 | case 2: | ||
4582 | case 10: | ||
4583 | case 12: | ||
4584 | minwidth = 16; /* x16 capable boards */ | ||
4585 | break; | ||
4586 | default: | ||
4587 | minwidth = 8; /* x8 capable boards */ | ||
4588 | break; | ||
4589 | } | ||
4590 | if (qib_pcie_params(dd, minwidth, NULL, NULL)) | ||
4591 | qib_dev_err(dd, "Failed to set up PCIe or interrupts; " | ||
4592 | "continuing anyway\n"); | ||
4593 | |||
4594 | /* save IRQ for possible later use */ | ||
4595 | dd->cspec->irq = pdev->irq; | ||
4596 | |||
4597 | if (qib_read_kreg64(dd, kr_hwerrstatus) & | ||
4598 | QLOGIC_IB_HWE_SERDESPLLFAILED) | ||
4599 | qib_write_kreg(dd, kr_hwerrclear, | ||
4600 | QLOGIC_IB_HWE_SERDESPLLFAILED); | ||
4601 | |||
4602 | /* setup interrupt handler (interrupt type handled above) */ | ||
4603 | qib_setup_7220_interrupt(dd); | ||
4604 | qib_7220_init_hwerrors(dd); | ||
4605 | |||
4606 | /* clear diagctrl register, in case diags were running and crashed */ | ||
4607 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | ||
4608 | |||
4609 | goto bail; | ||
4610 | |||
4611 | bail_cleanup: | ||
4612 | qib_pcie_ddcleanup(dd); | ||
4613 | bail_free: | ||
4614 | qib_free_devdata(dd); | ||
4615 | dd = ERR_PTR(ret); | ||
4616 | bail: | ||
4617 | return dd; | ||
4618 | } | ||