path: root/drivers/infiniband/hw/qib/qib_iba7322.c
author     Ralph Campbell <ralph.campbell@qlogic.com>   2010-05-24 00:44:54 -0400
committer  Roland Dreier <rolandd@cisco.com>            2010-05-24 00:44:54 -0400
commit     f931551bafe1f10ded7f5282e2aa162c267a2e5d (patch)
tree       e81b4656a8116abf5fd0bc0bbc46560aff536159 /drivers/infiniband/hw/qib/qib_iba7322.c
parent     9a6edb60ec10d86b1025a0cdad68fd89f1ddaf02 (diff)
IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters
Add a low-level IB driver for QLogic PCIe adapters.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_iba7322.c')
-rw-r--r--   drivers/infiniband/hw/qib/qib_iba7322.c   8058
1 file changed, 8058 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 000000000000..2c24eab35b54
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,8058 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7322 chip
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/io.h>
42#include <linux/jiffies.h>
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_smi.h>
45#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
46#include <linux/dca.h>
47#endif
48
49#include "qib.h"
50#include "qib_7322_regs.h"
51#include "qib_qsfp.h"
52
53#include "qib_mad.h"
54
55static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
56static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
57static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
58static irqreturn_t qib_7322intr(int irq, void *data);
59static irqreturn_t qib_7322bufavail(int irq, void *data);
60static irqreturn_t sdma_intr(int irq, void *data);
61static irqreturn_t sdma_idle_intr(int irq, void *data);
62static irqreturn_t sdma_progress_intr(int irq, void *data);
63static irqreturn_t sdma_cleanup_intr(int irq, void *data);
64static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
65 struct qib_ctxtdata *rcd);
66static u8 qib_7322_phys_portstate(u64);
67static u32 qib_7322_iblink_state(u64);
68static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
69 u16 linitcmd);
70static void force_h1(struct qib_pportdata *);
71static void adj_tx_serdes(struct qib_pportdata *);
72static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
73static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
74
75static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
76static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
77
78#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
79
80/* LE2 serdes values for different cases */
81#define LE2_DEFAULT 5
82#define LE2_5m 4
83#define LE2_QME 0
84
85/* Below is special-purpose, so only really works for the IB SerDes blocks. */
86#define IBSD(hw_pidx) (hw_pidx + 2)
87
88/* these are variables for documentation and experimentation purposes */
89static const unsigned rcv_int_timeout = 375;
90static const unsigned rcv_int_count = 16;
91static const unsigned sdma_idle_cnt = 64;
92
93/* Time to stop altering Rx Equalization parameters, after link up. */
94#define RXEQ_DISABLE_MSECS 2500
95
96/*
97 * Number of VLs we are configured to use (to allow for more
98 * credits per vl, etc.)
99 */
100ushort qib_num_cfg_vls = 2;
101module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
102MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
103
104static ushort qib_chase = 1;
105module_param_named(chase, qib_chase, ushort, S_IRUGO);
106MODULE_PARM_DESC(chase, "Enable state chase handling");
107
108static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
109module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
110MODULE_PARM_DESC(long_attenuation, \
111 "attenuation cutoff (dB) for long copper cable setup");
112
113static ushort qib_singleport;
114module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
115MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
116
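/*
 * Load-time usage sketch for the read-only parameters above (illustrative
 * only; assumes the driver is built as the ib_qib module):
 *
 *   modprobe ib_qib num_vls=4 singleport=1 long_attenuation=15
 *
 * All four are S_IRUGO only, so after load they can be inspected but not
 * changed through /sys/module/ib_qib/parameters/.
 */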
117
118/*
119 * Setup QMH7342 receive and transmit parameters, necessary because
120 * each bay, Mez connector, and IB port need different tuning, beyond
121 * what the switch and HCA can do automatically.
 122 * It's expected to be done by cat'ing files to the module's parameter
 123 * file, rather than setting it up as a module parameter at load time.
124 * It's a "write-only" file, returns 0 when read back.
125 * The unit, port, bay (if given), and values MUST be done as a single write.
126 * The unit, port, and bay must precede the values to be effective.
127 */
128static int setup_qmh_params(const char *, struct kernel_param *);
129static unsigned dummy_qmh_params;
130module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint,
131 &dummy_qmh_params, S_IWUSR | S_IRUGO);
132
133/* similarly for QME7342, but it's simpler */
134static int setup_qme_params(const char *, struct kernel_param *);
135static unsigned dummy_qme_params;
136module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint,
137 &dummy_qme_params, S_IWUSR | S_IRUGO);
138
139#define MAX_ATTEN_LEN 64 /* plenty for any real system */
140/* for read back, default index is ~5m copper cable */
141static char cable_atten_list[MAX_ATTEN_LEN] = "10";
142static struct kparam_string kp_cable_atten = {
143 .string = cable_atten_list,
144 .maxlen = MAX_ATTEN_LEN
145};
146static int setup_cable_atten(const char *, struct kernel_param *);
147module_param_call(cable_atten, setup_cable_atten, param_get_string,
148 &kp_cable_atten, S_IWUSR | S_IRUGO);
149MODULE_PARM_DESC(cable_atten, \
150 "cable attenuation indices for cables with invalid EEPROM");
151
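/*
 * Runtime usage sketch for the write-only knobs above (hypothetical values;
 * the exact value syntax is defined by setup_qmh_params(), setup_qme_params()
 * and setup_cable_atten() later in this file, and the module is assumed to
 * be named ib_qib):
 *
 *   echo "<unit> <port> [bay] <values...>" > \
 *       /sys/module/ib_qib/parameters/qmh_serdes_setup
 *   echo "10" > /sys/module/ib_qib/parameters/cable_atten
 *
 * Everything for one setting must arrive in a single write; qmh_serdes_setup
 * and qme_serdes_setup read back as 0, while cable_atten reads back the
 * stored index string (default "10").
 */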
152#define BOARD_QME7342 5
153#define BOARD_QMH7342 6
154#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
155 BOARD_QMH7342)
156#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
157 BOARD_QME7342)
158
159#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
160
161#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
162
163#define MASK_ACROSS(lsb, msb) \
164 (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
165
166#define SYM_RMASK(regname, fldname) ((u64) \
167 QIB_7322_##regname##_##fldname##_RMASK)
168
169#define SYM_MASK(regname, fldname) ((u64) \
170 QIB_7322_##regname##_##fldname##_RMASK << \
171 QIB_7322_##regname##_##fldname##_LSB)
172
173#define SYM_FIELD(value, regname, fldname) ((u64) \
174 (((value) >> SYM_LSB(regname, fldname)) & \
175 SYM_RMASK(regname, fldname)))
176
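/*
 * Worked example of the SYM_* accessors (expansion shown for illustration):
 * IS_QMH(dd), defined above, is roughly
 *
 *   ((dd->revision >> QIB_7322_Revision_BoardID_LSB) &
 *    QIB_7322_Revision_BoardID_RMASK) == BOARD_QMH7342
 *
 * i.e. SYM_FIELD() shifts the register value down to the field's LSB and
 * masks with the field's right-justified mask from qib_7322_regs.h.
 */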
177/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
178#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
179 (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
180
181#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
182#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
183#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
184#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
185#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
186/* Below because most, but not all, fields of IntMask have that full suffix */
187#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
188
189
190#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
191
192/*
 193 * The size bits give us 2^N, in KB units; 0 marks the entry as invalid,
 194 * and 7 is reserved. We currently use only 2KB and 4KB.
195 */
196#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
197#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
198#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
199#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
200
201#define SendIBSLIDAssignMask \
202 QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
203#define SendIBSLMCMask \
204 QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
205
206#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
207#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
208#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
209#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
210#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
211#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
212
213#define _QIB_GPIO_SDA_NUM 1
214#define _QIB_GPIO_SCL_NUM 0
215#define QIB_EEPROM_WEN_NUM 14
216#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
217
218/* HW counter clock is at 4nsec */
219#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
220
221/* full speed IB port 1 only */
222#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
223#define PORT_SPD_CAP_SHIFT 3
224
225/* full speed featuremask, both ports */
226#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
227
228/*
229 * This file contains almost all the chip-specific register information and
 230 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
231 */
232
233/* Use defines to tie machine-generated names to lower-case names */
234#define kr_contextcnt KREG_IDX(ContextCnt)
235#define kr_control KREG_IDX(Control)
236#define kr_counterregbase KREG_IDX(CntrRegBase)
237#define kr_errclear KREG_IDX(ErrClear)
238#define kr_errmask KREG_IDX(ErrMask)
239#define kr_errstatus KREG_IDX(ErrStatus)
240#define kr_extctrl KREG_IDX(EXTCtrl)
241#define kr_extstatus KREG_IDX(EXTStatus)
242#define kr_gpio_clear KREG_IDX(GPIOClear)
243#define kr_gpio_mask KREG_IDX(GPIOMask)
244#define kr_gpio_out KREG_IDX(GPIOOut)
245#define kr_gpio_status KREG_IDX(GPIOStatus)
246#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
247#define kr_debugportval KREG_IDX(DebugPortValueReg)
248#define kr_fmask KREG_IDX(feature_mask)
249#define kr_act_fmask KREG_IDX(active_feature_mask)
250#define kr_hwerrclear KREG_IDX(HwErrClear)
251#define kr_hwerrmask KREG_IDX(HwErrMask)
252#define kr_hwerrstatus KREG_IDX(HwErrStatus)
253#define kr_intclear KREG_IDX(IntClear)
254#define kr_intmask KREG_IDX(IntMask)
255#define kr_intredirect KREG_IDX(IntRedirect0)
256#define kr_intstatus KREG_IDX(IntStatus)
257#define kr_pagealign KREG_IDX(PageAlign)
258#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
259#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
260#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
261#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
262#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
263#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
264#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
265#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
266#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
267#define kr_revision KREG_IDX(Revision)
268#define kr_scratch KREG_IDX(Scratch)
269#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
270#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
271#define kr_sendctrl KREG_IDX(SendCtrl)
272#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
273#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
274#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
275#define kr_sendpiobufbase KREG_IDX(SendBufBase)
276#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
277#define kr_sendpiosize KREG_IDX(SendBufSize)
278#define kr_sendregbase KREG_IDX(SendRegBase)
279#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
280#define kr_userregbase KREG_IDX(UserRegBase)
281#define kr_intgranted KREG_IDX(Int_Granted)
282#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
283#define kr_intblocked KREG_IDX(IntBlocked)
284#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
285
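/*
 * Example of how these aliases are meant to be used (qib_read_kreg64() is
 * defined further down in this file):
 *
 *   u64 rev = qib_read_kreg64(dd, kr_revision);
 *
 * which indexes kregbase by QIB_7322_Revision_OFFS / sizeof(u64) without
 * spelling out the machine-generated register name at every call site.
 */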
286/*
287 * per-port kernel registers. Access only with qib_read_kreg_port()
288 * or qib_write_kreg_port()
289 */
290#define krp_errclear KREG_IBPORT_IDX(ErrClear)
291#define krp_errmask KREG_IBPORT_IDX(ErrMask)
292#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
293#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
294#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
295#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
296#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
297#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
298#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
299#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
300#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
301#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
302#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
303#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
304#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
305#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
306#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
307#define krp_psstart KREG_IBPORT_IDX(PSStart)
308#define krp_psstat KREG_IBPORT_IDX(PSStat)
309#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
310#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
311#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
312#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
313#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
314#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
315#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
316#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
317#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
318#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
319#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
320#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
321#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
322#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
323#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
324#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
325#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
326#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
327#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
328#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
329#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
330#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
331#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
332#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
333#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
334#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
335#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
336#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
337#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
338#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
339#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
340
341/*
 342 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
343 * or qib_write_kreg_ctxt()
344 */
345#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
346#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
347
348/*
349 * TID Flow table, per context. Reduces
350 * number of hdrq updates to one per flow (or on errors).
351 * context 0 and 1 share same memory, but have distinct
352 * addresses. Since for now, we never use expected sends
353 * on kernel contexts, we don't worry about that (we initialize
354 * those entries for ctxt 0/1 on driver load twice, for example).
355 */
356#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
357#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
358
359/* these are the error bits in the tid flows, and are W1C */
360#define TIDFLOW_ERRBITS ( \
361 (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
362 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
363 (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
364 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
365
366/* Most (not all) Counters are per-IBport.
367 * Requires LBIntCnt is at offset 0 in the group
368 */
369#define CREG_IDX(regname) \
370((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
371
372#define crp_badformat CREG_IDX(RxVersionErrCnt)
373#define crp_err_rlen CREG_IDX(RxLenErrCnt)
374#define crp_erricrc CREG_IDX(RxICRCErrCnt)
375#define crp_errlink CREG_IDX(RxLinkMalformCnt)
376#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
377#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
378#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
379#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
380#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
381#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
382#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
383#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
384#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
385#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
386#define crp_pktrcv CREG_IDX(RxDataPktCnt)
387#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
388#define crp_pktsend CREG_IDX(TxDataPktCnt)
389#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
390#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
391#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
392#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
393#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
394#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
395#define crp_rcvebp CREG_IDX(RxEBPCnt)
396#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
397#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
398#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
399#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
400#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
401#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
402#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
403#define crp_sendstall CREG_IDX(TxFlowStallCnt)
404#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
405#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
406#define crp_txlenerr CREG_IDX(TxLenErrCnt)
408#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
409#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
410#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
411#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
412#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
413#define crp_wordrcv CREG_IDX(RxDwordCnt)
414#define crp_wordsend CREG_IDX(TxDwordCnt)
415#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
416
417/* these are the (few) counters that are not port-specific */
418#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
419 QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
420#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
421#define cr_lbint CREG_DEVIDX(LBIntCnt)
422#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
423#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
424#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
425#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
426#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
427
428/* no chip register for # of IB ports supported, so define */
429#define NUM_IB_PORTS 2
430
431/* 1 VL15 buffer per hardware IB port, no register for this, so define */
432#define NUM_VL15_BUFS NUM_IB_PORTS
433
434/*
435 * context 0 and 1 are special, and there is no chip register that
436 * defines this value, so we have to define it here.
437 * These are all allocated to either 0 or 1 for single port
438 * hardware configuration, otherwise each gets half
439 */
440#define KCTXT0_EGRCNT 2048
441
442/* values for vl and port fields in PBC, 7322-specific */
443#define PBC_PORT_SEL_LSB 26
444#define PBC_PORT_SEL_RMASK 1
445#define PBC_VL_NUM_LSB 27
446#define PBC_VL_NUM_RMASK 7
447#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
448#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
449
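/*
 * Sketch of a PBC word built with the fields above (see flush_fifo() below
 * for an in-tree use):
 *
 *   u64 pbc = PBC_7322_VL15_SEND |
 *             ((u64)port << (PBC_PORT_SEL_LSB + 32)) |
 *             len_dwords;
 *
 * The VL/port selects occupy the upper 32 bits of the 64-bit PBC word; the
 * *_CTRL variant above is the same VL15 bit within the 32-bit control word.
 */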
450static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
451 [IB_RATE_2_5_GBPS] = 16,
452 [IB_RATE_5_GBPS] = 8,
453 [IB_RATE_10_GBPS] = 4,
454 [IB_RATE_20_GBPS] = 2,
455 [IB_RATE_30_GBPS] = 2,
456 [IB_RATE_40_GBPS] = 1
457};
458
459#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
460#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
461
462/* link training states, from IBC */
463#define IB_7322_LT_STATE_DISABLED 0x00
464#define IB_7322_LT_STATE_LINKUP 0x01
465#define IB_7322_LT_STATE_POLLACTIVE 0x02
466#define IB_7322_LT_STATE_POLLQUIET 0x03
467#define IB_7322_LT_STATE_SLEEPDELAY 0x04
468#define IB_7322_LT_STATE_SLEEPQUIET 0x05
469#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
470#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
471#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
472#define IB_7322_LT_STATE_CFGIDLE 0x0b
473#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
474#define IB_7322_LT_STATE_TXREVLANES 0x0d
475#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
476#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
477#define IB_7322_LT_STATE_CFGENH 0x10
478#define IB_7322_LT_STATE_CFGTEST 0x11
479
480/* link state machine states from IBC */
481#define IB_7322_L_STATE_DOWN 0x0
482#define IB_7322_L_STATE_INIT 0x1
483#define IB_7322_L_STATE_ARM 0x2
484#define IB_7322_L_STATE_ACTIVE 0x3
485#define IB_7322_L_STATE_ACT_DEFER 0x4
486
487static const u8 qib_7322_physportstate[0x20] = {
488 [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
489 [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
490 [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
491 [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
492 [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
493 [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
494 [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
495 [IB_7322_LT_STATE_CFGRCVFCFG] =
496 IB_PHYSPORTSTATE_CFG_TRAIN,
497 [IB_7322_LT_STATE_CFGWAITRMT] =
498 IB_PHYSPORTSTATE_CFG_TRAIN,
499 [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
500 [IB_7322_LT_STATE_RECOVERRETRAIN] =
501 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
502 [IB_7322_LT_STATE_RECOVERWAITRMT] =
503 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
504 [IB_7322_LT_STATE_RECOVERIDLE] =
505 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
506 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
507 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
508 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
509 [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
510 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
511 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
512 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
513 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
514};
515
516struct qib_chip_specific {
517 u64 __iomem *cregbase;
518 u64 *cntrs;
519 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
520 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
521 u64 main_int_mask; /* clear bits which have dedicated handlers */
522 u64 int_enable_mask; /* for per port interrupts in single port mode */
523 u64 errormask;
524 u64 hwerrmask;
525 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
526 u64 gpio_mask; /* shadow the gpio mask register */
527 u64 extctrl; /* shadow the gpio output enable, etc... */
528 u32 ncntrs;
529 u32 nportcntrs;
530 u32 cntrnamelen;
531 u32 portcntrnamelen;
532 u32 numctxts;
533 u32 rcvegrcnt;
534 u32 updthresh; /* current AvailUpdThld */
535 u32 updthresh_dflt; /* default AvailUpdThld */
536 u32 r1;
537 int irq;
538 u32 num_msix_entries;
539 u32 sdmabufcnt;
540 u32 lastbuf_for_pio;
541 u32 stay_in_freeze;
542 u32 recovery_ports_initted;
543#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
544 u32 dca_ctrl;
545 int rhdr_cpu[18];
546 int sdma_cpu[2];
547 u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
548#endif
549 struct msix_entry *msix_entries;
550 void **msix_arg;
551 unsigned long *sendchkenable;
552 unsigned long *sendgrhchk;
553 unsigned long *sendibchk;
554 u32 rcvavail_timeout[18];
555 char emsgbuf[128]; /* for device error interrupt msg buffer */
556};
557
 558/* Table of Tx Emphasis entries, in "human readable" form. */
559struct txdds_ent {
560 u8 amp;
561 u8 pre;
562 u8 main;
563 u8 post;
564};
565
566struct vendor_txdds_ent {
567 u8 oui[QSFP_VOUI_LEN];
568 u8 *partnum;
569 struct txdds_ent sdr;
570 struct txdds_ent ddr;
571 struct txdds_ent qdr;
572};
573
574static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
575
576#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
577#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
578
579#define H1_FORCE_VAL 8
580#define H1_FORCE_QME 1 /* may be overridden via setup_qme_params() */
581#define H1_FORCE_QMH 7 /* may be overridden via setup_qmh_params() */
582
583/* The static and dynamic registers are paired, and the pairs indexed by spd */
584#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
585 + ((spd) * 2))
586
587#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
588#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
589#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
590#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
591#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
592
593static const struct txdds_ent qmh_sdr_txdds = { 11, 0, 5, 6 };
594static const struct txdds_ent qmh_ddr_txdds = { 7, 0, 2, 8 };
595static const struct txdds_ent qmh_qdr_txdds = { 0, 1, 3, 10 };
596
597/* this is used for unknown mez cards also */
598static const struct txdds_ent qme_sdr_txdds = { 11, 0, 4, 4 };
599static const struct txdds_ent qme_ddr_txdds = { 7, 0, 2, 7 };
600static const struct txdds_ent qme_qdr_txdds = { 0, 1, 12, 11 };
601
602struct qib_chippport_specific {
603 u64 __iomem *kpregbase;
604 u64 __iomem *cpregbase;
605 u64 *portcntrs;
606 struct qib_pportdata *ppd;
607 wait_queue_head_t autoneg_wait;
608 struct delayed_work autoneg_work;
609 struct delayed_work ipg_work;
610 struct timer_list chase_timer;
611 /*
612 * these 5 fields are used to establish deltas for IB symbol
613 * errors and linkrecovery errors. They can be reported on
614 * some chips during link negotiation prior to INIT, and with
615 * DDR when faking DDR negotiations with non-IBTA switches.
616 * The chip counters are adjusted at driver unload if there is
617 * a non-zero delta.
618 */
619 u64 ibdeltainprog;
620 u64 ibsymdelta;
621 u64 ibsymsnap;
622 u64 iblnkerrdelta;
623 u64 iblnkerrsnap;
624 u64 iblnkdownsnap;
625 u64 iblnkdowndelta;
626 u64 ibmalfdelta;
627 u64 ibmalfsnap;
628 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
629 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
630 u64 qdr_dfe_time;
631 u64 chase_end;
632 u32 autoneg_tries;
633 u32 recovery_init;
634 u32 qdr_dfe_on;
635 u32 qdr_reforce;
636 /*
637 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
638 * entry zero is unused, to simplify indexing
639 */
640 u16 h1_val;
641 u8 amp[SERDES_CHANS];
642 u8 pre[SERDES_CHANS];
643 u8 mainv[SERDES_CHANS];
644 u8 post[SERDES_CHANS];
645 u8 no_eep; /* attenuation index to use if no qsfp info */
646 u8 ipg_tries;
647 u8 ibmalfusesnap;
648 struct qib_qsfp_data qsfp_data;
649 char epmsgbuf[192]; /* for port error interrupt msg buffer */
650};
651
652static struct {
653 const char *name;
654 irq_handler_t handler;
655 int lsb;
656 int port; /* 0 if not port-specific, else port # */
657} irq_table[] = {
658 { QIB_DRV_NAME, qib_7322intr, -1, 0 },
659 { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
660 SYM_LSB(IntStatus, SendBufAvail), 0 },
661 { QIB_DRV_NAME " (sdma 0)", sdma_intr,
662 SYM_LSB(IntStatus, SDmaInt_0), 1 },
663 { QIB_DRV_NAME " (sdma 1)", sdma_intr,
664 SYM_LSB(IntStatus, SDmaInt_1), 2 },
665 { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
666 SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
667 { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
668 SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
669 { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
670 SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
671 { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
672 SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
673 { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
674 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
675 { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
676 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
677};
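/*
 * Each entry above names an MSI-X vector, its handler, its lsb within
 * IntStatus (-1 for the catch-all handler used with INTx/MSI), and the IB
 * port it serves (0 if not port-specific); the interrupt setup code later
 * in this file walks this table when requesting vectors.
 */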
678
679#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
680static const struct dca_reg_map {
681 int shadow_inx;
682 int lsb;
683 u64 mask;
684 u16 regno;
685} dca_rcvhdr_reg_map[] = {
686 { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
687 ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
688 { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
689 ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
690 { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
691 ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
692 { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
693 ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
694 { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
695 ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
696 { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
697 ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
698 { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
699 ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
700 { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
701 ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
702 { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
703 ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
704 { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
705 ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
706 { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
707 ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
708 { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
709 ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
710 { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
711 ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
712 { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
713 ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
714 { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
715 ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
716 { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
717 ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
718 { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
719 ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
720 { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
721 ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
722};
723#endif
724
725/* ibcctrl bits */
726#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
727/* cycle through TS1/TS2 till OK */
728#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
729/* wait for TS1, then go on */
730#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
731#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
732
733#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
734#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
735#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
736
737#define BLOB_7322_IBCHG 0x101
738
739static inline void qib_write_kreg(const struct qib_devdata *dd,
740 const u32 regno, u64 value);
741static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
742static void write_7322_initregs(struct qib_devdata *);
743static void write_7322_init_portregs(struct qib_pportdata *);
744static void setup_7322_link_recovery(struct qib_pportdata *, u32);
745static void check_7322_rxe_status(struct qib_pportdata *);
746static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
747
748/**
749 * qib_read_ureg32 - read 32-bit virtualized per-context register
750 * @dd: device
751 * @regno: register number
752 * @ctxt: context number
753 *
754 * Return the contents of a register that is virtualized to be per context.
755 * Returns -1 on errors (not distinguishable from valid contents at
756 * runtime; we may add a separate error variable at some point).
757 */
758static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
759 enum qib_ureg regno, int ctxt)
760{
761 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
762 return 0;
763 return readl(regno + (u64 __iomem *)(
764 (dd->ureg_align * ctxt) + (dd->userbase ?
765 (char __iomem *)dd->userbase :
766 (char __iomem *)dd->kregbase + dd->uregbase)));
767}
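/*
 * Address computation shared by the ureg accessors (restated from the code
 * above and below, for reference):
 *
 *   base = dd->userbase ? dd->userbase
 *                       : (char __iomem *)dd->kregbase + dd->uregbase;
 *   addr = base + dd->ureg_align * ctxt;    (this context's register page)
 *   val  = read{l,q}((u64 __iomem *)addr + regno);
 */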
768
769/**
770 * qib_read_ureg - read virtualized per-context register
771 * @dd: device
772 * @regno: register number
773 * @ctxt: context number
774 *
775 * Return the contents of a register that is virtualized to be per context.
776 * Returns -1 on errors (not distinguishable from valid contents at
777 * runtime; we may add a separate error variable at some point).
778 */
779static inline u64 qib_read_ureg(const struct qib_devdata *dd,
780 enum qib_ureg regno, int ctxt)
781{
782
783 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
784 return 0;
785 return readq(regno + (u64 __iomem *)(
786 (dd->ureg_align * ctxt) + (dd->userbase ?
787 (char __iomem *)dd->userbase :
788 (char __iomem *)dd->kregbase + dd->uregbase)));
789}
790
791/**
792 * qib_write_ureg - write virtualized per-context register
793 * @dd: device
794 * @regno: register number
795 * @value: value
796 * @ctxt: context
797 *
798 * Write the contents of a register that is virtualized to be per context.
799 */
800static inline void qib_write_ureg(const struct qib_devdata *dd,
801 enum qib_ureg regno, u64 value, int ctxt)
802{
803 u64 __iomem *ubase;
804 if (dd->userbase)
805 ubase = (u64 __iomem *)
806 ((char __iomem *) dd->userbase +
807 dd->ureg_align * ctxt);
808 else
809 ubase = (u64 __iomem *)
810 (dd->uregbase +
811 (char __iomem *) dd->kregbase +
812 dd->ureg_align * ctxt);
813
814 if (dd->kregbase && (dd->flags & QIB_PRESENT))
815 writeq(value, &ubase[regno]);
816}
817
818static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
819 const u32 regno)
820{
821 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
822 return -1;
823 return readl((u32 __iomem *) &dd->kregbase[regno]);
824}
825
826static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
827 const u32 regno)
828{
829 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
830 return -1;
831 return readq(&dd->kregbase[regno]);
832}
833
834static inline void qib_write_kreg(const struct qib_devdata *dd,
835 const u32 regno, u64 value)
836{
837 if (dd->kregbase && (dd->flags & QIB_PRESENT))
838 writeq(value, &dd->kregbase[regno]);
839}
840
841/*
842 * not many sanity checks for the port-specific kernel register routines,
843 * since they are only used when it's known to be safe.
844*/
845static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
846 const u16 regno)
847{
848 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
849 return 0ULL;
850 return readq(&ppd->cpspec->kpregbase[regno]);
851}
852
853static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
854 const u16 regno, u64 value)
855{
856 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
857 (ppd->dd->flags & QIB_PRESENT))
858 writeq(value, &ppd->cpspec->kpregbase[regno]);
859}
860
861/**
862 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
863 * @dd: the qlogic_ib device
864 * @regno: the register number to write
865 * @ctxt: the context containing the register
866 * @value: the value to write
867 */
868static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
869 const u16 regno, unsigned ctxt,
870 u64 value)
871{
872 qib_write_kreg(dd, regno + ctxt, value);
873}
874
875static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
876{
877 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
878 return 0;
879 return readq(&dd->cspec->cregbase[regno]);
882}
883
884static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
885{
886 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
887 return 0;
888 return readl(&dd->cspec->cregbase[regno]);
891}
892
893static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
894 u16 regno, u64 value)
895{
896 if (ppd->cpspec && ppd->cpspec->cpregbase &&
897 (ppd->dd->flags & QIB_PRESENT))
898 writeq(value, &ppd->cpspec->cpregbase[regno]);
899}
900
901static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
902 u16 regno)
903{
904 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
905 !(ppd->dd->flags & QIB_PRESENT))
906 return 0;
907 return readq(&ppd->cpspec->cpregbase[regno]);
908}
909
910static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
911 u16 regno)
912{
913 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
914 !(ppd->dd->flags & QIB_PRESENT))
915 return 0;
916 return readl(&ppd->cpspec->cpregbase[regno]);
917}
918
919/* bits in Control register */
920#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
921#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
922
923/* bits in general interrupt regs */
924#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
925#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
926#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
927#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
928#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
929#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
930#define QIB_I_C_ERROR INT_MASK(Err)
931
932#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
933#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
934#define QIB_I_GPIO INT_MASK(AssertGPIO)
935#define QIB_I_P_SDMAINT(pidx) \
936 (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
937 INT_MASK_P(SDmaProgress, pidx) | \
938 INT_MASK_PM(SDmaCleanupDone, pidx))
939
940/* Interrupt bits that are "per port" */
941#define QIB_I_P_BITSEXTANT(pidx) \
942 (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
943 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
944 INT_MASK_P(SDmaProgress, pidx) | \
945 INT_MASK_PM(SDmaCleanupDone, pidx))
946
947/* Interrupt bits that are common to a device */
948/* currently unused: QIB_I_SPIOSENT */
949#define QIB_I_C_BITSEXTANT \
950 (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
951 QIB_I_SPIOSENT | \
952 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
953
954#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
955 QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
956
957/*
958 * Error bits that are "per port".
959 */
960#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
961#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
962#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
963#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
964#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
965#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
966#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
967#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
968#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
969#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
970#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
971#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
972#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
973#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
974#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
975#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
976#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
977#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
978#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
979#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
980#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
981#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
982#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
983#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
984#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
985#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
986#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
987#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
988
989#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
990#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
991#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
992#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
993#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
994#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
995#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
996#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
997#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
998#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
999#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1000
1001/* Error bits that are common to a device */
1002#define QIB_E_RESET ERR_MASK(ResetNegated)
1003#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1004#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1005
1006
1007/*
 1008 * Per chip (rather than per-port) errors. Most either do
 1009 * nothing but trigger a print (because they self-recover, or
 1010 * always occur in tandem with other errors that handle the
 1011 * issue), or indicate errors with no recovery; either way we
 1012 * want to know that they happened.
1013 */
1014#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1015#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1016#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1017#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1018#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1019#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1020#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1021#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1022
1023/* SDMA chip errors (not per port)
1024 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1025 * the SDMAHALT error immediately, so we just print the dup error via the
1026 * E_AUTO mechanism. This is true of most of the per-port fatal errors
1027 * as well, but since this is port-independent, by definition, it's
1028 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
1029 * packet send errors, and so are handled in the same manner as other
1030 * per-packet errors.
1031 */
1032#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1033#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1034#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1035
1036/*
 1037 * Below is functionally equivalent to legacy QLOGIC_IB_E_PKTERRS;
1038 * it is used to print "common" packet errors.
1039 */
1040#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1041 QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1042 QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1043 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1044 QIB_E_P_REBP)
1045
 1046/* Error bits that are packet-related (Receive, per-port) */
1047#define QIB_E_P_RPKTERRS (\
1048 QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1049 QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1050 QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1051 QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1052 QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1053 QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1054
1055/*
1056 * Error bits that are Send-related (per port)
1057 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1058 * All of these potentially need to have a buffer disarmed
1059 */
1060#define QIB_E_P_SPKTERRS (\
1061 QIB_E_P_SUNEXP_PKTNUM |\
1062 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1063 QIB_E_P_SMAXPKTLEN |\
1064 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1065 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1066 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1067
1068#define QIB_E_SPKTERRS ( \
1069 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1070 ERR_MASK_N(SendUnsupportedVLErr) | \
1071 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1072
1073#define QIB_E_P_SDMAERRS ( \
1074 QIB_E_P_SDMAHALT | \
1075 QIB_E_P_SDMADESCADDRMISALIGN | \
1076 QIB_E_P_SDMAUNEXPDATA | \
1077 QIB_E_P_SDMAMISSINGDW | \
1078 QIB_E_P_SDMADWEN | \
1079 QIB_E_P_SDMARPYTAG | \
1080 QIB_E_P_SDMA1STDESC | \
1081 QIB_E_P_SDMABASE | \
1082 QIB_E_P_SDMATAILOUTOFBOUND | \
1083 QIB_E_P_SDMAOUTOFBOUND | \
1084 QIB_E_P_SDMAGENMISMATCH)
1085
1086/*
1087 * This sets some bits more than once, but makes it more obvious which
1088 * bits are not handled under other categories, and the repeat definition
1089 * is not a problem.
1090 */
1091#define QIB_E_P_BITSEXTANT ( \
1092 QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1093 QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1094 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1095 QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1096 )
1097
1098/*
1099 * These are errors that can occur when the link
1100 * changes state while a packet is being sent or received. This doesn't
 1101 * cover things like EBP or VCRC that can result from the sender's link
 1102 * changing state mid-packet, so we receive a "known bad" packet.
1103 * All of these are "per port", so renamed:
1104 */
1105#define QIB_E_P_LINK_PKTERRS (\
1106 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1107 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1108 QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1109 QIB_E_P_RUNEXPCHAR)
1110
1111/*
1112 * This sets some bits more than once, but makes it more obvious which
1113 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1114 * and the repeat definition is not a problem.
1115 */
1116#define QIB_E_C_BITSEXTANT (\
1117 QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1118 QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1119 QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1120
1121/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1122#define E_SPKT_ERRS_IGNORE 0
1123
1124#define QIB_EXTS_MEMBIST_DISABLED \
1125 SYM_MASK(EXTStatus, MemBISTDisabled)
1126#define QIB_EXTS_MEMBIST_ENDTEST \
1127 SYM_MASK(EXTStatus, MemBISTEndTest)
1128
1129#define QIB_E_SPIOARMLAUNCH \
1130 ERR_MASK(SendArmLaunchErr)
1131
1132#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1133#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1134
1135/*
1136 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1137 * and also if forced QDR (only QDR enabled). It's enabled for the
1138 * forced QDR case so that scrambling will be enabled by the TS3
1139 * exchange, when supported by both sides of the link.
1140 */
1141#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1142#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1143#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1144#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1145#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1146#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1147 SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1148#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1149
1150#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1151#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1152
1153#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1154#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1155#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1156
1157#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1158#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1159#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1160 SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1161#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1162 SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1163#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1164
1165#define IBA7322_REDIRECT_VEC_PER_REG 12
1166
1167#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1168#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1169#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1170#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1171#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1172
1173#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1174
1175#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1176 .msg = #fldname }
1177#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1178 fldname##Mask##_##port), .msg = #fldname }
1179static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1180 HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1181 HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1182 HWE_AUTO(PCIESerdesPClkNotDetect),
1183 HWE_AUTO(PowerOnBISTFailed),
1184 HWE_AUTO(TempsenseTholdReached),
1185 HWE_AUTO(MemoryErr),
1186 HWE_AUTO(PCIeBusParityErr),
1187 HWE_AUTO(PcieCplTimeout),
1188 HWE_AUTO(PciePoisonedTLP),
1189 HWE_AUTO_P(SDmaMemReadErr, 1),
1190 HWE_AUTO_P(SDmaMemReadErr, 0),
1191 HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1192 HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1193 HWE_AUTO_P(statusValidNoEop, 1),
1194 HWE_AUTO_P(statusValidNoEop, 0),
1195 HWE_AUTO(LATriggered),
1196 { .mask = 0 }
1197};
1198
1199#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1200 .msg = #fldname }
1201#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1202 .msg = #fldname }
1203static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1204 E_AUTO(ResetNegated),
1205 E_AUTO(HardwareErr),
1206 E_AUTO(InvalidAddrErr),
1207 E_AUTO(SDmaVL15Err),
1208 E_AUTO(SBufVL15MisUseErr),
1209 E_AUTO(InvalidEEPCmd),
1210 E_AUTO(RcvContextShareErr),
1211 E_AUTO(SendVLMismatchErr),
1212 E_AUTO(SendArmLaunchErr),
1213 E_AUTO(SendSpecialTriggerErr),
1214 E_AUTO(SDmaWrongPortErr),
1215 E_AUTO(SDmaBufMaskDuplicateErr),
1216 E_AUTO(RcvHdrFullErr),
1217 E_AUTO(RcvEgrFullErr),
1218 { .mask = 0 }
1219};
1220
1221static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1222 E_P_AUTO(IBStatusChanged),
1223 E_P_AUTO(SHeadersErr),
1224 E_P_AUTO(VL15BufMisuseErr),
1225 /*
1226 * SDmaHaltErr is not really an error, make it clearer;
1227 */
1228 {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
1229 E_P_AUTO(SDmaDescAddrMisalignErr),
1230 E_P_AUTO(SDmaUnexpDataErr),
1231 E_P_AUTO(SDmaMissingDwErr),
1232 E_P_AUTO(SDmaDwEnErr),
1233 E_P_AUTO(SDmaRpyTagErr),
1234 E_P_AUTO(SDma1stDescErr),
1235 E_P_AUTO(SDmaBaseErr),
1236 E_P_AUTO(SDmaTailOutOfBoundErr),
1237 E_P_AUTO(SDmaOutOfBoundErr),
1238 E_P_AUTO(SDmaGenMismatchErr),
1239 E_P_AUTO(SendBufMisuseErr),
1240 E_P_AUTO(SendUnsupportedVLErr),
1241 E_P_AUTO(SendUnexpectedPktNumErr),
1242 E_P_AUTO(SendDroppedDataPktErr),
1243 E_P_AUTO(SendDroppedSmpPktErr),
1244 E_P_AUTO(SendPktLenErr),
1245 E_P_AUTO(SendUnderRunErr),
1246 E_P_AUTO(SendMaxPktLenErr),
1247 E_P_AUTO(SendMinPktLenErr),
1248 E_P_AUTO(RcvIBLostLinkErr),
1249 E_P_AUTO(RcvHdrErr),
1250 E_P_AUTO(RcvHdrLenErr),
1251 E_P_AUTO(RcvBadTidErr),
1252 E_P_AUTO(RcvBadVersionErr),
1253 E_P_AUTO(RcvIBFlowErr),
1254 E_P_AUTO(RcvEBPErr),
1255 E_P_AUTO(RcvUnsupportedVLErr),
1256 E_P_AUTO(RcvUnexpectedCharErr),
1257 E_P_AUTO(RcvShortPktLenErr),
1258 E_P_AUTO(RcvLongPktLenErr),
1259 E_P_AUTO(RcvMaxPktLenErr),
1260 E_P_AUTO(RcvMinPktLenErr),
1261 E_P_AUTO(RcvICRCErr),
1262 E_P_AUTO(RcvVCRCErr),
1263 E_P_AUTO(RcvFormatErr),
1264 { .mask = 0 }
1265};
1266
1267/*
1268 * Below generates "auto-message" for interrupts not specific to any port or
1269 * context
1270 */
1271#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1272 .msg = #fldname }
1273/* Below generates "auto-message" for interrupts specific to a port */
1274#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1275 SYM_LSB(IntMask, fldname##Mask##_0), \
1276 SYM_LSB(IntMask, fldname##Mask##_1)), \
1277 .msg = #fldname "_P" }
1278/* For some reason, the SerDesTrimDone bits are reversed */
1279#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1280 SYM_LSB(IntMask, fldname##Mask##_1), \
1281 SYM_LSB(IntMask, fldname##Mask##_0)), \
1282 .msg = #fldname "_P" }
1283/*
1284 * Below generates "auto-message" for interrupts specific to a context,
1285 * with ctxt-number appended
1286 */
1287#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1288 SYM_LSB(IntMask, fldname##0IntMask), \
1289 SYM_LSB(IntMask, fldname##17IntMask)), \
1290 .msg = #fldname "_C"}
1291
1292static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1293 INTR_AUTO_P(SDmaInt),
1294 INTR_AUTO_P(SDmaProgressInt),
1295 INTR_AUTO_P(SDmaIdleInt),
1296 INTR_AUTO_P(SDmaCleanupDone),
1297 INTR_AUTO_C(RcvUrg),
1298 INTR_AUTO_P(ErrInt),
1299 INTR_AUTO(ErrInt), /* non-port-specific errs */
1300 INTR_AUTO(AssertGPIOInt),
1301 INTR_AUTO_P(SendDoneInt),
1302 INTR_AUTO(SendBufAvailInt),
1303 INTR_AUTO_C(RcvAvail),
1304 { .mask = 0 }
1305};
1306
1307#define TXSYMPTOM_AUTO_P(fldname) \
1308 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
1309static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1310 TXSYMPTOM_AUTO_P(NonKeyPacket),
1311 TXSYMPTOM_AUTO_P(GRHFail),
1312 TXSYMPTOM_AUTO_P(PkeyFail),
1313 TXSYMPTOM_AUTO_P(QPFail),
1314 TXSYMPTOM_AUTO_P(SLIDFail),
1315 TXSYMPTOM_AUTO_P(RawIPV6),
1316 TXSYMPTOM_AUTO_P(PacketTooSmall),
1317 { .mask = 0 }
1318};
1319
1320#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1321
1322/*
1323 * Called when we might have an error that is specific to a particular
 1324 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
 1325 * we don't need to force the update of pioavail here.
1326 */
1327static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1328{
1329 struct qib_devdata *dd = ppd->dd;
1330 u32 i;
1331 int any;
1332 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1333 u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1334 unsigned long sbuf[4];
1335
1336 /*
1337 * It's possible that sendbuffererror could have bits set; might
1338 * have already done this as a result of hardware error handling.
1339 */
1340 any = 0;
1341 for (i = 0; i < regcnt; ++i) {
1342 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1343 if (sbuf[i]) {
1344 any = 1;
1345 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1346 }
1347 }
1348
1349 if (any)
1350 qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1351}
1352
1353/* No txe_recover yet, if ever */
1354
1355/* No decode__errors yet */
1356static void err_decode(char *msg, size_t len, u64 errs,
1357 const struct qib_hwerror_msgs *msp)
1358{
1359 u64 these, lmask;
1360 int took, multi, n = 0;
1361
1362 while (msp && msp->mask) {
1363 multi = (msp->mask & (msp->mask - 1));
1364 while (errs & msp->mask) {
1365 these = (errs & msp->mask);
1366 lmask = (these & (these - 1)) ^ these;
1367 if (len) {
1368 if (n++) {
1369 /* separate the strings */
1370 *msg++ = ',';
1371 len--;
1372 }
1373 took = scnprintf(msg, len, "%s", msp->msg);
1374 len -= took;
1375 msg += took;
1376 }
1377 errs &= ~lmask;
1378 if (len && multi) {
1379 /* More than one bit this mask */
1380 int idx = -1;
1381
1382 while (lmask & msp->mask) {
1383 ++idx;
1384 lmask >>= 1;
1385 }
1386 took = scnprintf(msg, len, "_%d", idx);
1387 len -= took;
1388 msg += took;
1389 }
1390 }
1391 ++msp;
1392 }
1393 /* If some bits are left, show in hex. */
1394 if (len && errs)
1395 snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1396 (unsigned long long) errs);
1397}
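/*
 * Illustrative use of err_decode() (the real callers appear later in this
 * file, in the error and interrupt handlers):
 *
 *   char msg[128];
 *   err_decode(msg, sizeof(msg), errs, qib_7322error_msgs);
 *
 * yields e.g. "HardwareErr,InvalidAddrErr" for matching single-bit masks,
 * appends "_<n>" for multi-bit masks, and tacks on ",MORE:<hex>" for any
 * bits not covered by the table.
 */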
1398
1399/* only called if r1 set */
1400static void flush_fifo(struct qib_pportdata *ppd)
1401{
1402 struct qib_devdata *dd = ppd->dd;
1403 u32 __iomem *piobuf;
1404 u32 bufn;
1405 u32 *hdr;
1406 u64 pbc;
1407 const unsigned hdrwords = 7;
1408 static struct qib_ib_header ibhdr = {
1409 .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1410 .lrh[1] = IB_LID_PERMISSIVE,
1411 .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1412 .lrh[3] = IB_LID_PERMISSIVE,
1413 .u.oth.bth[0] = cpu_to_be32(
1414 (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1415 .u.oth.bth[1] = cpu_to_be32(0),
1416 .u.oth.bth[2] = cpu_to_be32(0),
1417 .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1418 .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1419 };
1420
1421 /*
1422 * Send a dummy VL15 packet to flush the launch FIFO.
1423 * This will not actually be sent since the TxeBypassIbc bit is set.
1424 */
1425 pbc = PBC_7322_VL15_SEND |
1426 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1427 (hdrwords + SIZE_OF_CRC);
1428 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1429 if (!piobuf)
1430 return;
1431 writeq(pbc, piobuf);
1432 hdr = (u32 *) &ibhdr;
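	/*
	 * With write-combining buffers, flush and write the last header
	 * word separately, so the rest of the packet is visible to the
	 * chip before the final word arrives.
	 */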
1433 if (dd->flags & QIB_PIO_FLUSH_WC) {
1434 qib_flush_wc();
1435 qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1436 qib_flush_wc();
1437 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1438 qib_flush_wc();
1439 } else
1440 qib_pio_copy(piobuf + 2, hdr, hdrwords);
1441 qib_sendbuf_done(dd, bufn);
1442}
1443
1444/*
1445 * This is called with interrupts disabled and sdma_lock held.
1446 */
1447static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1448{
1449 struct qib_devdata *dd = ppd->dd;
1450 u64 set_sendctrl = 0;
1451 u64 clr_sendctrl = 0;
1452
1453 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1454 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1455 else
1456 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1457
1458 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1459 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1460 else
1461 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1462
1463 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1464 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1465 else
1466 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1467
1468 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1469 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1470 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1471 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1472 else
1473 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1474 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1475 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1476
1477 spin_lock(&dd->sendctrl_lock);
1478
1479 /* If we are draining everything, block sends first */
1480 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1481 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1482 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1483 qib_write_kreg(dd, kr_scratch, 0);
1484 }
1485
1486 ppd->p_sendctrl |= set_sendctrl;
1487 ppd->p_sendctrl &= ~clr_sendctrl;
1488
1489 if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1490 qib_write_kreg_port(ppd, krp_sendctrl,
1491 ppd->p_sendctrl |
1492 SYM_MASK(SendCtrl_0, SDmaCleanup));
1493 else
1494 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1495 qib_write_kreg(dd, kr_scratch, 0);
1496
1497 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1498 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1499 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1500 qib_write_kreg(dd, kr_scratch, 0);
1501 }
1502
1503 spin_unlock(&dd->sendctrl_lock);
1504
1505 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1506 flush_fifo(ppd);
1507}
1508
1509static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1510{
1511 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1512}
1513
1514static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1515{
1516 /*
1517 * Set SendDmaLenGen, then clear and set the MSB of the generation
1518 * count, to enable generation checking and to load the internal
1519 * generation counter.
1520 */
1521 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1522 qib_write_kreg_port(ppd, krp_senddmalengen,
1523 ppd->sdma_descq_cnt |
1524 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1525}
1526
1527/*
1528 * Must be called with sdma_lock held, or before init finished.
1529 */
1530static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1531{
1532 /* Commit writes to memory and advance the tail on the chip */
1533 wmb();
1534 ppd->sdma_descq_tail = tail;
1535 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1536}
1537
1538/*
1539 * This is called with interrupts disabled and sdma_lock held.
1540 */
1541static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1542{
1543 /*
1544 * Drain all FIFOs.
1545 * The hardware doesn't require this but we do it so that verbs
1546 * and user applications don't wait for link active to send stale
1547 * data.
1548 */
1549 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1550
1551 qib_sdma_7322_setlengen(ppd);
1552 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1553 ppd->sdma_head_dma[0] = 0;
1554 qib_7322_sdma_sendctrl(ppd,
1555 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1556}
1557
1558#define DISABLES_SDMA ( \
1559 QIB_E_P_SDMAHALT | \
1560 QIB_E_P_SDMADESCADDRMISALIGN | \
1561 QIB_E_P_SDMAMISSINGDW | \
1562 QIB_E_P_SDMADWEN | \
1563 QIB_E_P_SDMARPYTAG | \
1564 QIB_E_P_SDMA1STDESC | \
1565 QIB_E_P_SDMABASE | \
1566 QIB_E_P_SDMATAILOUTOFBOUND | \
1567 QIB_E_P_SDMAOUTOFBOUND | \
1568 QIB_E_P_SDMAGENMISMATCH)
1569
1570static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1571{
1572 unsigned long flags;
1573 struct qib_devdata *dd = ppd->dd;
1574
1575 errs &= QIB_E_P_SDMAERRS;
1576
1577 if (errs & QIB_E_P_SDMAUNEXPDATA)
1578 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1579 ppd->port);
1580
1581 spin_lock_irqsave(&ppd->sdma_lock, flags);
1582
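	/*
	 * A halted-status indication generally advances the SDMA state
	 * machine; in the running state it is treated as an error halt.
	 */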
1583 switch (ppd->sdma_state.current_state) {
1584 case qib_sdma_state_s00_hw_down:
1585 break;
1586
1587 case qib_sdma_state_s10_hw_start_up_wait:
1588 if (errs & QIB_E_P_SDMAHALT)
1589 __qib_sdma_process_event(ppd,
1590 qib_sdma_event_e20_hw_started);
1591 break;
1592
1593 case qib_sdma_state_s20_idle:
1594 break;
1595
1596 case qib_sdma_state_s30_sw_clean_up_wait:
1597 break;
1598
1599 case qib_sdma_state_s40_hw_clean_up_wait:
1600 if (errs & QIB_E_P_SDMAHALT)
1601 __qib_sdma_process_event(ppd,
1602 qib_sdma_event_e50_hw_cleaned);
1603 break;
1604
1605 case qib_sdma_state_s50_hw_halt_wait:
1606 if (errs & QIB_E_P_SDMAHALT)
1607 __qib_sdma_process_event(ppd,
1608 qib_sdma_event_e60_hw_halted);
1609 break;
1610
1611 case qib_sdma_state_s99_running:
1612 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1613 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1614 break;
1615 }
1616
1617 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1618}
1619
1620/*
1621 * handle per-device errors (not per-port errors)
1622 */
1623static noinline void handle_7322_errors(struct qib_devdata *dd)
1624{
1625 char *msg;
1626 u64 iserr = 0;
1627 u64 errs;
1628 u64 mask;
1629 int log_idx;
1630
1631 qib_stats.sps_errints++;
1632 errs = qib_read_kreg64(dd, kr_errstatus);
1633 if (!errs) {
1634 qib_devinfo(dd->pcidev, "device error interrupt, "
1635 "but no error bits set!\n");
1636 goto done;
1637 }
1638
1639 /* don't report errors that are masked */
1640 errs &= dd->cspec->errormask;
1641 msg = dd->cspec->emsgbuf;
1642
1643 /* do these first, they are most important */
1644 if (errs & QIB_E_HARDWARE) {
1645 *msg = '\0';
1646 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1647 } else
1648 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1649 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1650 qib_inc_eeprom_err(dd, log_idx, 1);
1651
1652 if (errs & QIB_E_SPKTERRS) {
1653 qib_disarm_7322_senderrbufs(dd->pport);
1654 qib_stats.sps_txerrs++;
1655 } else if (errs & QIB_E_INVALIDADDR)
1656 qib_stats.sps_txerrs++;
1657 else if (errs & QIB_E_ARMLAUNCH) {
1658 qib_stats.sps_txerrs++;
1659 qib_disarm_7322_senderrbufs(dd->pport);
1660 }
1661 qib_write_kreg(dd, kr_errclear, errs);
1662
1663 /*
1664 * The ones we mask off are handled specially below
1665 * or above. Also mask SDMADISABLED by default as it
1666 * is too chatty.
1667 */
1668 mask = QIB_E_HARDWARE;
1669 *msg = '\0';
1670
1671 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1672 qib_7322error_msgs);
1673
1674 /*
1675 * Getting reset is a tragedy for all ports. Mark the device
1676 * _and_ the ports as "offline" in a way meaningful to each.
1677 */
1678 if (errs & QIB_E_RESET) {
1679 int pidx;
1680
1681 qib_dev_err(dd, "Got reset, requires re-init "
1682 "(unload and reload driver)\n");
1683 dd->flags &= ~QIB_INITTED; /* needs re-init */
1684 /* mark as having had error */
1685 *dd->devstatusp |= QIB_STATUS_HWERROR;
1686 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1687 if (dd->pport[pidx].link_speed_supported)
1688 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1689 }
1690
1691 if (*msg && iserr)
1692 qib_dev_err(dd, "%s error\n", msg);
1693
1694 /*
1695 * If there were hdrq or egrfull errors, wake up any processes
1696 * waiting in poll. We used to try to check which contexts had
1697 * the overflow, but given the cost of that and the chip reads
1698 * to support it, it's better to just wake everybody up if we
1699 * get an overflow; waiters can poll again if it's not them.
1700 */
1701 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1702 qib_handle_urcv(dd, ~0U);
1703 if (errs & ERR_MASK(RcvEgrFullErr))
1704 qib_stats.sps_buffull++;
1705 else
1706 qib_stats.sps_hdrfull++;
1707 }
1708
1709done:
1710 return;
1711}
1712
1713static void reenable_chase(unsigned long opaque)
1714{
1715 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1716
1717 ppd->cpspec->chase_timer.expires = 0;
1718 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1719 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1720}
1721
1722static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
1723{
1724 ppd->cpspec->chase_end = 0;
1725
1726 if (!qib_chase)
1727 return;
1728
1729 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1730 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1731 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1732 add_timer(&ppd->cpspec->chase_timer);
1733}
1734
1735static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1736{
1737 u8 ibclt;
1738 u64 tnow;
1739
1740 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1741
1742 /*
1743 * Detect and handle the state chase issue, where we can
1744 * get stuck if we are unlucky on timing on both sides of
1745 * the link. If we are, we disable, set a timer, and
1746 * then re-enable.
1747 */
1748 switch (ibclt) {
1749 case IB_7322_LT_STATE_CFGRCVFCFG:
1750 case IB_7322_LT_STATE_CFGWAITRMT:
1751 case IB_7322_LT_STATE_TXREVLANES:
1752 case IB_7322_LT_STATE_CFGENH:
1753 tnow = get_jiffies_64();
1754 if (ppd->cpspec->chase_end &&
1755 time_after64(tnow, ppd->cpspec->chase_end))
1756 disable_chase(ppd, tnow, ibclt);
1757 else if (!ppd->cpspec->chase_end)
1758 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1759 break;
1760 default:
1761 ppd->cpspec->chase_end = 0;
1762 break;
1763 }
1764
1765 if (ibclt == IB_7322_LT_STATE_CFGTEST &&
1766 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1767 force_h1(ppd);
1768 ppd->cpspec->qdr_reforce = 1;
1769 } else if (ppd->cpspec->qdr_reforce &&
1770 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1771 (ibclt == IB_7322_LT_STATE_CFGENH ||
1772 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1773 ibclt == IB_7322_LT_STATE_LINKUP))
1774 force_h1(ppd);
1775
1776 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1777 ppd->link_speed_enabled == QIB_IB_QDR &&
1778 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1779 ibclt == IB_7322_LT_STATE_CFGENH ||
1780 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1781 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1782 adj_tx_serdes(ppd);
1783
1784 if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
1785 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1786 ppd->cpspec->qdr_dfe_on = 1;
1787 ppd->cpspec->qdr_dfe_time = 0;
1788 /* On link down, reenable QDR adaptation */
1789 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1790 ppd->dd->cspec->r1 ?
1791 QDR_STATIC_ADAPT_DOWN_R1 :
1792 QDR_STATIC_ADAPT_DOWN);
1793 }
1794}
1795
1796/*
1797 * This is per-pport error handling.
1798 * It will likely get its own MSIx interrupt (one for each port,
1799 * although just a single handler).
1800 */
1801static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1802{
1803 char *msg;
1804 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1805 struct qib_devdata *dd = ppd->dd;
1806
1807 /* do this as soon as possible */
1808 fmask = qib_read_kreg64(dd, kr_act_fmask);
1809 if (!fmask)
1810 check_7322_rxe_status(ppd);
1811
1812 errs = qib_read_kreg_port(ppd, krp_errstatus);
1813 if (!errs)
1814 qib_devinfo(dd->pcidev,
1815 "Port%d error interrupt, but no error bits set!\n",
1816 ppd->port);
1817 if (!fmask)
1818 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1819 if (!errs)
1820 goto done;
1821
1822 msg = ppd->cpspec->epmsgbuf;
1823 *msg = '\0';
1824
1825 if (errs & ~QIB_E_P_BITSEXTANT) {
1826 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1827 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1828 if (!*msg)
1829 snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1830 "no others");
1831 qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1832 " errors 0x%016Lx set (and %s)\n",
1833 (errs & ~QIB_E_P_BITSEXTANT), msg);
1834 *msg = '\0';
1835 }
1836
1837 if (errs & QIB_E_P_SHDR) {
1838 u64 symptom;
1839
1840 /* determine cause, then write to clear */
1841 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1842 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1843 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1844 hdrchk_msgs);
1845 *msg = '\0';
1846 /* senderrbuf cleared in SPKTERRS below */
1847 }
1848
1849 if (errs & QIB_E_P_SPKTERRS) {
1850 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1851 !(ppd->lflags & QIBL_LINKACTIVE)) {
1852 /*
1853 * This can happen when trying to bring the link
1854 * up, but the IB link changes state at the "wrong"
1855 * time. The IB logic then complains that the packet
1856 * isn't valid. We don't want to confuse people, so
1857 * we just don't print them, except at debug
1858 */
1859 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1860 (errs & QIB_E_P_LINK_PKTERRS),
1861 qib_7322p_error_msgs);
1862 *msg = '\0';
1863 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1864 }
1865 qib_disarm_7322_senderrbufs(ppd);
1866 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1867 !(ppd->lflags & QIBL_LINKACTIVE)) {
1868 /*
1869 * This can happen when SMA is trying to bring the link
1870 * up, but the IB link changes state at the "wrong" time.
1871 * The IB logic then complains that the packet isn't
1872 * valid. We don't want to confuse people, so we just
1873 * don't print them, except at debug
1874 */
1875 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1876 qib_7322p_error_msgs);
1877 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1878 *msg = '\0';
1879 }
1880
1881 qib_write_kreg_port(ppd, krp_errclear, errs);
1882
1883 errs &= ~ignore_this_time;
1884 if (!errs)
1885 goto done;
1886
1887 if (errs & QIB_E_P_RPKTERRS)
1888 qib_stats.sps_rcverrs++;
1889 if (errs & QIB_E_P_SPKTERRS)
1890 qib_stats.sps_txerrs++;
1891
1892 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1893
1894 if (errs & QIB_E_P_SDMAERRS)
1895 sdma_7322_p_errors(ppd, errs);
1896
1897 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1898 u64 ibcs;
1899 u8 ltstate;
1900
1901 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1902 ltstate = qib_7322_phys_portstate(ibcs);
1903
1904 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1905 handle_serdes_issues(ppd, ibcs);
1906 if (!(ppd->cpspec->ibcctrl_a &
1907 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1908 /*
1909 * We got our interrupt, so init code should be
1910 * happy and not try alternatives. Now squelch
1911 * other "chatter" from link-negotiation (pre Init)
1912 */
1913 ppd->cpspec->ibcctrl_a |=
1914 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1915 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1916 ppd->cpspec->ibcctrl_a);
1917 }
1918
1919 /* Update our picture of width and speed from chip */
1920 ppd->link_width_active =
1921 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1922 IB_WIDTH_4X : IB_WIDTH_1X;
1923 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1924 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1925 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1926 QIB_IB_DDR : QIB_IB_SDR;
1927
1928 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1929 IB_PHYSPORTSTATE_DISABLED)
1930 qib_set_ib_7322_lstate(ppd, 0,
1931 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1932 else
1933 /*
1934 * Since going into a recovery state causes the link
1935 * state to go down and since recovery is transitory,
1936 * it is better if we "miss" ever seeing the link
1937 * training state go into recovery (i.e., ignore this
1938 * transition for link state special handling purposes)
1939 * without updating lastibcstat.
1940 */
1941 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1942 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1943 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1944 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1945 qib_handle_e_ibstatuschanged(ppd, ibcs);
1946 }
1947 if (*msg && iserr)
1948 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1949
1950 if (ppd->state_wanted & ppd->lflags)
1951 wake_up_interruptible(&ppd->state_wait);
1952done:
1953 return;
1954}
1955
1956/* enable/disable chip from delivering interrupts */
1957static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1958{
1959 if (enable) {
1960 if (dd->flags & QIB_BADINTR)
1961 return;
1962 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1963 /* cause any pending enabled interrupts to be re-delivered */
1964 qib_write_kreg(dd, kr_intclear, 0ULL);
1965 if (dd->cspec->num_msix_entries) {
1966 /* and same for MSIx */
1967 u64 val = qib_read_kreg64(dd, kr_intgranted);
1968 if (val)
1969 qib_write_kreg(dd, kr_intgranted, val);
1970 }
1971 } else
1972 qib_write_kreg(dd, kr_intmask, 0ULL);
1973}
1974
1975/*
1976 * Try to cleanup as much as possible for anything that might have gone
1977 * wrong while in freeze mode, such as pio buffers being written by user
1978 * processes (causing armlaunch), send errors due to going into freeze mode,
1979 * etc., and try to avoid causing extra interrupts while doing so.
1980 * Forcibly update the in-memory pioavail register copies after cleanup
1981 * because the chip won't do it while in freeze mode (the register values
1982 * themselves are kept correct).
1983 * Make sure that we don't lose any important interrupts by using the chip
1984 * feature that says that writing 0 to a bit in *clear that is set in
1985 * *status will cause an interrupt to be generated again (if allowed by
1986 * the *mask value).
1987 * This is in chip-specific code because of all of the register accesses,
1988 * even though the details are similar on most chips.
1989 */
1990static void qib_7322_clear_freeze(struct qib_devdata *dd)
1991{
1992 int pidx;
1993
1994 /* disable error interrupts, to avoid confusion */
1995 qib_write_kreg(dd, kr_errmask, 0ULL);
1996
1997 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1998 if (dd->pport[pidx].link_speed_supported)
1999 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2000 0ULL);
2001
2002 /* also disable interrupts; errormask is sometimes overwritten */
2003 qib_7322_set_intr_state(dd, 0);
2004
2005 /* clear the freeze, and be sure chip saw it */
2006 qib_write_kreg(dd, kr_control, dd->control);
2007 qib_read_kreg32(dd, kr_scratch);
2008
2009 /*
2010 * Force new interrupt if any hwerr, error or interrupt bits are
2011 * still set, and clear "safe" send packet errors related to freeze
2012 * and cancelling sends. Re-enable error interrupts before possible
2013 * force of re-interrupt on pending interrupts.
2014 */
2015 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2016 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2017 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2018 /* We need to purge per-port errs and reset mask, too */
2019 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2020 if (!dd->pport[pidx].link_speed_supported)
2021 continue;
2022 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2023 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2024 }
2025 qib_7322_set_intr_state(dd, 1);
2026}
2027
2028/* no error handling to speak of */
2029/**
2030 * qib_7322_handle_hwerrors - display hardware errors.
2031 * @dd: the qlogic_ib device
2032 * @msg: the output buffer
2033 * @msgl: the size of the output buffer
2034 *
2035 * Use the same msg buffer as regular errors (the one shared with
2036 * qib_handle_errors()) to avoid excessive stack use. Most hardware
2037 * errors are catastrophic, but for right now we'll print them
2038 * and continue.
2039 */
2040static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2041 size_t msgl)
2042{
2043 u64 hwerrs;
2044 u32 ctrl;
2045 int isfatal = 0;
2046
2047 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2048 if (!hwerrs)
2049 goto bail;
2050 if (hwerrs == ~0ULL) {
2051 qib_dev_err(dd, "Read of hardware error status failed "
2052 "(all bits set); ignoring\n");
2053 goto bail;
2054 }
2055 qib_stats.sps_hwerrs++;
2056
2057 /* Always clear the error status register, except BIST fail */
2058 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2059 ~HWE_MASK(PowerOnBISTFailed));
2060
2061 hwerrs &= dd->cspec->hwerrmask;
2062
2063 /* no EEPROM logging, yet */
2064
2065 if (hwerrs)
2066 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
2067 "(cleared)\n", (unsigned long long) hwerrs);
2068
2069 ctrl = qib_read_kreg32(dd, kr_control);
2070 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2071 /*
2072 * No recovery yet...
2073 */
2074 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2075 dd->cspec->stay_in_freeze) {
2076 /*
2077 * If any bits that we aren't ignoring are set, only make the
2078 * complaint once, in case it's stuck or recurring and we get
2079 * here multiple times.
2080 * Force the link down, so the switch knows, and so the
2081 * LEDs are turned off.
2082 */
2083 if (dd->flags & QIB_INITTED)
2084 isfatal = 1;
2085 } else
2086 qib_7322_clear_freeze(dd);
2087 }
2088
2089 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2090 isfatal = 1;
2091 strlcpy(msg, "[Memory BIST test failed, "
2092 "InfiniPath hardware unusable]", msgl);
2093 /* ignore from now on, so disable until driver reloaded */
2094 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2095 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2096 }
2097
2098 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2099
2100 /* Ignore esoteric PLL failures et al. */
2101
2102 qib_dev_err(dd, "%s hardware error\n", msg);
2103
2104 if (isfatal && !dd->diag_client) {
2105 qib_dev_err(dd, "Fatal Hardware Error, no longer"
2106 " usable, SN %.16s\n", dd->serial);
2107 /*
2108 * for /sys status file and user programs to print; if no
2109 * trailing brace is copied, we'll know it was truncated.
2110 */
2111 if (dd->freezemsg)
2112 snprintf(dd->freezemsg, dd->freezelen,
2113 "{%s}", msg);
2114 qib_disable_after_error(dd);
2115 }
2116bail:;
2117}
2118
2119/**
2120 * qib_7322_init_hwerrors - enable hardware errors
2121 * @dd: the qlogic_ib device
2122 *
2123 * now that we have finished initializing everything that might reasonably
2124 * cause a hardware error, and cleared those error bits as they occur,
2125 * we can enable hardware errors in the mask (potentially enabling
2126 * freeze mode), and enable hardware errors as errors (along with
2127 * everything else) in errormask
2128 */
2129static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2130{
2131 int pidx;
2132 u64 extsval;
2133
2134 extsval = qib_read_kreg64(dd, kr_extstatus);
2135 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2136 QIB_EXTS_MEMBIST_ENDTEST)))
2137 qib_dev_err(dd, "MemBIST did not complete!\n");
2138
2139 /* never clear BIST failure, so reported on each driver load */
2140 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2141 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2142
2143 /* clear all */
2144 qib_write_kreg(dd, kr_errclear, ~0ULL);
2145 /* enable errors that are masked, at least this first time. */
2146 qib_write_kreg(dd, kr_errmask, ~0ULL);
2147 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2148 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2149 if (dd->pport[pidx].link_speed_supported)
2150 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2151 ~0ULL);
2152}
2153
2154/*
2155 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
2156 * on chips that are count-based, rather than trigger-based. There is no
2157 * reference counting, but that's also fine, given the intended use.
2158 * Only chip-specific because it's all register accesses
2159 */
2160static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2161{
2162 if (enable) {
2163 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2164 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2165 } else
2166 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2167 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2168}
2169
2170/*
2171 * Formerly took parameter <which> in pre-shifted,
2172 * pre-merged form with LinkCmd and LinkInitCmd
2173 * together, and assuming the zero was NOP.
2174 */
2175static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2176 u16 linitcmd)
2177{
2178 u64 mod_wd;
2179 struct qib_devdata *dd = ppd->dd;
2180 unsigned long flags;
2181
2182 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2183 /*
2184 * If we are told to disable, note that so link-recovery
2185 * code does not attempt to bring us back up.
2186 * Also reset everything that we can, so we start
2187 * completely clean when re-enabled (before we
2188 * actually issue the disable to the IBC)
2189 */
2190 qib_7322_mini_pcs_reset(ppd);
2191 spin_lock_irqsave(&ppd->lflags_lock, flags);
2192 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2193 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2194 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2195 /*
2196 * Any other linkinitcmd will lead to LINKDOWN and then
2197 * to INIT (if all is well), so clear flag to let
2198 * link-recovery code attempt to bring us back up.
2199 */
2200 spin_lock_irqsave(&ppd->lflags_lock, flags);
2201 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2202 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2203 /*
2204 * Clear status change interrupt reduction so the
2205 * new state is seen.
2206 */
2207 ppd->cpspec->ibcctrl_a &=
2208 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2209 }
2210
2211 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2212 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2213
2214 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2215 mod_wd);
2216 /* write to chip to prevent back-to-back writes of ibc reg */
2217 qib_write_kreg(dd, kr_scratch, 0);
2218
2219}
2220
2221/*
2222 * The total RCV buffer memory is 64KB, used for both ports, and is
2223 * in units of 64 bytes (same as IB flow control credit unit).
2224 * The consumedVL fields in the same registers are in 32 byte units!
2225 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2226 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2227 * in krp_rxcreditvl15, rather than 10.
2228 */
2229#define RCV_BUF_UNITSZ 64
2230#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2231
2232static void set_vls(struct qib_pportdata *ppd)
2233{
2234 int i, numvls, totcred, cred_vl, vl0extra;
2235 struct qib_devdata *dd = ppd->dd;
2236 u64 val;
2237
2238 numvls = qib_num_vls(ppd->vls_operational);
2239
2240 /*
2241 * Set up per-VL credits. Below is a kluge based on these assumptions:
2242 * 1) port is disabled at the time early_init is called.
2243 * 2) give VL15 credits for two max-plausible packets (9, as computed below).
2244 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2245 */
2246 /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2247 totcred = NUM_RCV_BUF_UNITS(dd);
2248 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2249 totcred -= cred_vl;
2250 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2251 cred_vl = totcred / numvls;
2252 vl0extra = totcred - cred_vl * numvls;
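	/*
	 * Example with both ports in use: 512 units per port, 9 reserved
	 * for VL15, 503 left; with 4 data VLs that is 125 each plus 3
	 * extra on VL0.
	 */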
2253 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2254 for (i = 1; i < numvls; i++)
2255 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2256 for (; i < 8; i++) /* no buffer space for other VLs */
2257 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2258
2259 /* Notify IBC that credits need to be recalculated */
2260 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2261 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2262 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2263 qib_write_kreg(dd, kr_scratch, 0ULL);
2264 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2265 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2266
2267 for (i = 0; i < numvls; i++)
2268 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2269 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2270
2271 /* Change the number of operational VLs */
2272 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2273 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2274 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2275 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2276 qib_write_kreg(dd, kr_scratch, 0ULL);
2277}
2278
2279/*
2280 * The code that deals with actual SerDes is in serdes_7322_init().
2281 * Compared to the code for iba7220, it is minimal.
2282 */
2283static int serdes_7322_init(struct qib_pportdata *ppd);
2284
2285/**
2286 * qib_7322_bringup_serdes - bring up the serdes
2287 * @ppd: physical port on the qlogic_ib device
2288 */
2289static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2290{
2291 struct qib_devdata *dd = ppd->dd;
2292 u64 val, guid, ibc;
2293 unsigned long flags;
2294 int ret = 0;
2295
2296 /*
2297 * SerDes model not in Pd, but still need to
2298 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2299 * eventually.
2300 */
2301 /* Put IBC in reset, sends disabled (should be in reset already) */
2302 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2303 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2304 qib_write_kreg(dd, kr_scratch, 0ULL);
2305
2306 if (qib_compat_ddr_negotiate) {
2307 ppd->cpspec->ibdeltainprog = 1;
2308 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2309 crp_ibsymbolerr);
2310 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2311 crp_iblinkerrrecov);
2312 }
2313
2314 /* flowcontrolwatermark is in units of KBytes */
2315 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2316 /*
2317 * Flow control is sent this often, even if no changes in
2318 * buffer space occur. Units are 128ns for this chip.
2319 * Set to 3 usec (24 * 128 ns).
2320 */
2321 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2322 /* max error tolerance */
2323 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2324 /* IB credit flow control. */
2325 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2326 /*
2327 * set initial max size pkt IBC will send, including ICRC; it's the
2328 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2329 */
2330 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2331 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2332 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2333
2334 /* initially come up waiting for TS1, without sending anything. */
2335 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2336 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2337
2338 /*
2339 * Reset the PCS interface to the serdes (and also ibc, which is still
2340 * in reset from above). Writes new value of ibcctrl_a as last step.
2341 */
2342 qib_7322_mini_pcs_reset(ppd);
2343 qib_write_kreg(dd, kr_scratch, 0ULL);
2344
2345 if (!ppd->cpspec->ibcctrl_b) {
2346 unsigned lse = ppd->link_speed_enabled;
2347
2348 /*
2349 * Not on re-init after reset, establish shadow
2350 * and force initial config.
2351 */
2352 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2353 krp_ibcctrl_b);
2354 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2355 IBA7322_IBC_SPEED_DDR |
2356 IBA7322_IBC_SPEED_SDR |
2357 IBA7322_IBC_WIDTH_AUTONEG |
2358 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2359 if (lse & (lse - 1)) /* Multiple speeds enabled */
2360 ppd->cpspec->ibcctrl_b |=
2361 (lse << IBA7322_IBC_SPEED_LSB) |
2362 IBA7322_IBC_IBTA_1_2_MASK |
2363 IBA7322_IBC_MAX_SPEED_MASK;
2364 else
2365 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2366 IBA7322_IBC_SPEED_QDR |
2367 IBA7322_IBC_IBTA_1_2_MASK :
2368 (lse == QIB_IB_DDR) ?
2369 IBA7322_IBC_SPEED_DDR :
2370 IBA7322_IBC_SPEED_SDR;
2371 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2372 (IB_WIDTH_1X | IB_WIDTH_4X))
2373 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2374 else
2375 ppd->cpspec->ibcctrl_b |=
2376 ppd->link_width_enabled == IB_WIDTH_4X ?
2377 IBA7322_IBC_WIDTH_4X_ONLY :
2378 IBA7322_IBC_WIDTH_1X_ONLY;
2379
2380 /* always enable these on driver reload, not sticky */
2381 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2382 IBA7322_IBC_HRTBT_MASK);
2383 }
2384 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2385
2386 /* setup so we have more time at CFGTEST to change H1 */
2387 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2388 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2389 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2390 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2391
2392 serdes_7322_init(ppd);
2393
2394 guid = be64_to_cpu(ppd->guid);
2395 if (!guid) {
2396 if (dd->base_guid)
2397 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2398 ppd->guid = cpu_to_be64(guid);
2399 }
2400
2401 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2402 /* write to chip to prevent back-to-back writes of ibc reg */
2403 qib_write_kreg(dd, kr_scratch, 0);
2404
2405 /* Enable port */
2406 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2407 set_vls(ppd);
2408
2409 /* be paranoid against later code motion, etc. */
2410 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2411 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2412 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2413 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2414
2415 /* Also enable IBSTATUSCHG interrupt. */
2416 val = qib_read_kreg_port(ppd, krp_errmask);
2417 qib_write_kreg_port(ppd, krp_errmask,
2418 val | ERR_MASK_N(IBStatusChanged));
2419
2420 /* Always zero until we start messing with SerDes for real */
2421 return ret;
2422}
2423
2424/**
2425 * qib_7322_mini_quiet_serdes - set serdes to txidle
2426 * @ppd: physical port on the qlogic_ib device
2427 * Called when driver is being unloaded
2428 */
2429static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2430{
2431 u64 val;
2432 unsigned long flags;
2433
2434 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2435
2436 spin_lock_irqsave(&ppd->lflags_lock, flags);
2437 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2438 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2439 wake_up(&ppd->cpspec->autoneg_wait);
2440 cancel_delayed_work(&ppd->cpspec->autoneg_work);
2441 if (ppd->dd->cspec->r1)
2442 cancel_delayed_work(&ppd->cpspec->ipg_work);
2443 flush_scheduled_work();
2444
2445 ppd->cpspec->chase_end = 0;
2446 if (ppd->cpspec->chase_timer.data) /* if initted */
2447 del_timer_sync(&ppd->cpspec->chase_timer);
2448
2449 /*
2450 * Despite the name, actually disables IBC as well. Do it when
2451 * we are as sure as possible that no more packets can be
2452 * received, following the down and the PCS reset.
2453 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2454 * along with the PCS being reset.
2455 */
2456 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2457 qib_7322_mini_pcs_reset(ppd);
2458
2459 /*
2460 * Update the adjusted counters so the adjustment persists
2461 * across driver reload.
2462 */
2463 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2464 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2465 struct qib_devdata *dd = ppd->dd;
2466 u64 diagc;
2467
2468 /* enable counter writes */
2469 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2470 qib_write_kreg(dd, kr_hwdiagctrl,
2471 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2472
2473 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2474 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
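			/* if a delta measurement is still in progress, fall
			 * back to the snapshot taken at bringup before
			 * applying the accumulated delta */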
2475 if (ppd->cpspec->ibdeltainprog)
2476 val -= val - ppd->cpspec->ibsymsnap;
2477 val -= ppd->cpspec->ibsymdelta;
2478 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2479 }
2480 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2481 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2482 if (ppd->cpspec->ibdeltainprog)
2483 val -= val - ppd->cpspec->iblnkerrsnap;
2484 val -= ppd->cpspec->iblnkerrdelta;
2485 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2486 }
2487 if (ppd->cpspec->iblnkdowndelta) {
2488 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2489 val += ppd->cpspec->iblnkdowndelta;
2490 write_7322_creg_port(ppd, crp_iblinkdown, val);
2491 }
2492 /*
2493 * No need to save ibmalfdelta since IB perfcounters
2494 * are cleared on driver reload.
2495 */
2496
2497 /* and disable counter writes */
2498 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2499 }
2500}
2501
2502/**
2503 * qib_setup_7322_setextled - set the state of the two external LEDs
2504 * @ppd: physical port on the qlogic_ib device
2505 * @on: whether the link is up or not
2506 *
2507 * The exact combination of LEDs lit when @on is true is determined
2508 * by looking at the ibcstatus.
2509 *
2510 * These LEDs indicate the physical and logical state of the IB link.
2511 * For this chip (at least with recommended board pinouts), LED1
2512 * is Yellow (logical state) and LED2 is Green (physical state).
2513 *
2514 * Note: We try to match the Mellanox HCA LED behavior as best
2515 * we can. Green indicates physical link state is OK (something is
2516 * plugged in, and we can train).
2517 * Amber indicates the link is logically up (ACTIVE).
2518 * Mellanox further blinks the amber LED to indicate data packet
2519 * activity, but we have no hardware support for that, so it would
2520 * require waking up every 10-20 msecs and checking the counters
2521 * on the chip, and then turning the LED off if appropriate. That's
2522 * visible overhead, so not something we will do.
2523 */
2524static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2525{
2526 struct qib_devdata *dd = ppd->dd;
2527 u64 extctl, ledblink = 0, val;
2528 unsigned long flags;
2529 int yel, grn;
2530
2531 /*
2532 * The diags use the LED to indicate diag info, so we leave
2533 * the external LED alone when the diags are running.
2534 */
2535 if (dd->diag_client)
2536 return;
2537
2538 /* Allow override of LED display for, e.g. Locating system in rack */
2539 if (ppd->led_override) {
2540 grn = (ppd->led_override & QIB_LED_PHYS);
2541 yel = (ppd->led_override & QIB_LED_LOG);
2542 } else if (on) {
2543 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2544 grn = qib_7322_phys_portstate(val) ==
2545 IB_PHYSPORTSTATE_LINKUP;
2546 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2547 } else {
2548 grn = 0;
2549 yel = 0;
2550 }
2551
2552 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2553 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2554 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2555 if (grn) {
2556 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2557 /*
2558 * Counts are in chip clock (4ns) periods.
2559 * This is 1/16 sec (66.6ms) on,
2560 * 3/16 sec (187.5 ms) off, with packets rcvd.
2561 */
2562 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2563 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2564 }
2565 if (yel)
2566 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2567 dd->cspec->extctrl = extctl;
2568 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2569 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2570
2571 if (ledblink) /* blink the LED on packet receive */
2572 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2573}
2574
2575#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2576static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd)
2577{
2578 struct qib_devdata *dd = rcd->dd;
2579 struct qib_chip_specific *cspec = dd->cspec;
2580 int cpu = get_cpu();
2581
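	/* retarget the RcvHdrq DCA hint only when this context's
	 * interrupt has moved to a different CPU */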
2582 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2583 const struct dca_reg_map *rmp;
2584
2585 cspec->rhdr_cpu[rcd->ctxt] = cpu;
2586 rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2587 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2588 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2589 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2590 qib_write_kreg(dd, rmp->regno,
2591 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2592 cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2593 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2594 }
2595 put_cpu();
2596}
2597
2598static void qib_update_sdma_dca(struct qib_pportdata *ppd)
2599{
2600 struct qib_devdata *dd = ppd->dd;
2601 struct qib_chip_specific *cspec = dd->cspec;
2602 int cpu = get_cpu();
2603 unsigned pidx = ppd->port - 1;
2604
2605 if (cspec->sdma_cpu[pidx] != cpu) {
2606 cspec->sdma_cpu[pidx] = cpu;
2607 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2608 SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2609 SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2610 cspec->dca_rcvhdr_ctrl[4] |=
2611 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2612 (ppd->hw_pidx ?
2613 SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2614 SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2615 qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2616 cspec->dca_rcvhdr_ctrl[4]);
2617 cspec->dca_ctrl |= ppd->hw_pidx ?
2618 SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2619 SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2620 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2621 }
2622 put_cpu();
2623}
2624
2625static void qib_setup_dca(struct qib_devdata *dd)
2626{
2627 struct qib_chip_specific *cspec = dd->cspec;
2628 int i;
2629
2630 for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2631 cspec->rhdr_cpu[i] = -1;
2632 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2633 cspec->sdma_cpu[i] = -1;
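	/*
	 * DCACtrlB..F pack per-context RcvHdrq DCA fields; seed each
	 * context's XfrCnt field with 1, the CPU tags are ORed in later
	 * by qib_update_rhdrq_dca().
	 */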
2634 cspec->dca_rcvhdr_ctrl[0] =
2635 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2636 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2637 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2638 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2639 cspec->dca_rcvhdr_ctrl[1] =
2640 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2641 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2642 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2643 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2644 cspec->dca_rcvhdr_ctrl[2] =
2645 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2646 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2647 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2648 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2649 cspec->dca_rcvhdr_ctrl[3] =
2650 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2651 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2652 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2653 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2654 cspec->dca_rcvhdr_ctrl[4] =
2655 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2656 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2657 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2658 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2659 cspec->dca_rcvhdr_ctrl[i]);
2660}
2661
2662#endif
2663
2664/*
2665 * Disable MSIx interrupt if enabled, call generic MSIx code
2666 * to cleanup, and clear pending MSIx interrupts.
2667 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2668 */
2669static void qib_7322_nomsix(struct qib_devdata *dd)
2670{
2671 u64 intgranted;
2672 int n;
2673
2674 dd->cspec->main_int_mask = ~0ULL;
2675 n = dd->cspec->num_msix_entries;
2676 if (n) {
2677 int i;
2678
2679 dd->cspec->num_msix_entries = 0;
2680 for (i = 0; i < n; i++)
2681 free_irq(dd->cspec->msix_entries[i].vector,
2682 dd->cspec->msix_arg[i]);
2683 qib_nomsix(dd);
2684 }
2685 /* make sure no MSIx interrupts are left pending */
2686 intgranted = qib_read_kreg64(dd, kr_intgranted);
2687 if (intgranted)
2688 qib_write_kreg(dd, kr_intgranted, intgranted);
2689}
2690
2691static void qib_7322_free_irq(struct qib_devdata *dd)
2692{
2693 if (dd->cspec->irq) {
2694 free_irq(dd->cspec->irq, dd);
2695 dd->cspec->irq = 0;
2696 }
2697 qib_7322_nomsix(dd);
2698}
2699
2700static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2701{
2702 int i;
2703
2704#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2705 if (dd->flags & QIB_DCA_ENABLED) {
2706 dca_remove_requester(&dd->pcidev->dev);
2707 dd->flags &= ~QIB_DCA_ENABLED;
2708 dd->cspec->dca_ctrl = 0;
2709 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2710 }
2711#endif
2712
2713 qib_7322_free_irq(dd);
2714 kfree(dd->cspec->cntrs);
2715 kfree(dd->cspec->sendchkenable);
2716 kfree(dd->cspec->sendgrhchk);
2717 kfree(dd->cspec->sendibchk);
2718 kfree(dd->cspec->msix_entries);
2719 kfree(dd->cspec->msix_arg);
2720 for (i = 0; i < dd->num_pports; i++) {
2721 unsigned long flags;
2722 u32 mask = QSFP_GPIO_MOD_PRS_N |
2723 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2724
2725 kfree(dd->pport[i].cpspec->portcntrs);
2726 if (dd->flags & QIB_HAS_QSFP) {
2727 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2728 dd->cspec->gpio_mask &= ~mask;
2729 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2730 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2731 qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2732 }
2733 if (dd->pport[i].ibport_data.smi_ah)
2734 ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2735 }
2736}
2737
2738/* handle SDMA interrupts */
2739static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2740{
2741 struct qib_pportdata *ppd0 = &dd->pport[0];
2742 struct qib_pportdata *ppd1 = &dd->pport[1];
2743 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2744 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2745 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2746 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2747
2748 if (intr0)
2749 qib_sdma_intr(ppd0);
2750 if (intr1)
2751 qib_sdma_intr(ppd1);
2752
2753 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2754 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2755 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2756 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2757}
2758
2759/*
2760 * Set or clear the Send buffer available interrupt enable bit.
2761 */
2762static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2763{
2764 unsigned long flags;
2765
2766 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2767 if (needint)
2768 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2769 else
2770 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2771 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2772 qib_write_kreg(dd, kr_scratch, 0ULL);
2773 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2774}
2775
2776/*
2777 * Somehow got an interrupt with reserved bits set in interrupt status.
2778 * Print a message so we know it happened, then clear them.
2779 * Kept out of line to keep the mainline interrupt handler cache-friendly.
2780 */
2781static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2782{
2783 u64 kills;
2784
2785 kills = istat & ~QIB_I_BITSEXTANT;
2786 qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
2787 (unsigned long long) kills);
2789 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2790}
2791
2792/* keep mainline interrupt handler cache-friendly */
2793static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2794{
2795 u32 gpiostatus;
2796 int handled = 0;
2797 int pidx;
2798
2799 /*
2800 * Boards for this chip currently don't use GPIO interrupts,
2801 * so clear by writing GPIOstatus to GPIOclear, and complain
2802 * to developer. To avoid endless repeats, clear
2803 * the bits in the mask, since there is some kind of
2804 * programming error or chip problem.
2805 */
2806 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2807 /*
2808 * In theory, writing GPIOstatus to GPIOclear could
2809 * have a bad side-effect on some diagnostic that wanted
2810 * to poll for a status-change, but the various shadows
2811 * make that problematic at best. Diags will just suppress
2812 * all GPIO interrupts during such tests.
2813 */
2814 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2815 /*
2816 * Check for QSFP MOD_PRS changes
2817 * only works for single port if IB1 != pidx1
2818 */
2819 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2820 ++pidx) {
2821 struct qib_pportdata *ppd;
2822 struct qib_qsfp_data *qd;
2823 u32 mask;
2824 if (!dd->pport[pidx].link_speed_supported)
2825 continue;
2826 mask = QSFP_GPIO_MOD_PRS_N;
2827 ppd = dd->pport + pidx;
2828 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2829 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2830 u64 pins;
2831 qd = &ppd->cpspec->qsfp_data;
2832 gpiostatus &= ~mask;
2833 pins = qib_read_kreg64(dd, kr_extstatus);
2834 pins >>= SYM_LSB(EXTStatus, GPIOIn);
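			/* MOD_PRS_N is active low: a low pin means a
			 * module is now present */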
2835 if (!(pins & mask)) {
2836 ++handled;
2837 qd->t_insert = get_jiffies_64();
2838 schedule_work(&qd->work);
2839 }
2840 }
2841 }
2842
2843 if (gpiostatus && !handled) {
2844 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2845 u32 gpio_irq = mask & gpiostatus;
2846
2847 /*
2848 * Clear any troublemakers, and update chip from shadow
2849 */
2850 dd->cspec->gpio_mask &= ~gpio_irq;
2851 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2852 }
2853}
2854
2855/*
2856 * Handle errors and unusual events first, separate function
2857 * to improve cache hits for fast path interrupt handling.
2858 */
2859static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2860{
2861 if (istat & ~QIB_I_BITSEXTANT)
2862 unknown_7322_ibits(dd, istat);
2863 if (istat & QIB_I_GPIO)
2864 unknown_7322_gpio_intr(dd);
2865 if (istat & QIB_I_C_ERROR)
2866 handle_7322_errors(dd);
2867 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2868 handle_7322_p_errors(dd->rcd[0]->ppd);
2869 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2870 handle_7322_p_errors(dd->rcd[1]->ppd);
2871}
2872
2873/*
2874 * Dynamically adjust the rcv int timeout for a context based on incoming
2875 * packet rate.
2876 */
2877static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2878{
2879 struct qib_devdata *dd = rcd->dd;
2880 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2881
2882 /*
2883 * Dynamically adjust idle timeout on chip
2884 * based on number of packets processed.
2885 */
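	/* Light traffic halves the timeout while it is above 2; heavy
	 * traffic doubles it, capped at rcv_int_timeout. */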
2886 if (npkts < rcv_int_count && timeout > 2)
2887 timeout >>= 1;
2888 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2889 timeout = min(timeout << 1, rcv_int_timeout);
2890 else
2891 return;
2892
2893 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2894 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2895}
2896
2897/*
2898 * This is the main interrupt handler.
2899 * It will normally only be used for low frequency interrupts but may
2900 * have to handle all interrupts if INTx is enabled or fewer than normal
2901 * MSIx interrupts were allocated.
2902 * This routine should ignore the interrupt bits for any of the
2903 * dedicated MSIx handlers.
2904 */
2905static irqreturn_t qib_7322intr(int irq, void *data)
2906{
2907 struct qib_devdata *dd = data;
2908 irqreturn_t ret;
2909 u64 istat;
2910 u64 ctxtrbits;
2911 u64 rmask;
2912 unsigned i;
2913 u32 npkts;
2914
2915 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2916 /*
2917 * This return value is not great, but we do not want the
2918 * interrupt core code to remove our interrupt handler
2919 * because we don't appear to be handling an interrupt
2920 * during a chip reset.
2921 */
2922 ret = IRQ_HANDLED;
2923 goto bail;
2924 }
2925
2926 istat = qib_read_kreg64(dd, kr_intstatus);
2927
2928 if (unlikely(istat == ~0ULL)) {
2929 qib_bad_intrstatus(dd);
2930 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2931 /* don't know if it was our interrupt or not */
2932 ret = IRQ_NONE;
2933 goto bail;
2934 }
2935
2936 istat &= dd->cspec->main_int_mask;
2937 if (unlikely(!istat)) {
2938 /* already handled, or shared and not us */
2939 ret = IRQ_NONE;
2940 goto bail;
2941 }
2942
2943 qib_stats.sps_ints++;
2944 if (dd->int_counter != (u32) -1)
2945 dd->int_counter++;
2946
2947 /* handle "errors" of various kinds first, device ahead of port */
2948 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2949 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2950 INT_MASK_P(Err, 1))))
2951 unlikely_7322_intr(dd, istat);
2952
2953 /*
2954 * Clear the interrupt bits we found set, relatively early, so we
2955 * "know" the chip will have seen this by the time we process
2956 * the queue, and will re-interrupt if necessary. The processor
2957 * itself won't take the interrupt again until we return.
2958 */
2959 qib_write_kreg(dd, kr_intclear, istat);
2960
2961 /*
2962 * Handle kernel receive queues before checking for pio buffers
2963 * available since receives can overflow; piobuf waiters can afford
2964 * a few extra cycles, since they were waiting anyway.
2965 */
2966 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2967 if (ctxtrbits) {
2968 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2969 (1ULL << QIB_I_RCVURG_LSB);
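		/*
		 * rmask starts as context 0's RcvAvail and RcvUrg bits and
		 * is shifted left once per context; bits left over after
		 * the kernel contexts belong to user contexts and are
		 * handed to qib_handle_urcv() below.
		 */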
2970 for (i = 0; i < dd->first_user_ctxt; i++) {
2971 if (ctxtrbits & rmask) {
2972 ctxtrbits &= ~rmask;
2973 if (dd->rcd[i]) {
2974 qib_kreceive(dd->rcd[i], NULL, &npkts);
2975 adjust_rcv_timeout(dd->rcd[i], npkts);
2976 }
2977 }
2978 rmask <<= 1;
2979 }
2980 if (ctxtrbits) {
2981 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2982 (ctxtrbits >> QIB_I_RCVURG_LSB);
2983 qib_handle_urcv(dd, ctxtrbits);
2984 }
2985 }
2986
2987 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2988 sdma_7322_intr(dd, istat);
2989
2990 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2991 qib_ib_piobufavail(dd);
2992
2993 ret = IRQ_HANDLED;
2994bail:
2995 return ret;
2996}
2997
2998/*
2999 * Dedicated receive packet available interrupt handler.
3000 */
3001static irqreturn_t qib_7322pintr(int irq, void *data)
3002{
3003 struct qib_ctxtdata *rcd = data;
3004 struct qib_devdata *dd = rcd->dd;
3005 u32 npkts;
3006
3007 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3008 /*
3009 * This return value is not great, but we do not want the
3010 * interrupt core code to remove our interrupt handler
3011 * because we don't appear to be handling an interrupt
3012 * during a chip reset.
3013 */
3014 return IRQ_HANDLED;
3015
3016 qib_stats.sps_ints++;
3017 if (dd->int_counter != (u32) -1)
3018 dd->int_counter++;
3019
3020#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3021 if (dd->flags & QIB_DCA_ENABLED)
3022 qib_update_rhdrq_dca(rcd);
3023#endif
3024
3025 /* Clear the interrupt bit we expect to be set. */
3026 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3027 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3028
3029 qib_kreceive(rcd, NULL, &npkts);
3030 adjust_rcv_timeout(rcd, npkts);
3031
3032 return IRQ_HANDLED;
3033}
3034
3035/*
3036 * Dedicated Send buffer available interrupt handler.
3037 */
3038static irqreturn_t qib_7322bufavail(int irq, void *data)
3039{
3040 struct qib_devdata *dd = data;
3041
3042 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3043 /*
3044 * This return value is not great, but we do not want the
3045 * interrupt core code to remove our interrupt handler
3046 * because we don't appear to be handling an interrupt
3047 * during a chip reset.
3048 */
3049 return IRQ_HANDLED;
3050
3051 qib_stats.sps_ints++;
3052 if (dd->int_counter != (u32) -1)
3053 dd->int_counter++;
3054
3055 /* Clear the interrupt bit we expect to be set. */
3056 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3057
3058 /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3059 if (dd->flags & QIB_INITTED)
3060 qib_ib_piobufavail(dd);
3061 else
3062 qib_wantpiobuf_7322_intr(dd, 0);
3063
3064 return IRQ_HANDLED;
3065}
3066
3067/*
3068 * Dedicated Send DMA interrupt handler.
3069 */
3070static irqreturn_t sdma_intr(int irq, void *data)
3071{
3072 struct qib_pportdata *ppd = data;
3073 struct qib_devdata *dd = ppd->dd;
3074
3075 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3076 /*
3077 * This return value is not great, but we do not want the
3078 * interrupt core code to remove our interrupt handler
3079 * because we don't appear to be handling an interrupt
3080 * during a chip reset.
3081 */
3082 return IRQ_HANDLED;
3083
3084 qib_stats.sps_ints++;
3085 if (dd->int_counter != (u32) -1)
3086 dd->int_counter++;
3087
3088#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3089 if (dd->flags & QIB_DCA_ENABLED)
3090 qib_update_sdma_dca(ppd);
3091#endif
3092
3093 /* Clear the interrupt bit we expect to be set. */
3094 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3095 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3096 qib_sdma_intr(ppd);
3097
3098 return IRQ_HANDLED;
3099}
3100
3101/*
3102 * Dedicated Send DMA idle interrupt handler.
3103 */
3104static irqreturn_t sdma_idle_intr(int irq, void *data)
3105{
3106 struct qib_pportdata *ppd = data;
3107 struct qib_devdata *dd = ppd->dd;
3108
3109 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3110 /*
3111 * This return value is not great, but we do not want the
3112 * interrupt core code to remove our interrupt handler
3113 * because we don't appear to be handling an interrupt
3114 * during a chip reset.
3115 */
3116 return IRQ_HANDLED;
3117
3118 qib_stats.sps_ints++;
3119 if (dd->int_counter != (u32) -1)
3120 dd->int_counter++;
3121
3122#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3123 if (dd->flags & QIB_DCA_ENABLED)
3124 qib_update_sdma_dca(ppd);
3125#endif
3126
3127 /* Clear the interrupt bit we expect to be set. */
3128 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3129 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3130 qib_sdma_intr(ppd);
3131
3132 return IRQ_HANDLED;
3133}
3134
3135/*
3136 * Dedicated Send DMA progress interrupt handler.
3137 */
3138static irqreturn_t sdma_progress_intr(int irq, void *data)
3139{
3140 struct qib_pportdata *ppd = data;
3141 struct qib_devdata *dd = ppd->dd;
3142
3143 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3144 /*
3145 * This return value is not great, but we do not want the
3146 * interrupt core code to remove our interrupt handler
3147 * because we don't appear to be handling an interrupt
3148 * during a chip reset.
3149 */
3150 return IRQ_HANDLED;
3151
3152 qib_stats.sps_ints++;
3153 if (dd->int_counter != (u32) -1)
3154 dd->int_counter++;
3155
3156#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3157 if (dd->flags & QIB_DCA_ENABLED)
3158 qib_update_sdma_dca(ppd);
3159#endif
3160
3161 /* Clear the interrupt bit we expect to be set. */
3162 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3163 INT_MASK_P(SDmaProgress, 1) :
3164 INT_MASK_P(SDmaProgress, 0));
3165 qib_sdma_intr(ppd);
3166
3167 return IRQ_HANDLED;
3168}
3169
3170/*
3171 * Dedicated Send DMA cleanup interrupt handler.
3172 */
3173static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3174{
3175 struct qib_pportdata *ppd = data;
3176 struct qib_devdata *dd = ppd->dd;
3177
3178 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3179 /*
3180 * This return value is not great, but we do not want the
3181 * interrupt core code to remove our interrupt handler
3182 * because we don't appear to be handling an interrupt
3183 * during a chip reset.
3184 */
3185 return IRQ_HANDLED;
3186
3187 qib_stats.sps_ints++;
3188 if (dd->int_counter != (u32) -1)
3189 dd->int_counter++;
3190
3191#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3192 if (dd->flags & QIB_DCA_ENABLED)
3193 qib_update_sdma_dca(ppd);
3194#endif
3195
3196 /* Clear the interrupt bit we expect to be set. */
3197 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3198 INT_MASK_PM(SDmaCleanupDone, 1) :
3199 INT_MASK_PM(SDmaCleanupDone, 0));
3200 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3201
3202 return IRQ_HANDLED;
3203}
3204
3205/*
3206 * Set up our chip-specific interrupt handler.
3207 * The interrupt type has already been set up, so
3208 * we just need to do the registration and error checking.
3209 * If we are using MSIx interrupts, we may fall back to
3210 * INTx later, if the interrupt handler doesn't get called
3211 * within 1/2 second (see verify_interrupt()).
3212 */
3213static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3214{
3215 int ret, i, msixnum;
3216 u64 redirect[6];
3217 u64 mask;
3218
3219 if (!dd->num_pports)
3220 return;
3221
3222 if (clearpend) {
3223 /*
3224 * if not switching interrupt types, be sure interrupts are
3225 * disabled, and then clear anything pending at this point,
3226 * because we are starting clean.
3227 */
3228 qib_7322_set_intr_state(dd, 0);
3229
3230 /* clear the reset error, init error/hwerror mask */
3231 qib_7322_init_hwerrors(dd);
3232
3233 /* clear any interrupt bits that might be set */
3234 qib_write_kreg(dd, kr_intclear, ~0ULL);
3235
3236 /* make sure no pending MSIx intr, and clear diag reg */
3237 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3238 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3239 }
3240
3241 if (!dd->cspec->num_msix_entries) {
3242 /* Try to get INTx interrupt */
3243try_intx:
3244 if (!dd->pcidev->irq) {
3245 qib_dev_err(dd, "irq is 0, BIOS error? "
3246 "Interrupts won't work\n");
3247 goto bail;
3248 }
3249 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3250 IRQF_SHARED, QIB_DRV_NAME, dd);
3251 if (ret) {
3252 qib_dev_err(dd, "Couldn't setup INTx "
3253 "interrupt (irq=%d): %d\n",
3254 dd->pcidev->irq, ret);
3255 goto bail;
3256 }
3257 dd->cspec->irq = dd->pcidev->irq;
3258 dd->cspec->main_int_mask = ~0ULL;
3259 goto bail;
3260 }
3261
3262 /* Try to get MSIx interrupts */
3263 memset(redirect, 0, sizeof redirect);
3264 mask = ~0ULL;
3265 msixnum = 0;
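	/*
	 * i indexes the possible interrupt sources (irq_table entries,
	 * then kernel receive contexts); msixnum counts the vectors
	 * actually requested and advances only after a successful
	 * request_irq().
	 */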
3266 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3267 irq_handler_t handler;
3268 const char *name;
3269 void *arg;
3270 u64 val;
3271 int lsb, reg, sh;
3272
3273 if (i < ARRAY_SIZE(irq_table)) {
3274 if (irq_table[i].port) {
3275 /* skip if for a non-configured port */
3276 if (irq_table[i].port > dd->num_pports)
3277 continue;
3278 arg = dd->pport + irq_table[i].port - 1;
3279 } else
3280 arg = dd;
3281 lsb = irq_table[i].lsb;
3282 handler = irq_table[i].handler;
3283 name = irq_table[i].name;
3284 } else {
3285 unsigned ctxt;
3286
3287 ctxt = i - ARRAY_SIZE(irq_table);
3288 /* per krcvq context receive interrupt */
3289 arg = dd->rcd[ctxt];
3290 if (!arg)
3291 continue;
3292 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3293 handler = qib_7322pintr;
3294 name = QIB_DRV_NAME " (kctx)";
3295 }
3296 ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
3297 handler, 0, name, arg);
3298 if (ret) {
3299 /*
3300 * Shouldn't happen since the enable said we could
3301 * have as many as we are trying to setup here.
3302 */
3303 qib_dev_err(dd, "Couldn't setup MSIx "
3304 "interrupt (vec=%d, irq=%d): %d\n", msixnum,
3305 dd->cspec->msix_entries[msixnum].vector,
3306 ret);
3307 qib_7322_nomsix(dd);
3308 goto try_intx;
3309 }
3310 dd->cspec->msix_arg[msixnum] = arg;
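		/*
		 * Route this interrupt source to the MSIx vector just set
		 * up: record the vector number in the IntRedirect register
		 * field selected by this source's LSB, and remove the bit
		 * from the mask handled by the general interrupt handler.
		 */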
3311 if (lsb >= 0) {
3312 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3313 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3314 SYM_LSB(IntRedirect0, vec1);
3315 mask &= ~(1ULL << lsb);
3316 redirect[reg] |= ((u64) msixnum) << sh;
3317 }
3318 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3319 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3320 msixnum++;
3321 }
3322 /* Initialize the vector mapping */
3323 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3324 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3325 dd->cspec->main_int_mask = mask;
3326bail:;
3327}
3328
3329/**
3330 * qib_7322_boardname - fill in the board name and note features
3331 * @dd: the qlogic_ib device
3332 *
3333 * info will be based on the board revision register
3334 */
3335static unsigned qib_7322_boardname(struct qib_devdata *dd)
3336{
3337 /* Will need enumeration of board-types here */
3338 char *n;
3339 u32 boardid, namelen;
3340 unsigned features = DUAL_PORT_CAP;
3341
3342 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3343
3344 switch (boardid) {
3345 case 0:
3346 n = "InfiniPath_QLE7342_Emulation";
3347 break;
3348 case 1:
3349 n = "InfiniPath_QLE7340";
3350 dd->flags |= QIB_HAS_QSFP;
3351 features = PORT_SPD_CAP;
3352 break;
3353 case 2:
3354 n = "InfiniPath_QLE7342";
3355 dd->flags |= QIB_HAS_QSFP;
3356 break;
3357 case 3:
3358 n = "InfiniPath_QMI7342";
3359 break;
3360 case 4:
3361 n = "InfiniPath_Unsupported7342";
3362 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3363 features = 0;
3364 break;
3365 case BOARD_QMH7342:
3366 n = "InfiniPath_QMH7342";
3367 features = 0x24;
3368 break;
3369 case BOARD_QME7342:
3370 n = "InfiniPath_QME7342";
3371 break;
3372 case 15:
3373 n = "InfiniPath_QLE7342_TEST";
3374 dd->flags |= QIB_HAS_QSFP;
3375 break;
3376 default:
3377 n = "InfiniPath_QLE73xy_UNKNOWN";
3378 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3379 break;
3380 }
3381 dd->board_atten = 1; /* index into txdds_Xdr */
3382
3383 namelen = strlen(n) + 1;
3384 dd->boardname = kmalloc(namelen, GFP_KERNEL);
3385 if (!dd->boardname)
3386 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3387 else
3388 snprintf(dd->boardname, namelen, "%s", n);
3389
3390 snprintf(dd->boardversion, sizeof(dd->boardversion),
3391 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3392 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3393 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3394 dd->majrev, dd->minrev,
3395 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3396
3397 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3398 qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3399 " by module parameter\n", dd->unit);
3400 features &= PORT_SPD_CAP;
3401 }
3402
3403 return features;
3404}
3405
3406/*
3407 * This routine sleeps, so it can only be called from user context, not
3408 * from interrupt context.
3409 */
3410static int qib_do_7322_reset(struct qib_devdata *dd)
3411{
3412 u64 val;
3413 u64 *msix_vecsave;
3414 int i, msix_entries, ret = 1;
3415 u16 cmdval;
3416 u8 int_line, clinesz;
3417 unsigned long flags;
3418
3419 /* Use dev_err so it shows up in logs, etc. */
3420 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3421
3422 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3423
3424 msix_entries = dd->cspec->num_msix_entries;
3425
3426 /* no interrupts till re-initted */
3427 qib_7322_set_intr_state(dd, 0);
3428
3429 if (msix_entries) {
3430 qib_7322_nomsix(dd);
3431 /* can be up to 512 bytes, too big for stack */
3432 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3433 sizeof(u64), GFP_KERNEL);
3434 if (!msix_vecsave)
3435 qib_dev_err(dd, "No mem to save MSIx data\n");
3436 } else
3437 msix_vecsave = NULL;
3438
3439 /*
3440 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3441 * info that is set up by the BIOS, so we have to save and restore
3442 * it ourselves. There is some risk something could change it,
3443 * after we save it, but since we have disabled the MSIx, it
3444 * shouldn't be touched...
3445 */
3446 for (i = 0; i < msix_entries; i++) {
3447 u64 vecaddr, vecdata;
3448 vecaddr = qib_read_kreg64(dd, 2 * i +
3449 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3450 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3451 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3452 if (msix_vecsave) {
3453 msix_vecsave[2 * i] = vecaddr;
3454 /* save it without the masked bit set */
3455 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3456 }
3457 }
3458
3459 dd->pport->cpspec->ibdeltainprog = 0;
3460 dd->pport->cpspec->ibsymdelta = 0;
3461 dd->pport->cpspec->iblnkerrdelta = 0;
3462 dd->pport->cpspec->ibmalfdelta = 0;
3463 dd->int_counter = 0; /* so we check interrupts work again */
3464
3465 /*
3466 * Keep chip from being accessed until we are ready. Use
3467 * writeq() directly, to allow the write even though QIB_PRESENT
3468	 * isn't set.
3469 */
3470 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3471 dd->flags |= QIB_DOING_RESET;
3472 val = dd->control | QLOGIC_IB_C_RESET;
3473 writeq(val, &dd->kregbase[kr_control]);
3474
3475 for (i = 1; i <= 5; i++) {
3476 /*
3477 * Allow MBIST, etc. to complete; longer on each retry.
3478 * We sometimes get machine checks from bus timeout if no
3479 * response, so for now, make it *really* long.
3480 */
3481 msleep(1000 + (1 + i) * 3000);
3482
3483 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3484
3485 /*
3486 * Use readq directly, so we don't need to mark it as PRESENT
3487 * until we get a successful indication that all is well.
3488 */
3489 val = readq(&dd->kregbase[kr_revision]);
3490 if (val == dd->revision)
3491 break;
3492 if (i == 5) {
3493 qib_dev_err(dd, "Failed to initialize after reset, "
3494 "unusable\n");
3495 ret = 0;
3496 goto bail;
3497 }
3498 }
3499
3500 dd->flags |= QIB_PRESENT; /* it's back */
3501
3502 if (msix_entries) {
3503 /* restore the MSIx vector address and data if saved above */
3504 for (i = 0; i < msix_entries; i++) {
3505 dd->cspec->msix_entries[i].entry = i;
3506 if (!msix_vecsave || !msix_vecsave[2 * i])
3507 continue;
3508 qib_write_kreg(dd, 2 * i +
3509 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3510 msix_vecsave[2 * i]);
3511 qib_write_kreg(dd, 1 + 2 * i +
3512 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3513 msix_vecsave[1 + 2 * i]);
3514 }
3515 }
3516
3517 /* initialize the remaining registers. */
3518 for (i = 0; i < dd->num_pports; ++i)
3519 write_7322_init_portregs(&dd->pport[i]);
3520 write_7322_initregs(dd);
3521
3522 if (qib_pcie_params(dd, dd->lbus_width,
3523 &dd->cspec->num_msix_entries,
3524 dd->cspec->msix_entries))
3525 qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
3526 "continuing anyway\n");
3527
3528 qib_setup_7322_interrupt(dd, 1);
3529
3530 for (i = 0; i < dd->num_pports; ++i) {
3531 struct qib_pportdata *ppd = &dd->pport[i];
3532
3533 spin_lock_irqsave(&ppd->lflags_lock, flags);
3534 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3535 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3536 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3537 }
3538
3539bail:
3540 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3541 kfree(msix_vecsave);
3542 return ret;
3543}
3544
3545/**
3546 * qib_7322_put_tid - write a TID to the chip
3547 * @dd: the qlogic_ib device
3548 * @tidptr: pointer to the expected TID (in chip) to update
3549 * @type: 0 for eager, 1 for expected
3550 * @pa: physical address of in memory buffer; tidinvalid if freeing
3551 */
3552static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3553 u32 type, unsigned long pa)
3554{
3555 if (!(dd->flags & QIB_PRESENT))
3556 return;
3557 if (pa != dd->tidinvalid) {
3558 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3559
3560 /* paranoia checks */
3561 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3562 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3563 pa);
3564 return;
3565 }
3566 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3567 qib_dev_err(dd, "Physical page address 0x%lx "
3568 "larger than supported\n", pa);
3569 return;
3570 }
3571
3572 if (type == RCVHQ_RCV_TYPE_EAGER)
3573 chippa |= dd->tidtemplate;
3574 else /* for now, always full 4KB page */
3575 chippa |= IBA7322_TID_SZ_4K;
3576 pa = chippa;
3577 }
3578 writeq(pa, tidptr);
3579 mmiowb();
3580}
3581
3582/**
3583 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3584 * @dd: the qlogic_ib device
3585 * @rcd: the qlogic_ib ctxt data
3586 *
3587 * clear all TID entries for a ctxt, expected and eager.
3588 * Used from qib_close().
3589 */
3590static void qib_7322_clear_tids(struct qib_devdata *dd,
3591 struct qib_ctxtdata *rcd)
3592{
3593 u64 __iomem *tidbase;
3594 unsigned long tidinv;
3595 u32 ctxt;
3596 int i;
3597
3598 if (!dd->kregbase || !rcd)
3599 return;
3600
3601 ctxt = rcd->ctxt;
3602
3603 tidinv = dd->tidinvalid;
3604 tidbase = (u64 __iomem *)
3605 ((char __iomem *) dd->kregbase +
3606 dd->rcvtidbase +
3607 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3608
3609 for (i = 0; i < dd->rcvtidcnt; i++)
3610 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3611 tidinv);
3612
3613 tidbase = (u64 __iomem *)
3614 ((char __iomem *) dd->kregbase +
3615 dd->rcvegrbase +
3616 rcd->rcvegr_tid_base * sizeof(*tidbase));
3617
3618 for (i = 0; i < rcd->rcvegrcnt; i++)
3619 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3620 tidinv);
3621}
3622
3623/**
3624 * qib_7322_tidtemplate - setup constants for TID updates
3625 * @dd: the qlogic_ib device
3626 *
3627 * We set up values that we use a lot, to avoid recalculating them each time
3628 */
3629static void qib_7322_tidtemplate(struct qib_devdata *dd)
3630{
3631 /*
3632 * For now, we always allocate 4KB buffers (at init) so we can
3633 * receive max size packets. We may want a module parameter to
3634 * specify 2KB or 4KB and/or make it per port instead of per device
3635 * for those who want to reduce memory footprint. Note that the
3636 * rcvhdrentsize size must be large enough to hold the largest
3637 * IB header (currently 96 bytes) that we expect to handle (plus of
3638 * course the 2 dwords of RHF).
3639 */
3640 if (dd->rcvegrbufsize == 2048)
3641 dd->tidtemplate = IBA7322_TID_SZ_2K;
3642 else if (dd->rcvegrbufsize == 4096)
3643 dd->tidtemplate = IBA7322_TID_SZ_4K;
3644 dd->tidinvalid = 0;
3645}
3646
3647/**
3648 * qib_init_7322_get_base_info - set chip-specific flags for user code
3649 * @rcd: the qlogic_ib ctxt
3650 * @kinfo: qib_base_info pointer
3651 *
3652 * We set the PCIE flag because the lower bandwidth on PCIe vs
3653 * HyperTransport can affect some user packet algorithms.
3654 */
3655
3656static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3657 struct qib_base_info *kinfo)
3658{
3659 kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3660 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3661 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3662 if (rcd->dd->cspec->r1)
3663 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3664 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3665 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3666
3667 return 0;
3668}
3669
3670static struct qib_message_header *
3671qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3672{
3673 u32 offset = qib_hdrget_offset(rhf_addr);
3674
3675 return (struct qib_message_header *)
3676 (rhf_addr - dd->rhf_offset + offset);
3677}
3678
3679/*
3680 * Configure number of contexts.
3681 */
3682static void qib_7322_config_ctxts(struct qib_devdata *dd)
3683{
3684 unsigned long flags;
3685 u32 nchipctxts;
3686
3687 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3688 dd->cspec->numctxts = nchipctxts;
3689 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3690 /*
3691 * Set the mask for which bits from the QPN are used
3692 * to select a context number.
3693 */
3694 dd->qpn_mask = 0x3f;
3695 dd->first_user_ctxt = NUM_IB_PORTS +
3696 (qib_n_krcv_queues - 1) * dd->num_pports;
3697 if (dd->first_user_ctxt > nchipctxts)
3698 dd->first_user_ctxt = nchipctxts;
3699 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3700 } else {
3701 dd->first_user_ctxt = NUM_IB_PORTS;
3702 dd->n_krcv_queues = 1;
3703 }
3704
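	/*
	 * If qib_cfgctxts wasn't set, size the context count to the
	 * kernel contexts plus one per online CPU, rounded up to the
	 * chip's 6/10/18 context configuration points.
	 */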
3705 if (!qib_cfgctxts) {
3706 int nctxts = dd->first_user_ctxt + num_online_cpus();
3707
3708 if (nctxts <= 6)
3709 dd->ctxtcnt = 6;
3710 else if (nctxts <= 10)
3711 dd->ctxtcnt = 10;
3712 else if (nctxts <= nchipctxts)
3713 dd->ctxtcnt = nchipctxts;
3714 } else if (qib_cfgctxts < dd->num_pports)
3715 dd->ctxtcnt = dd->num_pports;
3716 else if (qib_cfgctxts <= nchipctxts)
3717 dd->ctxtcnt = qib_cfgctxts;
3718 if (!dd->ctxtcnt) /* none of the above, set to max */
3719 dd->ctxtcnt = nchipctxts;
3720
3721 /*
3722 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3723 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3724 * Lock to be paranoid about later motion, etc.
3725 */
3726 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3727 if (dd->ctxtcnt > 10)
3728 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3729 else if (dd->ctxtcnt > 6)
3730 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3731 /* else configure for default 6 receive ctxts */
3732
3733 /* The XRC opcode is 5. */
3734 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3735
3736 /*
3737 * RcvCtrl *must* be written here so that the
3738 * chip understands how to change rcvegrcnt below.
3739 */
3740 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3741 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3742
3743 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3744 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3745 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3746 dd->num_pports > 1 ? 1024U : 2048U);
3747}
3748
3749static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3750{
3751
3752 int lsb, ret = 0;
3753 u64 maskr; /* right-justified mask */
3754
3755 switch (which) {
3756
3757 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3758 ret = ppd->link_width_enabled;
3759 goto done;
3760
3761 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3762 ret = ppd->link_width_active;
3763 goto done;
3764
3765 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3766 ret = ppd->link_speed_enabled;
3767 goto done;
3768
3769 case QIB_IB_CFG_SPD: /* Get current Link spd */
3770 ret = ppd->link_speed_active;
3771 goto done;
3772
3773 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3774 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3775 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3776 break;
3777
3778 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3779 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3780 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3781 break;
3782
3783 case QIB_IB_CFG_LINKLATENCY:
3784 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3785 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3786 goto done;
3787
3788 case QIB_IB_CFG_OP_VLS:
3789 ret = ppd->vls_operational;
3790 goto done;
3791
3792 case QIB_IB_CFG_VL_HIGH_CAP:
3793 ret = 16;
3794 goto done;
3795
3796 case QIB_IB_CFG_VL_LOW_CAP:
3797 ret = 16;
3798 goto done;
3799
3800 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3801 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3802 OverrunThreshold);
3803 goto done;
3804
3805 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3806 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3807 PhyerrThreshold);
3808 goto done;
3809
3810 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3811 /* will only take effect when the link state changes */
3812 ret = (ppd->cpspec->ibcctrl_a &
3813 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3814 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3815 goto done;
3816
3817 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3818 lsb = IBA7322_IBC_HRTBT_LSB;
3819 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3820 break;
3821
3822 case QIB_IB_CFG_PMA_TICKS:
3823 /*
3824		 * 0x00 = 10x link transfer rate, or 4 nsec. for 2.5Gbps
3825 * Since the clock is always 250MHz, the value is 3, 1 or 0.
3826 */
3827 if (ppd->link_speed_active == QIB_IB_QDR)
3828 ret = 3;
3829 else if (ppd->link_speed_active == QIB_IB_DDR)
3830 ret = 1;
3831 else
3832 ret = 0;
3833 goto done;
3834
3835 default:
3836 ret = -EINVAL;
3837 goto done;
3838 }
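	/*
	 * Cases that only set lsb/maskr fall through to here and read
	 * the field from the shadowed IBCCtrlB value.
	 */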
3839 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3840done:
3841 return ret;
3842}
3843
3844/*
3845 * Below is again cribbed liberally from an older version. Do not
3846 * lean heavily on it.
3847 */
3848#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3849#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3850 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3851
3852static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3853{
3854 struct qib_devdata *dd = ppd->dd;
3855 u64 maskr; /* right-justified mask */
3856 int lsb, ret = 0;
3857 u16 lcmd, licmd;
3858 unsigned long flags;
3859
3860 switch (which) {
3861 case QIB_IB_CFG_LIDLMC:
3862 /*
3863		 * Set LID and LMC. Combined to avoid possible hazard;
3864		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val.
3865 */
3866 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3867 maskr = IBA7322_IBC_DLIDLMC_MASK;
3868 /*
3869 * For header-checking, the SLID in the packet will
3870 * be masked with SendIBSLMCMask, and compared
3871 * with SendIBSLIDAssignMask. Make sure we do not
3872 * set any bits not covered by the mask, or we get
3873 * false-positives.
3874 */
3875 qib_write_kreg_port(ppd, krp_sendslid,
3876 val & (val >> 16) & SendIBSLIDAssignMask);
3877 qib_write_kreg_port(ppd, krp_sendslidmask,
3878 (val >> 16) & SendIBSLMCMask);
3879 break;
3880
3881 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3882 ppd->link_width_enabled = val;
3883 /* convert IB value to chip register value */
3884 if (val == IB_WIDTH_1X)
3885 val = 0;
3886 else if (val == IB_WIDTH_4X)
3887 val = 1;
3888 else
3889 val = 3;
3890 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3891 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3892 break;
3893
3894 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3895 /*
3896 * As with width, only write the actual register if the
3897 * link is currently down, otherwise takes effect on next
3898		 * link change. Since the setting is being explicitly requested
3899 * (via MAD or sysfs), clear autoneg failure status if speed
3900 * autoneg is enabled.
3901 */
3902 ppd->link_speed_enabled = val;
3903 val <<= IBA7322_IBC_SPEED_LSB;
3904 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3905 IBA7322_IBC_MAX_SPEED_MASK;
3906 if (val & (val - 1)) {
3907			/* Multiple speeds enabled */
3908 val |= IBA7322_IBC_IBTA_1_2_MASK |
3909 IBA7322_IBC_MAX_SPEED_MASK;
3910 spin_lock_irqsave(&ppd->lflags_lock, flags);
3911 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3912 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3913 } else if (val & IBA7322_IBC_SPEED_QDR)
3914 val |= IBA7322_IBC_IBTA_1_2_MASK;
3915 /* IBTA 1.2 mode + min/max + speed bits are contiguous */
3916 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3917 break;
3918
3919 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3920 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3921 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3922 break;
3923
3924 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3925 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3926 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3927 break;
3928
3929 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3930 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3931 OverrunThreshold);
3932 if (maskr != val) {
3933 ppd->cpspec->ibcctrl_a &=
3934 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3935 ppd->cpspec->ibcctrl_a |= (u64) val <<
3936 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3937 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3938 ppd->cpspec->ibcctrl_a);
3939 qib_write_kreg(dd, kr_scratch, 0ULL);
3940 }
3941 goto bail;
3942
3943 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3944 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3945 PhyerrThreshold);
3946 if (maskr != val) {
3947 ppd->cpspec->ibcctrl_a &=
3948 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3949 ppd->cpspec->ibcctrl_a |= (u64) val <<
3950 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3951 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3952 ppd->cpspec->ibcctrl_a);
3953 qib_write_kreg(dd, kr_scratch, 0ULL);
3954 }
3955 goto bail;
3956
3957 case QIB_IB_CFG_PKEYS: /* update pkeys */
3958 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3959 ((u64) ppd->pkeys[2] << 32) |
3960 ((u64) ppd->pkeys[3] << 48);
3961 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3962 goto bail;
3963
3964 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3965 /* will only take effect when the link state changes */
3966 if (val == IB_LINKINITCMD_POLL)
3967 ppd->cpspec->ibcctrl_a &=
3968 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3969 else /* SLEEP */
3970 ppd->cpspec->ibcctrl_a |=
3971 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3972 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3973 qib_write_kreg(dd, kr_scratch, 0ULL);
3974 goto bail;
3975
3976 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3977 /*
3978 * Update our housekeeping variables, and set IBC max
3979 * size, same as init code; max IBC is max we allow in
3980 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
3981 * Set even if it's unchanged, print debug message only
3982 * on changes.
3983 */
3984 val = (ppd->ibmaxlen >> 2) + 1;
3985 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3986 ppd->cpspec->ibcctrl_a |= (u64)val <<
3987 SYM_LSB(IBCCtrlA_0, MaxPktLen);
3988 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3989 ppd->cpspec->ibcctrl_a);
3990 qib_write_kreg(dd, kr_scratch, 0ULL);
3991 goto bail;
3992
3993 case QIB_IB_CFG_LSTATE: /* set the IB link state */
3994 switch (val & 0xffff0000) {
3995 case IB_LINKCMD_DOWN:
3996 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3997 ppd->cpspec->ibmalfusesnap = 1;
3998 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3999 crp_errlink);
4000 if (!ppd->cpspec->ibdeltainprog &&
4001 qib_compat_ddr_negotiate) {
4002 ppd->cpspec->ibdeltainprog = 1;
4003 ppd->cpspec->ibsymsnap =
4004 read_7322_creg32_port(ppd,
4005 crp_ibsymbolerr);
4006 ppd->cpspec->iblnkerrsnap =
4007 read_7322_creg32_port(ppd,
4008 crp_iblinkerrrecov);
4009 }
4010 break;
4011
4012 case IB_LINKCMD_ARMED:
4013 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4014 if (ppd->cpspec->ibmalfusesnap) {
4015 ppd->cpspec->ibmalfusesnap = 0;
4016 ppd->cpspec->ibmalfdelta +=
4017 read_7322_creg32_port(ppd,
4018 crp_errlink) -
4019 ppd->cpspec->ibmalfsnap;
4020 }
4021 break;
4022
4023 case IB_LINKCMD_ACTIVE:
4024 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4025 break;
4026
4027 default:
4028 ret = -EINVAL;
4029 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4030 goto bail;
4031 }
4032 switch (val & 0xffff) {
4033 case IB_LINKINITCMD_NOP:
4034 licmd = 0;
4035 break;
4036
4037 case IB_LINKINITCMD_POLL:
4038 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4039 break;
4040
4041 case IB_LINKINITCMD_SLEEP:
4042 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4043 break;
4044
4045 case IB_LINKINITCMD_DISABLE:
4046 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4047 ppd->cpspec->chase_end = 0;
4048 /*
4049			 * stop the state chase counter and timer, if running;
4050			 * wait for a pending timer, but don't clear .data (ppd)!
4051 */
4052 if (ppd->cpspec->chase_timer.expires) {
4053 del_timer_sync(&ppd->cpspec->chase_timer);
4054 ppd->cpspec->chase_timer.expires = 0;
4055 }
4056 break;
4057
4058 default:
4059 ret = -EINVAL;
4060 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4061 val & 0xffff);
4062 goto bail;
4063 }
4064 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4065 goto bail;
4066
4067 case QIB_IB_CFG_OP_VLS:
4068 if (ppd->vls_operational != val) {
4069 ppd->vls_operational = val;
4070 set_vls(ppd);
4071 }
4072 goto bail;
4073
4074 case QIB_IB_CFG_VL_HIGH_LIMIT:
4075 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4076 goto bail;
4077
4078 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4079 if (val > 3) {
4080 ret = -EINVAL;
4081 goto bail;
4082 }
4083 lsb = IBA7322_IBC_HRTBT_LSB;
4084 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4085 break;
4086
4087 case QIB_IB_CFG_PORT:
4088 /* val is the port number of the switch we are connected to. */
4089 if (ppd->dd->cspec->r1) {
4090 cancel_delayed_work(&ppd->cpspec->ipg_work);
4091 ppd->cpspec->ipg_tries = 0;
4092 }
4093 goto bail;
4094
4095 default:
4096 ret = -EINVAL;
4097 goto bail;
4098 }
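	/*
	 * Cases that only set lsb/maskr fall through to here: update the
	 * field in the IBCCtrlB shadow, write it to the chip, and follow
	 * with the usual scratch-register write.
	 */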
4099 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4100 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4101 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4102 qib_write_kreg(dd, kr_scratch, 0);
4103bail:
4104 return ret;
4105}
4106
4107static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4108{
4109 int ret = 0;
4110 u64 val, ctrlb;
4111
4112 /* only IBC loopback, may add serdes and xgxs loopbacks later */
4113 if (!strncmp(what, "ibc", 3)) {
4114 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4115 Loopback);
4116 val = 0; /* disable heart beat, so link will come up */
4117 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4118 ppd->dd->unit, ppd->port);
4119 } else if (!strncmp(what, "off", 3)) {
4120 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4121 Loopback);
4122 /* enable heart beat again */
4123 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4124 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
4125 "(normal)\n", ppd->dd->unit, ppd->port);
4126 } else
4127 ret = -EINVAL;
4128 if (!ret) {
4129 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4130 ppd->cpspec->ibcctrl_a);
4131 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4132 << IBA7322_IBC_HRTBT_LSB);
4133 ppd->cpspec->ibcctrl_b = ctrlb | val;
4134 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4135 ppd->cpspec->ibcctrl_b);
4136 qib_write_kreg(ppd->dd, kr_scratch, 0);
4137 }
4138 return ret;
4139}
4140
4141static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4142 struct ib_vl_weight_elem *vl)
4143{
4144 unsigned i;
4145
4146 for (i = 0; i < 16; i++, regno++, vl++) {
4147 u32 val = qib_read_kreg_port(ppd, regno);
4148
4149 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4150 SYM_RMASK(LowPriority0_0, VirtualLane);
4151 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4152 SYM_RMASK(LowPriority0_0, Weight);
4153 }
4154}
4155
4156static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4157 struct ib_vl_weight_elem *vl)
4158{
4159 unsigned i;
4160
4161 for (i = 0; i < 16; i++, regno++, vl++) {
4162 u64 val;
4163
4164 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4165 SYM_LSB(LowPriority0_0, VirtualLane)) |
4166 ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4167 SYM_LSB(LowPriority0_0, Weight));
4168 qib_write_kreg_port(ppd, regno, val);
4169 }
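	/*
	 * Now that the weights are programmed, enable the IB VL
	 * arbiter if it isn't already on.
	 */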
4170 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4171 struct qib_devdata *dd = ppd->dd;
4172 unsigned long flags;
4173
4174 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4175 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4176 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4177 qib_write_kreg(dd, kr_scratch, 0);
4178 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4179 }
4180}
4181
4182static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4183{
4184 switch (which) {
4185 case QIB_IB_TBL_VL_HIGH_ARB:
4186 get_vl_weights(ppd, krp_highprio_0, t);
4187 break;
4188
4189 case QIB_IB_TBL_VL_LOW_ARB:
4190 get_vl_weights(ppd, krp_lowprio_0, t);
4191 break;
4192
4193 default:
4194 return -EINVAL;
4195 }
4196 return 0;
4197}
4198
4199static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4200{
4201 switch (which) {
4202 case QIB_IB_TBL_VL_HIGH_ARB:
4203 set_vl_weights(ppd, krp_highprio_0, t);
4204 break;
4205
4206 case QIB_IB_TBL_VL_LOW_ARB:
4207 set_vl_weights(ppd, krp_lowprio_0, t);
4208 break;
4209
4210 default:
4211 return -EINVAL;
4212 }
4213 return 0;
4214}
4215
4216static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4217 u32 updegr, u32 egrhd)
4218{
4219 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4220 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4221 if (updegr)
4222 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4223}
4224
4225static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4226{
4227 u32 head, tail;
4228
4229 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4230 if (rcd->rcvhdrtail_kvaddr)
4231 tail = qib_get_rcvhdrtail(rcd);
4232 else
4233 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4234 return head == tail;
4235}
4236
4237#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4238 QIB_RCVCTRL_CTXT_DIS | \
4239 QIB_RCVCTRL_TIDFLOW_ENB | \
4240 QIB_RCVCTRL_TIDFLOW_DIS | \
4241 QIB_RCVCTRL_TAILUPD_ENB | \
4242 QIB_RCVCTRL_TAILUPD_DIS | \
4243 QIB_RCVCTRL_INTRAVAIL_ENB | \
4244 QIB_RCVCTRL_INTRAVAIL_DIS | \
4245 QIB_RCVCTRL_BP_ENB | \
4246 QIB_RCVCTRL_BP_DIS)
4247
4248#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4249 QIB_RCVCTRL_CTXT_DIS | \
4250 QIB_RCVCTRL_PKEY_DIS | \
4251 QIB_RCVCTRL_PKEY_ENB)
4252
4253/*
4254 * Modify the RCVCTRL register in a chip-specific way. This
4255 * is a function because bit positions and (future) register
4256 * locations are chip-specific, but the needed operations are
4257 * generic. <op> is a bit-mask because we often want to
4258 * do multiple modifications.
4259 */
4260static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4261 int ctxt)
4262{
4263 struct qib_devdata *dd = ppd->dd;
4264 struct qib_ctxtdata *rcd;
4265 u64 mask, val;
4266 unsigned long flags;
4267
4268 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4269
4270 if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4271 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4272 if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4273 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4274 if (op & QIB_RCVCTRL_TAILUPD_ENB)
4275 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4276 if (op & QIB_RCVCTRL_TAILUPD_DIS)
4277 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4278 if (op & QIB_RCVCTRL_PKEY_ENB)
4279 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4280 if (op & QIB_RCVCTRL_PKEY_DIS)
4281 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
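	/* a negative ctxt means operate on all configured contexts */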
4282 if (ctxt < 0) {
4283 mask = (1ULL << dd->ctxtcnt) - 1;
4284 rcd = NULL;
4285 } else {
4286 mask = (1ULL << ctxt);
4287 rcd = dd->rcd[ctxt];
4288 }
4289 if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4290 ppd->p_rcvctrl |=
4291 (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4292 if (!(dd->flags & QIB_NODMA_RTAIL)) {
4293 op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4294 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4295 }
4296 /* Write these registers before the context is enabled. */
4297 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4298 rcd->rcvhdrqtailaddr_phys);
4299 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4300 rcd->rcvhdrq_phys);
4301 rcd->seq_cnt = 1;
4302#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4303 if (dd->flags & QIB_DCA_ENABLED)
4304 qib_update_rhdrq_dca(rcd);
4305#endif
4306 }
4307 if (op & QIB_RCVCTRL_CTXT_DIS)
4308 ppd->p_rcvctrl &=
4309 ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4310 if (op & QIB_RCVCTRL_BP_ENB)
4311 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4312 if (op & QIB_RCVCTRL_BP_DIS)
4313 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4314 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4315 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4316 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4317 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4318 /*
4319 * Decide which registers to write depending on the ops enabled.
4320 * Special case is "flush" (no bits set at all)
4321 * which needs to write both.
4322 */
4323 if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4324 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4325 if (op == 0 || (op & RCVCTRL_PORT_MODS))
4326 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4327 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4328 /*
4329 * Init the context registers also; if we were
4330 * disabled, tail and head should both be zero
4331 * already from the enable, but since we don't
4332		 * know, we have to do it explicitly.
4333 */
4334 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4335 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4336
4337 /* be sure enabling write seen; hd/tl should be 0 */
4338 (void) qib_read_kreg32(dd, kr_scratch);
4339 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4340 dd->rcd[ctxt]->head = val;
4341 /* If kctxt, interrupt on next receive. */
4342 if (ctxt < dd->first_user_ctxt)
4343 val |= dd->rhdrhead_intr_off;
4344 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4345 } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4346 dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4347 /* arm rcv interrupt */
4348 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4349 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4350 }
4351 if (op & QIB_RCVCTRL_CTXT_DIS) {
4352 unsigned f;
4353
4354 /* Now that the context is disabled, clear these registers. */
4355 if (ctxt >= 0) {
4356 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4357 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4358 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4359 qib_write_ureg(dd, ur_rcvflowtable + f,
4360 TIDFLOW_ERRBITS, ctxt);
4361 } else {
4362 unsigned i;
4363
4364 for (i = 0; i < dd->cfgctxts; i++) {
4365 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4366 i, 0);
4367 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4368 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4369 qib_write_ureg(dd, ur_rcvflowtable + f,
4370 TIDFLOW_ERRBITS, i);
4371 }
4372 }
4373 }
4374 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4375}
4376
4377/*
4378 * Modify the SENDCTRL register in a chip-specific way. This
4379 * is a function because there are multiple such registers with
4380 * slightly different layouts.
4381 * The chip doesn't allow back-to-back sendctrl writes, so write
4382 * the scratch register after writing sendctrl.
4383 *
4384 * Which register is written depends on the operation.
4385 * Most operate on the common register, while
4386 * SEND_ENB and SEND_DIS operate on the per-port ones.
4387 * SEND_ENB is included in common because it can change SPCL_TRIG
4388 */
4389#define SENDCTRL_COMMON_MODS (\
4390 QIB_SENDCTRL_CLEAR | \
4391 QIB_SENDCTRL_AVAIL_DIS | \
4392 QIB_SENDCTRL_AVAIL_ENB | \
4393 QIB_SENDCTRL_AVAIL_BLIP | \
4394 QIB_SENDCTRL_DISARM | \
4395 QIB_SENDCTRL_DISARM_ALL | \
4396 QIB_SENDCTRL_SEND_ENB)
4397
4398#define SENDCTRL_PORT_MODS (\
4399 QIB_SENDCTRL_CLEAR | \
4400 QIB_SENDCTRL_SEND_ENB | \
4401 QIB_SENDCTRL_SEND_DIS | \
4402 QIB_SENDCTRL_FLUSH)
4403
4404static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4405{
4406 struct qib_devdata *dd = ppd->dd;
4407 u64 tmp_dd_sendctrl;
4408 unsigned long flags;
4409
4410 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4411
4412 /* First the dd ones that are "sticky", saved in shadow */
4413 if (op & QIB_SENDCTRL_CLEAR)
4414 dd->sendctrl = 0;
4415 if (op & QIB_SENDCTRL_AVAIL_DIS)
4416 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4417 else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4418 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4419 if (dd->flags & QIB_USE_SPCL_TRIG)
4420 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4421 }
4422
4423 /* Then the ppd ones that are "sticky", saved in shadow */
4424 if (op & QIB_SENDCTRL_SEND_DIS)
4425 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4426 else if (op & QIB_SENDCTRL_SEND_ENB)
4427 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4428
4429 if (op & QIB_SENDCTRL_DISARM_ALL) {
4430 u32 i, last;
4431
4432 tmp_dd_sendctrl = dd->sendctrl;
4433 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4434 /*
4435 * Disarm any buffers that are not yet launched,
4436 * disabling updates until done.
4437 */
4438 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4439 for (i = 0; i < last; i++) {
4440 qib_write_kreg(dd, kr_sendctrl,
4441 tmp_dd_sendctrl |
4442 SYM_MASK(SendCtrl, Disarm) | i);
4443 qib_write_kreg(dd, kr_scratch, 0);
4444 }
4445 }
4446
4447 if (op & QIB_SENDCTRL_FLUSH) {
4448 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4449
4450 /*
4451 * Now drain all the fifos. The Abort bit should never be
4452 * needed, so for now, at least, we don't use it.
4453 */
4454 tmp_ppd_sendctrl |=
4455 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4456 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4457 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4458 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4459 qib_write_kreg(dd, kr_scratch, 0);
4460 }
4461
4462 tmp_dd_sendctrl = dd->sendctrl;
4463
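	/*
	 * For a single DISARM, the buffer number to disarm is carried in
	 * the low bits of op.
	 */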
4464 if (op & QIB_SENDCTRL_DISARM)
4465 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4466 ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4467 SYM_LSB(SendCtrl, DisarmSendBuf));
4468 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4469 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4470 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4471
4472 if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4473 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4474 qib_write_kreg(dd, kr_scratch, 0);
4475 }
4476
4477 if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4478 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4479 qib_write_kreg(dd, kr_scratch, 0);
4480 }
4481
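	/*
	 * For AVAIL_BLIP, SendBufAvailUpd was written as 0 above; writing
	 * the shadowed sendctrl back re-enables buffer-available updates.
	 */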
4482 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4483 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4484 qib_write_kreg(dd, kr_scratch, 0);
4485 }
4486
4487 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4488
4489 if (op & QIB_SENDCTRL_FLUSH) {
4490 u32 v;
4491 /*
4492 * ensure writes have hit chip, then do a few
4493 * more reads, to allow DMA of pioavail registers
4494 * to occur, so in-memory copy is in sync with
4495 * the chip. Not always safe to sleep.
4496 */
4497 v = qib_read_kreg32(dd, kr_scratch);
4498 qib_write_kreg(dd, kr_scratch, v);
4499 v = qib_read_kreg32(dd, kr_scratch);
4500 qib_write_kreg(dd, kr_scratch, v);
4501 qib_read_kreg32(dd, kr_scratch);
4502 }
4503}
4504
4505#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4506#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4507#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4508
4509/**
4510 * qib_portcntr_7322 - read a per-port chip counter
4511 * @ppd: the qlogic_ib pport
4512 * @reg: the counter to read (not a chip offset)
4513 */
4514static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4515{
4516 struct qib_devdata *dd = ppd->dd;
4517 u64 ret = 0ULL;
4518 u16 creg;
4519 /* 0xffff for unimplemented or synthesized counters */
4520 static const u32 xlator[] = {
4521 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4522 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4523 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4524 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4525 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4526 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4527 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4528 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4529 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4530 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4531 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4532 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4533 [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4534 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4535 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4536 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4537 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4538 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4539 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4540 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4541 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4542 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4543 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4544 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4545 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4546 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4547 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4548 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4549 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4550 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4551 /*
4552		 * the next 3 aren't really counters, but were implemented
4553		 * as counters in older chips, so this code still accesses
4554		 * them as though they were counters.
4555 */
4556 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4557 [QIBPORTCNTR_PSSTART] = krp_psstart,
4558 [QIBPORTCNTR_PSSTAT] = krp_psstat,
4559 /* pseudo-counter, summed for all ports */
4560 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4561 };
4562
4563 if (reg >= ARRAY_SIZE(xlator)) {
4564 qib_devinfo(ppd->dd->pcidev,
4565 "Unimplemented portcounter %u\n", reg);
4566 goto done;
4567 }
4568 creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4569
4570 /* handle non-counters and special cases first */
4571 if (reg == QIBPORTCNTR_KHDROVFL) {
4572 int i;
4573
4574 /* sum over all kernel contexts (skip if mini_init) */
4575 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4576 struct qib_ctxtdata *rcd = dd->rcd[i];
4577
4578 if (!rcd || rcd->ppd != ppd)
4579 continue;
4580 ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4581 }
4582 goto done;
4583 } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4584 /*
4585 * Used as part of the synthesis of port_rcv_errors
4586 * in the verbs code for IBTA counters. Not needed for 7322,
4587 * because all the errors are already counted by other cntrs.
4588 */
4589 goto done;
4590 } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4591 reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4592 /* were counters in older chips, now per-port kernel regs */
4593 ret = qib_read_kreg_port(ppd, creg);
4594 goto done;
4595 }
4596
4597 /*
4598 * Only fast increment counters are 64 bits; use 32 bit reads to
4599 * avoid two independent reads when on Opteron.
4600 */
4601 if (xlator[reg] & _PORT_64BIT_FLAG)
4602 ret = read_7322_creg_port(ppd, creg);
4603 else
4604 ret = read_7322_creg32_port(ppd, creg);
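	/*
	 * Apply the driver-maintained snapshot/delta adjustments kept for
	 * the symbol-error, link-error-recovery, errlink and link-down
	 * counters.
	 */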
4605 if (creg == crp_ibsymbolerr) {
4606 if (ppd->cpspec->ibdeltainprog)
4607 ret -= ret - ppd->cpspec->ibsymsnap;
4608 ret -= ppd->cpspec->ibsymdelta;
4609 } else if (creg == crp_iblinkerrrecov) {
4610 if (ppd->cpspec->ibdeltainprog)
4611 ret -= ret - ppd->cpspec->iblnkerrsnap;
4612 ret -= ppd->cpspec->iblnkerrdelta;
4613 } else if (creg == crp_errlink)
4614 ret -= ppd->cpspec->ibmalfdelta;
4615 else if (creg == crp_iblinkdown)
4616 ret += ppd->cpspec->iblnkdowndelta;
4617done:
4618 return ret;
4619}
4620
4621/*
4622 * Device counter names (not port-specific), one line per stat,
4623 * single string. Used by utilities like ipathstats to print the stats
4624 * in a way which works for different versions of drivers, without changing
4625 * the utility. Names need to be 12 chars or less (w/o newline), for proper
4626 * display by utility.
4627 * Non-error counters are first.
4628 * Start of "error" counters is indicated by a leading "E " on the first
4629 * "error" counter, and doesn't count in label length.
4630 * The EgrOvfl list needs to be last so we truncate them at the configured
4631 * context count for the device.
4632 * cntr7322indices contains the corresponding register indices.
4633 */
4634static const char cntr7322names[] =
4635 "Interrupts\n"
4636 "HostBusStall\n"
4637 "E RxTIDFull\n"
4638 "RxTIDInvalid\n"
4639 "RxTIDFloDrop\n" /* 7322 only */
4640 "Ctxt0EgrOvfl\n"
4641 "Ctxt1EgrOvfl\n"
4642 "Ctxt2EgrOvfl\n"
4643 "Ctxt3EgrOvfl\n"
4644 "Ctxt4EgrOvfl\n"
4645 "Ctxt5EgrOvfl\n"
4646 "Ctxt6EgrOvfl\n"
4647 "Ctxt7EgrOvfl\n"
4648 "Ctxt8EgrOvfl\n"
4649 "Ctxt9EgrOvfl\n"
4650 "Ctx10EgrOvfl\n"
4651 "Ctx11EgrOvfl\n"
4652 "Ctx12EgrOvfl\n"
4653 "Ctx13EgrOvfl\n"
4654 "Ctx14EgrOvfl\n"
4655 "Ctx15EgrOvfl\n"
4656 "Ctx16EgrOvfl\n"
4657 "Ctx17EgrOvfl\n"
4658 ;
4659
4660static const u32 cntr7322indices[] = {
4661 cr_lbint | _PORT_64BIT_FLAG,
4662 cr_lbstall | _PORT_64BIT_FLAG,
4663 cr_tidfull,
4664 cr_tidinvalid,
4665 cr_rxtidflowdrop,
4666 cr_base_egrovfl + 0,
4667 cr_base_egrovfl + 1,
4668 cr_base_egrovfl + 2,
4669 cr_base_egrovfl + 3,
4670 cr_base_egrovfl + 4,
4671 cr_base_egrovfl + 5,
4672 cr_base_egrovfl + 6,
4673 cr_base_egrovfl + 7,
4674 cr_base_egrovfl + 8,
4675 cr_base_egrovfl + 9,
4676 cr_base_egrovfl + 10,
4677 cr_base_egrovfl + 11,
4678 cr_base_egrovfl + 12,
4679 cr_base_egrovfl + 13,
4680 cr_base_egrovfl + 14,
4681 cr_base_egrovfl + 15,
4682 cr_base_egrovfl + 16,
4683 cr_base_egrovfl + 17,
4684};
4685
4686/*
4687 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4688 * portcntr7322indices is somewhat complicated by some registers needing
4689 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4690 */
4691static const char portcntr7322names[] =
4692 "TxPkt\n"
4693 "TxFlowPkt\n"
4694 "TxWords\n"
4695 "RxPkt\n"
4696 "RxFlowPkt\n"
4697 "RxWords\n"
4698 "TxFlowStall\n"
4699 "TxDmaDesc\n" /* 7220 and 7322-only */
4700 "E RxDlidFltr\n" /* 7220 and 7322-only */
4701 "IBStatusChng\n"
4702 "IBLinkDown\n"
4703 "IBLnkRecov\n"
4704 "IBRxLinkErr\n"
4705 "IBSymbolErr\n"
4706 "RxLLIErr\n"
4707 "RxBadFormat\n"
4708 "RxBadLen\n"
4709 "RxBufOvrfl\n"
4710 "RxEBP\n"
4711 "RxFlowCtlErr\n"
4712 "RxICRCerr\n"
4713 "RxLPCRCerr\n"
4714 "RxVCRCerr\n"
4715 "RxInvalLen\n"
4716 "RxInvalPKey\n"
4717 "RxPktDropped\n"
4718 "TxBadLength\n"
4719 "TxDropped\n"
4720 "TxInvalLen\n"
4721 "TxUnderrun\n"
4722 "TxUnsupVL\n"
4723 "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4724 "RxVL15Drop\n"
4725 "RxVlErr\n"
4726 "XcessBufOvfl\n"
4727 "RxQPBadCtxt\n" /* 7322-only from here down */
4728 "TXBadHeader\n"
4729 ;
4730
4731static const u32 portcntr7322indices[] = {
4732 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4733 crp_pktsendflow,
4734 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4735 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4736 crp_pktrcvflowctrl,
4737 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4738 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4739 crp_txsdmadesc | _PORT_64BIT_FLAG,
4740 crp_rxdlidfltr,
4741 crp_ibstatuschange,
4742 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4743 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4744 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4745 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4746 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4747 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4748 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4749 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4750 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4751 crp_rcvflowctrlviol,
4752 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4753 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4754 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4755 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4756 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4757 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4758 crp_txminmaxlenerr,
4759 crp_txdroppedpkt,
4760 crp_txlenerr,
4761 crp_txunderrun,
4762 crp_txunsupvl,
4763 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4764 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4765 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4766 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4767 crp_rxqpinvalidctxt,
4768 crp_txhdrerr,
4769};
4770
4771/* do all the setup to make the counter reads efficient later */
4772static void init_7322_cntrnames(struct qib_devdata *dd)
4773{
4774 int i, j = 0;
4775 char *s;
4776
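	/*
	 * Count the device counter names, stopping the per-context
	 * EgrOvfl entries at the number of configured contexts.
	 */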
4777 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4778 i++) {
4779 /* we always have at least one counter before the egrovfl */
4780 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4781 j = 1;
4782 s = strchr(s + 1, '\n');
4783 if (s && j)
4784 j++;
4785 }
4786 dd->cspec->ncntrs = i;
4787 if (!s)
4788 /* full list; size is without terminating null */
4789 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4790 else
4791 dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4792 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4793 * sizeof(u64), GFP_KERNEL);
4794 if (!dd->cspec->cntrs)
4795 qib_dev_err(dd, "Failed allocation for counters\n");
4796
4797 for (i = 0, s = (char *)portcntr7322names; s; i++)
4798 s = strchr(s + 1, '\n');
4799 dd->cspec->nportcntrs = i - 1;
4800 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4801 for (i = 0; i < dd->num_pports; ++i) {
4802 dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4803 * sizeof(u64), GFP_KERNEL);
4804 if (!dd->pport[i].cpspec->portcntrs)
4805 qib_dev_err(dd, "Failed allocation for"
4806 " portcounters\n");
4807 }
4808}
4809
4810static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4811 u64 **cntrp)
4812{
4813 u32 ret;
4814
4815 if (namep) {
4816 ret = dd->cspec->cntrnamelen;
4817 if (pos >= ret)
4818 ret = 0; /* final read after getting everything */
4819 else
4820 *namep = (char *) cntr7322names;
4821 } else {
4822 u64 *cntr = dd->cspec->cntrs;
4823 int i;
4824
4825 ret = dd->cspec->ncntrs * sizeof(u64);
4826 if (!cntr || pos >= ret) {
4827 /* everything read, or couldn't get memory */
4828 ret = 0;
4829 goto done;
4830 }
4831 *cntrp = cntr;
4832 for (i = 0; i < dd->cspec->ncntrs; i++)
4833 if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4834 *cntr++ = read_7322_creg(dd,
4835 cntr7322indices[i] &
4836 _PORT_CNTR_IDXMASK);
4837 else
4838 *cntr++ = read_7322_creg32(dd,
4839 cntr7322indices[i]);
4840 }
4841done:
4842 return ret;
4843}
4844
4845static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4846 char **namep, u64 **cntrp)
4847{
4848 u32 ret;
4849
4850 if (namep) {
4851 ret = dd->cspec->portcntrnamelen;
4852 if (pos >= ret)
4853 ret = 0; /* final read after getting everything */
4854 else
4855 *namep = (char *)portcntr7322names;
4856 } else {
4857 struct qib_pportdata *ppd = &dd->pport[port];
4858 u64 *cntr = ppd->cpspec->portcntrs;
4859 int i;
4860
4861 ret = dd->cspec->nportcntrs * sizeof(u64);
4862 if (!cntr || pos >= ret) {
4863 /* everything read, or couldn't get memory */
4864 ret = 0;
4865 goto done;
4866 }
4867 *cntrp = cntr;
4868 for (i = 0; i < dd->cspec->nportcntrs; i++) {
4869 if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4870 *cntr++ = qib_portcntr_7322(ppd,
4871 portcntr7322indices[i] &
4872 _PORT_CNTR_IDXMASK);
4873 else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4874 *cntr++ = read_7322_creg_port(ppd,
4875 portcntr7322indices[i] &
4876 _PORT_CNTR_IDXMASK);
4877 else
4878 *cntr++ = read_7322_creg32_port(ppd,
4879 portcntr7322indices[i]);
4880 }
4881 }
4882done:
4883 return ret;
4884}
4885
4886/**
4887 * qib_get_7322_faststats - get word counters from chip before they overflow
4888 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
4889 *
4890 * VESTIGIAL IBA7322 has no "small fast counters", so the only
4891 * real purpose of this function is to maintain the notion of
4892 * "active time", which in turn is only logged into the eeprom,
4893 * which we don't have, yet, for 7322-based boards.
4894 *
4895 * called from add_timer
4896 */
4897static void qib_get_7322_faststats(unsigned long opaque)
4898{
4899 struct qib_devdata *dd = (struct qib_devdata *) opaque;
4900 struct qib_pportdata *ppd;
4901 unsigned long flags;
4902 u64 traffic_wds;
4903 int pidx;
4904
4905 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4906 ppd = dd->pport + pidx;
4907
4908 /*
4909 * If the port isn't enabled, or isn't operational, or if
4910 * diags are running (which can cause memory diags to fail),
4911 * skip this port this time.
4912 */
4913 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4914 || dd->diag_client)
4915 continue;
4916
4917 /*
4918 * Maintain an activity timer, based on traffic
4919 * exceeding a threshold, so we need to check the word-counts
4920 * even if they are 64-bit.
4921 */
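 /*
  * Rough outline of the delta tracking below: traffic_wds is first the
  * port's total send+receive word count, then (after subtracting the
  * stored total) the delta since the last poll; the delta is folded back
  * into the running total, and if it exceeds the threshold,
  * ACTIVITY_TIMER seconds are credited to active_time.
  */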
4922 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4923 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4924 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4925 traffic_wds -= ppd->dd->traffic_wds;
4926 ppd->dd->traffic_wds += traffic_wds;
4927 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4928 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4929 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4930 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4931 QIB_IB_QDR) &&
4932 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4933 QIBL_LINKACTIVE)) &&
4934 ppd->cpspec->qdr_dfe_time &&
4935 time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
4936 ppd->cpspec->qdr_dfe_on = 0;
4937
4938 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4939 ppd->dd->cspec->r1 ?
4940 QDR_STATIC_ADAPT_INIT_R1 :
4941 QDR_STATIC_ADAPT_INIT);
4942 force_h1(ppd);
4943 }
4944 }
4945 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4946}
4947
4948/*
4949 * If we were using MSIx, try to fallback to INTx.
4950 */
4951static int qib_7322_intr_fallback(struct qib_devdata *dd)
4952{
4953 if (!dd->cspec->num_msix_entries)
4954 return 0; /* already using INTx */
4955
4956 qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
4957 " trying INTx interrupts\n");
4958 qib_7322_nomsix(dd);
4959 qib_enable_intx(dd->pcidev);
4960 qib_setup_7322_interrupt(dd, 0);
4961 return 1;
4962}
4963
4964/*
4965 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
4966 * than resetting the IBC or external link state, and useful in some
4967 * cases to cause some retraining. To do this right, we reset IBC
4968 * as well, then return to previous state (which may be still in reset)
4969 * NOTE: some callers of this "know" this writes the current value
4970 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
4971 * check all callers.
4972 */
4973static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4974{
4975 u64 val;
4976 struct qib_devdata *dd = ppd->dd;
4977 const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4978 SYM_MASK(IBPCSConfig_0, xcv_treset) |
4979 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4980
4981 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4982 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4983 ppd->cpspec->ibcctrl_a &
4984 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4985
4986 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4987 qib_read_kreg32(dd, kr_scratch);
4988 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4989 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4990 qib_write_kreg(dd, kr_scratch, 0ULL);
4991}
4992
4993/*
4994 * This code for non-IBTA-compliant IB speed negotiation is only known to
4995 * work for the SDR to DDR transition, and only between an HCA and a switch
4996 * with recent firmware. It is based on observed heuristics, rather than
4997 * actual knowledge of the non-compliant speed negotiation.
4998 * It has a number of hard-coded fields, since the hope is to rewrite this
4999 * when a spec is available on how the negotiation is intended to work.
5000 */
5001static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5002 u32 dcnt, u32 *data)
5003{
5004 int i;
5005 u64 pbc;
5006 u32 __iomem *piobuf;
5007 u32 pnum, control, len;
5008 struct qib_devdata *dd = ppd->dd;
5009
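 /*
  * Rough PIO buffer layout used below: the 2-dword PBC is written at the
  * start of the buffer, the 7-dword header at dword offset 2, and the
  * MAD payload at dword offset 9.  The ICRC dword is counted in len but
  * not written here (presumably supplied by the hardware).
  */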
5010 i = 0;
5011 len = 7 + dcnt + 1; /* 7 dword header, dcnt dwords of data, 1 dword icrc */
5012 control = qib_7322_setpbc_control(ppd, len, 0, 15);
5013 pbc = ((u64) control << 32) | len;
5014 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5015 if (i++ > 15)
5016 return;
5017 udelay(2);
5018 }
5019 /* disable header check on this packet, since it can't be valid */
5020 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5021 writeq(pbc, piobuf);
5022 qib_flush_wc();
5023 qib_pio_copy(piobuf + 2, hdr, 7);
5024 qib_pio_copy(piobuf + 9, data, dcnt);
5025 if (dd->flags & QIB_USE_SPCL_TRIG) {
5026 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5027
5028 qib_flush_wc();
5029 __raw_writel(0xaebecede, piobuf + spcl_off);
5030 }
5031 qib_flush_wc();
5032 qib_sendbuf_done(dd, pnum);
5033 /* and re-enable hdr check */
5034 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5035}
5036
5037/*
5038 * _start packet gets sent twice at start, _done gets sent twice at end
5039 */
5040static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5041{
5042 struct qib_devdata *dd = ppd->dd;
5043 static u32 swapped;
5044 u32 dw, i, hcnt, dcnt, *data;
5045 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5046 static u32 madpayload_start[0x40] = {
5047 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5048 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5049 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5050 };
5051 static u32 madpayload_done[0x40] = {
5052 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5053 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5054 0x40000001, 0x1388, 0x15e, /* rest 0's */
5055 };
5056
5057 dcnt = ARRAY_SIZE(madpayload_start);
5058 hcnt = ARRAY_SIZE(hdr);
5059 if (!swapped) {
5060 /* for maintainability, do it at runtime */
5061 for (i = 0; i < hcnt; i++) {
5062 dw = (__force u32) cpu_to_be32(hdr[i]);
5063 hdr[i] = dw;
5064 }
5065 for (i = 0; i < dcnt; i++) {
5066 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5067 madpayload_start[i] = dw;
5068 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5069 madpayload_done[i] = dw;
5070 }
5071 swapped = 1;
5072 }
5073
5074 data = which ? madpayload_done : madpayload_start;
5075
5076 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5077 qib_read_kreg64(dd, kr_scratch);
5078 udelay(2);
5079 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5080 qib_read_kreg64(dd, kr_scratch);
5081 udelay(2);
5082}
5083
5084/*
5085 * Do the absolute minimum to cause an IB speed change, and make it
5086 * ready, but don't actually trigger the change. The caller will
5087 * do that when ready (if link is in Polling training state, it will
5088 * happen immediately, otherwise when link next goes down)
5089 *
5090 * This routine should only be used as part of the DDR autonegotiation
5091 * code for devices that are not compliant with IB 1.2 (or code that
5092 * fixes things up for same).
5093 *
5094 * When the link has gone down with autoneg enabled, or when autoneg has
5095 * failed and we give up until next time, we set both speeds, and then
5096 * we want IBTA negotiation enabled as well as "use max enabled speed".
5097 */
5098static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5099{
5100 u64 newctrlb;
5101 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5102 IBA7322_IBC_IBTA_1_2_MASK |
5103 IBA7322_IBC_MAX_SPEED_MASK);
5104
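 /*
  * speed is a bit mask; (speed & (speed - 1)) is non-zero only when more
  * than one speed bit is set, in which case all requested speeds are
  * advertised and IBTA 1.2 negotiation picks the rate.
  */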
5105 if (speed & (speed - 1)) /* multiple speeds */
5106 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5107 IBA7322_IBC_IBTA_1_2_MASK |
5108 IBA7322_IBC_MAX_SPEED_MASK;
5109 else
5110 newctrlb |= speed == QIB_IB_QDR ?
5111 IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5112 ((speed == QIB_IB_DDR ?
5113 IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5114
5115 if (newctrlb == ppd->cpspec->ibcctrl_b)
5116 return;
5117
5118 ppd->cpspec->ibcctrl_b = newctrlb;
5119 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5120 qib_write_kreg(ppd->dd, kr_scratch, 0);
5121}
5122
5123/*
5124 * This routine is only used when we are not talking to another
5125 * IB 1.2-compliant device that we think can do DDR.
5126 * (This includes all existing switch chips as of Oct 2007.)
5127 * 1.2-compliant devices go directly to DDR prior to reaching INIT
5128 */
5129static void try_7322_autoneg(struct qib_pportdata *ppd)
5130{
5131 unsigned long flags;
5132
5133 spin_lock_irqsave(&ppd->lflags_lock, flags);
5134 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5135 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5136 qib_autoneg_7322_send(ppd, 0);
5137 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5138 qib_7322_mini_pcs_reset(ppd);
5139 /* 2 msec is minimum length of a poll cycle */
5140 schedule_delayed_work(&ppd->cpspec->autoneg_work,
5141 msecs_to_jiffies(2));
5142}
5143
5144/*
5145 * Handle the empirically determined mechanism for auto-negotiation
5146 * of DDR speed with switches.
5147 */
5148static void autoneg_7322_work(struct work_struct *work)
5149{
5150 struct qib_pportdata *ppd;
5151 struct qib_devdata *dd;
5152 u64 startms;
5153 u32 i;
5154 unsigned long flags;
5155
5156 ppd = container_of(work, struct qib_chippport_specific,
5157 autoneg_work.work)->ppd;
5158 dd = ppd->dd;
5159
5160 startms = jiffies_to_msecs(jiffies);
5161
5162 /*
5163 * Busy wait for this first part, it should be at most a
5164 * few hundred usec, since we scheduled ourselves for 2msec.
5165 */
5166 for (i = 0; i < 25; i++) {
5167 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5168 == IB_7322_LT_STATE_POLLQUIET) {
5169 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5170 break;
5171 }
5172 udelay(100);
5173 }
5174
5175 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5176 goto done; /* we got there early or told to stop */
5177
5178 /* we expect this to timeout */
5179 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5180 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5181 msecs_to_jiffies(90)))
5182 goto done;
5183 qib_7322_mini_pcs_reset(ppd);
5184
5185 /* we expect this to timeout */
5186 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5187 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5188 msecs_to_jiffies(1700)))
5189 goto done;
5190 qib_7322_mini_pcs_reset(ppd);
5191
5192 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5193
5194 /*
5195 * Wait up to 250 msec for link to train and get to INIT at DDR;
5196 * this should terminate early.
5197 */
5198 wait_event_timeout(ppd->cpspec->autoneg_wait,
5199 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5200 msecs_to_jiffies(250));
5201done:
5202 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5203 spin_lock_irqsave(&ppd->lflags_lock, flags);
5204 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5205 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5206 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5207 ppd->cpspec->autoneg_tries = 0;
5208 }
5209 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5210 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5211 }
5212}
5213
5214/*
5215 * This routine is used to request IPG set in the QLogic switch.
5216 * Only called if r1.
5217 */
5218static void try_7322_ipg(struct qib_pportdata *ppd)
5219{
5220 struct qib_ibport *ibp = &ppd->ibport_data;
5221 struct ib_mad_send_buf *send_buf;
5222 struct ib_mad_agent *agent;
5223 struct ib_smp *smp;
5224 unsigned delay;
5225 int ret;
5226
5227 agent = ibp->send_agent;
5228 if (!agent)
5229 goto retry;
5230
5231 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5232 IB_MGMT_MAD_DATA, GFP_ATOMIC);
5233 if (IS_ERR(send_buf))
5234 goto retry;
5235
5236 if (!ibp->smi_ah) {
5237 struct ib_ah_attr attr;
5238 struct ib_ah *ah;
5239
5240 memset(&attr, 0, sizeof attr);
5241 attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
5242 attr.port_num = ppd->port;
5243 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
5244 if (IS_ERR(ah))
5245 ret = -EINVAL;
5246 else {
5247 send_buf->ah = ah;
5248 ibp->smi_ah = to_iah(ah);
5249 ret = 0;
5250 }
5251 } else {
5252 send_buf->ah = &ibp->smi_ah->ibah;
5253 ret = 0;
5254 }
5255
5256 smp = send_buf->mad;
5257 smp->base_version = IB_MGMT_BASE_VERSION;
5258 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5259 smp->class_version = 1;
5260 smp->method = IB_MGMT_METHOD_SEND;
5261 smp->hop_cnt = 1;
5262 smp->attr_id = QIB_VENDOR_IPG;
5263 smp->attr_mod = 0;
5264
5265 if (!ret)
5266 ret = ib_post_send_mad(send_buf, NULL);
5267 if (ret)
5268 ib_free_send_mad(send_buf);
5269retry:
5270 delay = 2 << ppd->cpspec->ipg_tries;
5271 schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
5272}
5273
5274/*
5275 * Timeout handler for setting IPG.
5276 * Only called if r1.
5277 */
5278static void ipg_7322_work(struct work_struct *work)
5279{
5280 struct qib_pportdata *ppd;
5281
5282 ppd = container_of(work, struct qib_chippport_specific,
5283 ipg_work.work)->ppd;
5284 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5285 && ++ppd->cpspec->ipg_tries <= 10)
5286 try_7322_ipg(ppd);
5287}
5288
5289static u32 qib_7322_iblink_state(u64 ibcs)
5290{
5291 u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5292
5293 switch (state) {
5294 case IB_7322_L_STATE_INIT:
5295 state = IB_PORT_INIT;
5296 break;
5297 case IB_7322_L_STATE_ARM:
5298 state = IB_PORT_ARMED;
5299 break;
5300 case IB_7322_L_STATE_ACTIVE:
5301 /* fall through */
5302 case IB_7322_L_STATE_ACT_DEFER:
5303 state = IB_PORT_ACTIVE;
5304 break;
5305 default: /* fall through */
5306 case IB_7322_L_STATE_DOWN:
5307 state = IB_PORT_DOWN;
5308 break;
5309 }
5310 return state;
5311}
5312
5313/* returns the IBTA port state, rather than the IBC link training state */
5314static u8 qib_7322_phys_portstate(u64 ibcs)
5315{
5316 u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5317 return qib_7322_physportstate[state];
5318}
5319
5320static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5321{
5322 int ret = 0, symadj = 0;
5323 unsigned long flags;
5324 int mult;
5325
5326 spin_lock_irqsave(&ppd->lflags_lock, flags);
5327 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5328 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5329
5330 /* Update our picture of width and speed from chip */
5331 if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5332 ppd->link_speed_active = QIB_IB_QDR;
5333 mult = 4;
5334 } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5335 ppd->link_speed_active = QIB_IB_DDR;
5336 mult = 2;
5337 } else {
5338 ppd->link_speed_active = QIB_IB_SDR;
5339 mult = 1;
5340 }
5341 if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5342 ppd->link_width_active = IB_WIDTH_4X;
5343 mult *= 4;
5344 } else
5345 ppd->link_width_active = IB_WIDTH_1X;
5346 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5347
5348 if (!ibup) {
5349 u64 clr;
5350
5351 /* Link went down. */
5352 /* do IPG MAD again after linkdown, even if last time failed */
5353 ppd->cpspec->ipg_tries = 0;
5354 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5355 (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5356 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5357 if (clr)
5358 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5359 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5360 QIBL_IB_AUTONEG_INPROG)))
5361 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5362 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5363 qib_cancel_sends(ppd);
5364 spin_lock_irqsave(&ppd->sdma_lock, flags);
5365 if (__qib_sdma_running(ppd))
5366 __qib_sdma_process_event(ppd,
5367 qib_sdma_event_e70_go_idle);
5368 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5369 }
5370 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5371 if (clr == ppd->cpspec->iblnkdownsnap)
5372 ppd->cpspec->iblnkdowndelta++;
5373 } else {
5374 if (qib_compat_ddr_negotiate &&
5375 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5376 QIBL_IB_AUTONEG_INPROG)) &&
5377 ppd->link_speed_active == QIB_IB_SDR &&
5378 (ppd->link_speed_enabled & QIB_IB_DDR)
5379 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5380 /* we are SDR, and auto-negotiation enabled */
5381 ++ppd->cpspec->autoneg_tries;
5382 if (!ppd->cpspec->ibdeltainprog) {
5383 ppd->cpspec->ibdeltainprog = 1;
5384 ppd->cpspec->ibsymdelta +=
5385 read_7322_creg32_port(ppd,
5386 crp_ibsymbolerr) -
5387 ppd->cpspec->ibsymsnap;
5388 ppd->cpspec->iblnkerrdelta +=
5389 read_7322_creg32_port(ppd,
5390 crp_iblinkerrrecov) -
5391 ppd->cpspec->iblnkerrsnap;
5392 }
5393 try_7322_autoneg(ppd);
5394 ret = 1; /* no other IB status change processing */
5395 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5396 ppd->link_speed_active == QIB_IB_SDR) {
5397 qib_autoneg_7322_send(ppd, 1);
5398 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5399 qib_7322_mini_pcs_reset(ppd);
5400 udelay(2);
5401 ret = 1; /* no other IB status change processing */
5402 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5403 (ppd->link_speed_active & QIB_IB_DDR)) {
5404 spin_lock_irqsave(&ppd->lflags_lock, flags);
5405 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5406 QIBL_IB_AUTONEG_FAILED);
5407 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5408 ppd->cpspec->autoneg_tries = 0;
5409 /* re-enable SDR, for next link down */
5410 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5411 wake_up(&ppd->cpspec->autoneg_wait);
5412 symadj = 1;
5413 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5414 /*
5415 * Clear autoneg failure flag, and do setup
5416 * so we'll try next time link goes down and
5417 * back to INIT (possibly connected to a
5418 * different device).
5419 */
5420 spin_lock_irqsave(&ppd->lflags_lock, flags);
5421 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5422 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5423 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5424 symadj = 1;
5425 }
5426 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5427 symadj = 1;
5428 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5429 try_7322_ipg(ppd);
5430 if (!ppd->cpspec->recovery_init)
5431 setup_7322_link_recovery(ppd, 0);
5432 ppd->cpspec->qdr_dfe_time = jiffies +
5433 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5434 }
5435 ppd->cpspec->ibmalfusesnap = 0;
5436 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5437 crp_errlink);
5438 }
5439 if (symadj) {
5440 ppd->cpspec->iblnkdownsnap =
5441 read_7322_creg32_port(ppd, crp_iblinkdown);
5442 if (ppd->cpspec->ibdeltainprog) {
5443 ppd->cpspec->ibdeltainprog = 0;
5444 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5445 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5446 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5447 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5448 }
5449 } else if (!ibup && qib_compat_ddr_negotiate &&
5450 !ppd->cpspec->ibdeltainprog &&
5451 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5452 ppd->cpspec->ibdeltainprog = 1;
5453 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5454 crp_ibsymbolerr);
5455 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5456 crp_iblinkerrrecov);
5457 }
5458
5459 if (!ret)
5460 qib_setup_7322_setextled(ppd, ibup);
5461 return ret;
5462}
5463
5464/*
5465 * Does read/modify/write to appropriate registers to
5466 * set output and direction bits selected by mask.
5467 * these are in their canonical positions (e.g. lsb of
5468 * dir will end up in D48 of extctrl on existing chips).
5469 * returns contents of GP Inputs.
5470 */
5471static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5472{
5473 u64 read_val, new_out;
5474 unsigned long flags;
5475
5476 if (mask) {
5477 /* some bits being written, lock access to GPIO */
5478 dir &= mask;
5479 out &= mask;
5480 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5481 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5482 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5483 new_out = (dd->cspec->gpio_out & ~mask) | out;
5484
5485 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5486 qib_write_kreg(dd, kr_gpio_out, new_out);
5487 dd->cspec->gpio_out = new_out;
5488 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5489 }
5490 /*
5491 * It is unlikely that a read at this time would get valid
5492 * data on a pin whose direction line was set in the same
5493 * call to this function. We include the read here because
5494 * that allows us to potentially combine a change on one pin with
5495 * a read on another, and because the old code did something like
5496 * this.
5497 */
5498 read_val = qib_read_kreg64(dd, kr_extstatus);
5499 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5500}
5501
5502/* Enable writes to config EEPROM, if possible. Returns previous state */
5503static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5504{
5505 int prev_wen;
5506 u32 mask;
5507
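 /*
  * The write-enable GPIO is treated as active-low here: a mask of 0 just
  * samples the GPIO inputs, the inverted input bit gives the previous
  * enable state, and the pin is then driven low to enable writes or high
  * to disable them.
  */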
5508 mask = 1 << QIB_EEPROM_WEN_NUM;
5509 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5510 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5511
5512 return prev_wen & 1;
5513}
5514
5515/*
5516 * Read fundamental info we need to use the chip. These are
5517 * the registers that describe chip capabilities, and are
5518 * saved in shadow registers.
5519 */
5520static void get_7322_chip_params(struct qib_devdata *dd)
5521{
5522 u64 val;
5523 u32 piobufs;
5524 int mtu;
5525
5526 dd->palign = qib_read_kreg32(dd, kr_pagealign);
5527
5528 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5529
5530 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5531 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5532 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5533 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5534 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5535
5536 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5537 dd->piobcnt2k = val & ~0U;
5538 dd->piobcnt4k = val >> 32;
5539 val = qib_read_kreg64(dd, kr_sendpiosize);
5540 dd->piosize2k = val & ~0U;
5541 dd->piosize4k = val >> 32;
5542
5543 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5544 if (mtu == -1)
5545 mtu = QIB_DEFAULT_MTU;
5546 dd->pport[0].ibmtu = (u32)mtu;
5547 dd->pport[1].ibmtu = (u32)mtu;
5548
5549 /* these may be adjusted in init_chip_wc_pat() */
5550 dd->pio2kbase = (u32 __iomem *)
5551 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5552 dd->pio4kbase = (u32 __iomem *)
5553 ((char __iomem *) dd->kregbase +
5554 (dd->piobufbase >> 32));
5555 /*
5556 * 4K buffers take 2 pages; we use roundup just to be
5557 * paranoid; we calculate it once here, rather than on
5558 * every buf allocation
5559 */
5560 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5561
5562 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5563
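 /*
  * Each buffer uses 2 bits in the send-buffer-available registers, so
  * one 64-bit register covers 32 buffers; pioavregs is the buffer count
  * rounded up to a multiple of 32, divided by 32.
  */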
5564 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5565 (sizeof(u64) * BITS_PER_BYTE / 2);
5566}
5567
5568/*
5569 * The chip base addresses in cspec and cpspec have to be set
5570 * after possible init_chip_wc_pat(), rather than in
5571 * get_7322_chip_params(), so split out as separate function
5572 */
5573static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5574{
5575 u32 cregbase;
5576 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5577
5578 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5579 (char __iomem *)dd->kregbase);
5580
5581 dd->egrtidbase = (u64 __iomem *)
5582 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5583
5584 /* port registers are defined as relative to base of chip */
5585 dd->pport[0].cpspec->kpregbase =
5586 (u64 __iomem *)((char __iomem *)dd->kregbase);
5587 dd->pport[1].cpspec->kpregbase =
5588 (u64 __iomem *)(dd->palign +
5589 (char __iomem *)dd->kregbase);
5590 dd->pport[0].cpspec->cpregbase =
5591 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5592 kr_counterregbase) + (char __iomem *)dd->kregbase);
5593 dd->pport[1].cpspec->cpregbase =
5594 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5595 kr_counterregbase) + (char __iomem *)dd->kregbase);
5596}
5597
5598/*
5599 * This is a fairly special-purpose observer, so we only support
5600 * the port-specific parts of SendCtrl
5601 */
5602
5603#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5604 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5605 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5606 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5607 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5608 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5609 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5610
5611static int sendctrl_hook(struct qib_devdata *dd,
5612 const struct diag_observer *op, u32 offs,
5613 u64 *data, u64 mask, int only_32)
5614{
5615 unsigned long flags;
5616 unsigned idx;
5617 unsigned pidx;
5618 struct qib_pportdata *ppd = NULL;
5619 u64 local_data, all_bits;
5620
5621 /*
5622 * The fixed correspondence between Physical ports and pports is
5623 * severed. We need to hunt for the ppd that corresponds
5624 * to the offset we got. And we have to do that without admitting
5625 * we know the stride, apparently.
5626 */
5627 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5628 u64 __iomem *psptr;
5629 u32 psoffs;
5630
5631 ppd = dd->pport + pidx;
5632 if (!ppd->cpspec->kpregbase)
5633 continue;
5634
5635 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5636 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5637 if (psoffs == offs)
5638 break;
5639 }
5640
5641 /* If pport is not being managed by driver, just avoid shadows. */
5642 if (pidx >= dd->num_pports)
5643 ppd = NULL;
5644
5645 /* In any case, "idx" is flat index in kreg space */
5646 idx = offs / sizeof(u64);
5647
5648 all_bits = ~0ULL;
5649 if (only_32)
5650 all_bits >>= 32;
5651
5652 spin_lock_irqsave(&dd->sendctrl_lock, flags);
5653 if (!ppd || (mask & all_bits) != all_bits) {
5654 /*
5655 * At least some mask bits are zero, so we need
5656 * to read. The judgement call is whether from
5657 * reg or shadow. First-cut: read reg, and complain
5658 * if any bits which should be shadowed are different
5659 * from their shadowed value.
5660 */
5661 if (only_32)
5662 local_data = (u64)qib_read_kreg32(dd, idx);
5663 else
5664 local_data = qib_read_kreg64(dd, idx);
5665 *data = (local_data & ~mask) | (*data & mask);
5666 }
5667 if (mask) {
5668 /*
5669 * At least some mask bits are one, so we need
5670 * to write, but only shadow some bits.
5671 */
5672 u64 sval, tval; /* Shadowed, transient */
5673
5674 /*
5675 * New shadow val is bits we don't want to touch,
5676 * ORed with bits we do, that are intended for shadow.
5677 */
5678 if (ppd) {
5679 sval = ppd->p_sendctrl & ~mask;
5680 sval |= *data & SENDCTRL_SHADOWED & mask;
5681 ppd->p_sendctrl = sval;
5682 } else
5683 sval = *data & SENDCTRL_SHADOWED & mask;
5684 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5685 qib_write_kreg(dd, idx, tval);
5686 qib_write_kreg(dd, kr_scratch, 0ULL);
5687 }
5688 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5689 return only_32 ? 4 : 8;
5690}
5691
5692static const struct diag_observer sendctrl_0_observer = {
5693 sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5694 KREG_IDX(SendCtrl_0) * sizeof(u64)
5695};
5696
5697static const struct diag_observer sendctrl_1_observer = {
5698 sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5699 KREG_IDX(SendCtrl_1) * sizeof(u64)
5700};
5701
5702static ushort sdma_fetch_prio = 8;
5703module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5704MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5705
5706/* Besides logging QSFP events, we set appropriate TxDDS values */
5707static void init_txdds_table(struct qib_pportdata *ppd, int override);
5708
5709static void qsfp_7322_event(struct work_struct *work)
5710{
5711 struct qib_qsfp_data *qd;
5712 struct qib_pportdata *ppd;
5713 u64 pwrup;
5714 int ret;
5715 u32 le2;
5716
5717 qd = container_of(work, struct qib_qsfp_data, work);
5718 ppd = qd->ppd;
5719 pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
5720
5721 /*
5722 * Some QSFPs not only do not respond until the full power-up
5723 * time, but may behave badly if we try. So hold off responding
5724 * to insertion.
5725 */
5726 while (1) {
5727 u64 now = get_jiffies_64();
5728 if (time_after64(now, pwrup))
5729 break;
5730 msleep(1);
5731 }
5732 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5733 /*
5734 * Need to change LE2 back to defaults if we couldn't
5735 * read the cable type (to handle cable swaps), so do this
5736 * even on failure to read cable information. We don't
5737 * get here for QME, so IS_QME check not needed here.
5738 */
5739 le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
5740 !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
5741 LE2_5m : LE2_DEFAULT;
5742 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5743 init_txdds_table(ppd, 0);
5744}
5745
5746/*
5747 * There is little we can do but complain to the user if QSFP
5748 * initialization fails.
5749 */
5750static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5751{
5752 unsigned long flags;
5753 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5754 struct qib_devdata *dd = ppd->dd;
5755 u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5756
5757 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5758 qd->ppd = ppd;
5759 qib_qsfp_init(qd, qsfp_7322_event);
5760 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5761 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5762 dd->cspec->gpio_mask |= mod_prs_bit;
5763 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5764 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5765 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5766}
5767
5768/*
5769 * called at device initialization time, and also if the cable_atten
5770 * module parameter is changed. This is used for cables that don't
5771 * have valid QSFP EEPROMs (not present, or attenuation is zero).
5772 * We initialize to the default, then if there is a specific
5773 * unit,port match, we use that.
5774 * String format is "default# unit#,port#=# ... u,p=#", separators must
5775 * be a SPACE character. A newline terminates.
5776 * The last specific match is used (actually, all are used, but last
5777 * one is the one that winds up set); if none at all, fall back on default.
5778 */
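/*
 * Illustrative (hypothetical) example of the format described above:
 * "3 1,1=5 1,2=7" sets the default index to 3, then overrides unit 1
 * port 1 with index 5 and unit 1 port 2 with index 7.
 */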
5779static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5780{
5781 char *nxt, *str;
5782 int pidx, unit, port, deflt;
5783 unsigned long val;
5784 int any = 0;
5785
5786 str = cable_atten_list;
5787
5788 /* default number is validated in setup_cable_atten() */
5789 deflt = simple_strtoul(str, &nxt, 0);
5790 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5791 dd->pport[pidx].cpspec->no_eep = deflt;
5792
5793 while (*nxt && nxt[1]) {
5794 str = ++nxt;
5795 unit = simple_strtoul(str, &nxt, 0);
5796 if (nxt == str || !*nxt || *nxt != ',') {
5797 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5798 ;
5799 continue;
5800 }
5801 str = ++nxt;
5802 port = simple_strtoul(str, &nxt, 0);
5803 if (nxt == str || *nxt != '=') {
5804 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5805 ;
5806 continue;
5807 }
5808 str = ++nxt;
5809 val = simple_strtoul(str, &nxt, 0);
5810 if (nxt == str) {
5811 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5812 ;
5813 continue;
5814 }
5815 if (val >= TXDDS_TABLE_SZ)
5816 continue;
5817 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5818 ++pidx) {
5819 if (dd->pport[pidx].port != port ||
5820 !dd->pport[pidx].link_speed_supported)
5821 continue;
5822 dd->pport[pidx].cpspec->no_eep = val;
5823 /* now change the IBC and serdes, overriding generic */
5824 init_txdds_table(&dd->pport[pidx], 1);
5825 any++;
5826 }
5827 if (*nxt == '\n')
5828 break; /* done */
5829 }
5830 if (change && !any) {
5831 /* no specific setting, use the default.
5832 * Change the IBC and serdes, but since it's
5833 * general, don't override specific settings.
5834 */
5835 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5836 if (!dd->pport[pidx].link_speed_supported)
5837 continue;
5838 init_txdds_table(&dd->pport[pidx], 0);
5839 }
5840 }
5841}
5842
5843/* handle the cable_atten parameter changing */
5844static int setup_cable_atten(const char *str, struct kernel_param *kp)
5845{
5846 struct qib_devdata *dd;
5847 unsigned long val;
5848 char *n;
5849 if (strlen(str) >= MAX_ATTEN_LEN) {
5850 printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string "
5851 "too long\n");
5852 return -ENOSPC;
5853 }
5854 val = simple_strtoul(str, &n, 0);
5855 if (n == str || val >= TXDDS_TABLE_SZ) {
5856 printk(KERN_INFO QIB_DRV_NAME
5857 " cable_atten_values must start with a number\n");
5858 return -EINVAL;
5859 }
5860 strcpy(cable_atten_list, str);
5861
5862 list_for_each_entry(dd, &qib_dev_list, list)
5863 set_no_qsfp_atten(dd, 1);
5864 return 0;
5865}
5866
5867/*
5868 * Write the final few registers that depend on some of the
5869 * init setup. Done late in init, just before bringing up
5870 * the serdes.
5871 */
5872static int qib_late_7322_initreg(struct qib_devdata *dd)
5873{
5874 int ret = 0, n;
5875 u64 val;
5876
5877 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5878 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5879 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5880 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5881 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5882 if (val != dd->pioavailregs_phys) {
5883 qib_dev_err(dd, "Catastrophic software error, "
5884 "SendPIOAvailAddr written as %lx, "
5885 "read back as %llx\n",
5886 (unsigned long) dd->pioavailregs_phys,
5887 (unsigned long long) val);
5888 ret = -EINVAL;
5889 }
5890
5891 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5892 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5893 /* driver sends get pkey, lid, etc. checking also, to catch bugs */
5894 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5895
5896 qib_register_observer(dd, &sendctrl_0_observer);
5897 qib_register_observer(dd, &sendctrl_1_observer);
5898
5899 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5900 qib_write_kreg(dd, kr_control, dd->control);
5901 /*
5902 * Set SendDmaFetchPriority and init Tx params, including
5903 * QSFP handler on boards that have QSFP.
5904 * First set our default attenuation entry for cables that
5905 * don't have valid attenuation.
5906 */
5907 set_no_qsfp_atten(dd, 0);
5908 for (n = 0; n < dd->num_pports; ++n) {
5909 struct qib_pportdata *ppd = dd->pport + n;
5910
5911 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5912 sdma_fetch_prio & 0xf);
5913 /* Initialize qsfp if present on board. */
5914 if (dd->flags & QIB_HAS_QSFP)
5915 qib_init_7322_qsfp(ppd);
5916 }
5917 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5918 qib_write_kreg(dd, kr_control, dd->control);
5919
5920 return ret;
5921}
5922
5923/* per IB port errors. */
5924#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5925 MASK_ACROSS(8, 15))
5926#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5927#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5928 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5929 MASK_ACROSS(0, 11))
5930
5931/*
5932 * Write the initialization per-port registers that need to be done at
5933 * driver load and after reset completes (i.e., that aren't done as part
5934 * of other init procedures called from qib_init.c).
5935 * Some of these should be redundant on reset, but play safe.
5936 */
5937static void write_7322_init_portregs(struct qib_pportdata *ppd)
5938{
5939 u64 val;
5940 int i;
5941
5942 if (!ppd->link_speed_supported) {
5943 /* no buffer credits for this port */
5944 for (i = 1; i < 8; i++)
5945 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5946 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5947 qib_write_kreg(ppd->dd, kr_scratch, 0);
5948 return;
5949 }
5950
5951 /*
5952 * Set the number of supported virtual lanes in IBC,
5953 * for flow control packet handling on unsupported VLs
5954 */
5955 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5956 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5957 val |= (u64)(ppd->vls_supported - 1) <<
5958 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5959 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5960
5961 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5962
5963 /* enable tx header checking */
5964 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5965 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5966 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5967
5968 qib_write_kreg_port(ppd, krp_ncmodectrl,
5969 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5970
5971 /*
5972 * Unconditionally clear the bufmask bits. If SDMA is
5973 * enabled, we'll set them appropriately later.
5974 */
5975 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5976 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5977 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5978 if (ppd->dd->cspec->r1)
5979 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5980}
5981
5982/*
5983 * Write the initialization per-device registers that need to be done at
5984 * driver load and after reset completes (i.e., that aren't done as part
5985 * of other init procedures called from qib_init.c). Also write per-port
5986 * registers that are affected by overall device config, such as QP mapping.
5987 * Some of these should be redundant on reset, but play safe.
5988 */
5989static void write_7322_initregs(struct qib_devdata *dd)
5990{
5991 struct qib_pportdata *ppd;
5992 int i, pidx;
5993 u64 val;
5994
5995 /* Set Multicast QPs received by port 2 to map to context one. */
5996 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
5997
5998 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5999 unsigned n, regno;
6000 unsigned long flags;
6001
6002 if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
6003 continue;
6004
6005 ppd = &dd->pport[pidx];
6006
6007 /* be paranoid against later code motion, etc. */
6008 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6009 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6010 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6011
6012 /* Initialize QP to context mapping */
6013 regno = krp_rcvqpmaptable;
6014 val = 0;
6015 if (dd->num_pports > 1)
6016 n = dd->first_user_ctxt / dd->num_pports;
6017 else
6018 n = dd->first_user_ctxt - 1;
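 /*
  * Each 64-bit map register packs six 5-bit context numbers, and 32
  * QP-mapping entries are spread across successive registers.  As a
  * hypothetical example for the dual-port case, with n kernel contexts
  * per port, entry N maps to context (N % n) * num_pports + pidx.
  */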
6019 for (i = 0; i < 32; ) {
6020 unsigned ctxt;
6021
6022 if (dd->num_pports > 1)
6023 ctxt = (i % n) * dd->num_pports + pidx;
6024 else if (i % n)
6025 ctxt = (i % n) + 1;
6026 else
6027 ctxt = ppd->hw_pidx;
6028 val |= ctxt << (5 * (i % 6));
6029 i++;
6030 if (i % 6 == 0) {
6031 qib_write_kreg_port(ppd, regno, val);
6032 val = 0;
6033 regno++;
6034 }
6035 }
6036 qib_write_kreg_port(ppd, regno, val);
6037 }
6038
6039 /*
6040 * Set up interrupt mitigation for kernel contexts, but
6041 * not user contexts (user contexts use interrupts when
6042 * stalled waiting for any packet, so want those interrupts
6043 * right away).
6044 */
6045 for (i = 0; i < dd->first_user_ctxt; i++) {
6046 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6047 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6048 }
6049
6050 /*
6051 * Initialize as (disabled) rcvflow tables. Application code
6052 * will set up each flow as it uses the flow.
6053 * Doesn't clear any of the error bits that might be set.
6054 */
6055 val = TIDFLOW_ERRBITS; /* these are W1C */
6056 for (i = 0; i < dd->ctxtcnt; i++) {
6057 int flow;
6058 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6059 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6060 }
6061
6062 /*
6063 * Dual-port cards init to dual-port recovery, single-port cards to
6064 * the one port. Dual-port cards may later adjust to 1 port,
6065 * and then back to dual port if both ports are connected.
6066 */
6067 if (dd->num_pports)
6068 setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6069}
6070
6071static int qib_init_7322_variables(struct qib_devdata *dd)
6072{
6073 struct qib_pportdata *ppd;
6074 unsigned features, pidx, sbufcnt;
6075 int ret, mtu;
6076 u32 sbufs, updthresh;
6077
6078 /* pport structs are contiguous, allocated after devdata */
6079 ppd = (struct qib_pportdata *)(dd + 1);
6080 dd->pport = ppd;
6081 ppd[0].dd = dd;
6082 ppd[1].dd = dd;
6083
6084 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6085
6086 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6087 ppd[1].cpspec = &ppd[0].cpspec[1];
6088 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6089 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6090
6091 spin_lock_init(&dd->cspec->rcvmod_lock);
6092 spin_lock_init(&dd->cspec->gpio_lock);
6093
6094 /* we haven't yet set QIB_PRESENT, so use read directly */
6095 dd->revision = readq(&dd->kregbase[kr_revision]);
6096
6097 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6098 qib_dev_err(dd, "Revision register read failure, "
6099 "giving up initialization\n");
6100 ret = -ENODEV;
6101 goto bail;
6102 }
6103 dd->flags |= QIB_PRESENT; /* now register routines work */
6104
6105 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6106 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6107 dd->cspec->r1 = dd->minrev == 1;
6108
6109 get_7322_chip_params(dd);
6110 features = qib_7322_boardname(dd);
6111
6112 /* now that piobcnt2k and 4k set, we can allocate these */
6113 sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6114 NUM_VL15_BUFS + BITS_PER_LONG - 1;
6115 sbufcnt /= BITS_PER_LONG;
6116 dd->cspec->sendchkenable = kmalloc(sbufcnt *
6117 sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6118 dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6119 sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6120 dd->cspec->sendibchk = kmalloc(sbufcnt *
6121 sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6122 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6123 !dd->cspec->sendibchk) {
6124 qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6125 ret = -ENOMEM;
6126 goto bail;
6127 }
6128
6129 ppd = dd->pport;
6130
6131 /*
6132 * GPIO bits for TWSI data and clock,
6133 * used for serial EEPROM.
6134 */
6135 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6136 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6137 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6138
6139 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6140 QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6141 QIB_HAS_THRESH_UPDATE |
6142 (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6143 dd->flags |= qib_special_trigger ?
6144 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6145
6146 /*
6147 * Setup initial values. These may change when PAT is enabled, but
6148 * we need these to do initial chip register accesses.
6149 */
6150 qib_7322_set_baseaddrs(dd);
6151
6152 mtu = ib_mtu_enum_to_int(qib_ibmtu);
6153 if (mtu == -1)
6154 mtu = QIB_DEFAULT_MTU;
6155
6156 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6157 /* all hwerrors become interrupts, unless special purposed */
6158 dd->cspec->hwerrmask = ~0ULL;
6159 /* link_recovery setup causes these errors, so ignore them,
6160 * other than clearing them when they occur */
6161 dd->cspec->hwerrmask &=
6162 ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6163 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6164 HWE_MASK(LATriggered));
6165
6166 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6167 struct qib_chippport_specific *cp = ppd->cpspec;
6168 ppd->link_speed_supported = features & PORT_SPD_CAP;
6169 features >>= PORT_SPD_CAP_SHIFT;
6170 if (!ppd->link_speed_supported) {
6171 /* single port mode (7340, or configured) */
6172 dd->skip_kctxt_mask |= 1 << pidx;
6173 if (pidx == 0) {
6174 /* Make sure port is disabled. */
6175 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6176 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6177 ppd[0] = ppd[1];
6178 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6179 IBSerdesPClkNotDetectMask_0)
6180 | SYM_MASK(HwErrMask,
6181 SDmaMemReadErrMask_0));
6182 dd->cspec->int_enable_mask &= ~(
6183 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6184 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6185 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6186 SYM_MASK(IntMask, SDmaIntMask_0) |
6187 SYM_MASK(IntMask, ErrIntMask_0) |
6188 SYM_MASK(IntMask, SendDoneIntMask_0));
6189 } else {
6190 /* Make sure port is disabled. */
6191 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6192 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6193 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6194 IBSerdesPClkNotDetectMask_1)
6195 | SYM_MASK(HwErrMask,
6196 SDmaMemReadErrMask_1));
6197 dd->cspec->int_enable_mask &= ~(
6198 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6199 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6200 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6201 SYM_MASK(IntMask, SDmaIntMask_1) |
6202 SYM_MASK(IntMask, ErrIntMask_1) |
6203 SYM_MASK(IntMask, SendDoneIntMask_1));
6204 }
6205 continue;
6206 }
6207
6208 dd->num_pports++;
6209 qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6210
6211 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6212 ppd->link_width_enabled = IB_WIDTH_4X;
6213 ppd->link_speed_enabled = ppd->link_speed_supported;
6214 /*
6215 * Set the initial values to reasonable default, will be set
6216 * for real when link is up.
6217 */
6218 ppd->link_width_active = IB_WIDTH_4X;
6219 ppd->link_speed_active = QIB_IB_SDR;
6220 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6221 switch (qib_num_cfg_vls) {
6222 case 1:
6223 ppd->vls_supported = IB_VL_VL0;
6224 break;
6225 case 2:
6226 ppd->vls_supported = IB_VL_VL0_1;
6227 break;
6228 default:
6229 qib_devinfo(dd->pcidev,
6230 "Invalid num_vls %u, using 4 VLs\n",
6231 qib_num_cfg_vls);
6232 qib_num_cfg_vls = 4;
6233 /* fall through */
6234 case 4:
6235 ppd->vls_supported = IB_VL_VL0_3;
6236 break;
6237 case 8:
6238 if (mtu <= 2048)
6239 ppd->vls_supported = IB_VL_VL0_7;
6240 else {
6241 qib_devinfo(dd->pcidev,
6242 "Invalid num_vls %u for MTU %d "
6243 ", using 4 VLs\n",
6244 qib_num_cfg_vls, mtu);
6245 ppd->vls_supported = IB_VL_VL0_3;
6246 qib_num_cfg_vls = 4;
6247 }
6248 break;
6249 }
6250 ppd->vls_operational = ppd->vls_supported;
6251
6252 init_waitqueue_head(&cp->autoneg_wait);
6253 INIT_DELAYED_WORK(&cp->autoneg_work,
6254 autoneg_7322_work);
6255 if (ppd->dd->cspec->r1)
6256 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6257
6258 /*
6259 * For Mez and similar cards, no qsfp info, so do
6260 * the "cable info" setup here. Can be overridden
6261 * in adapter-specific routines.
6262 */
6263 if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
6264 int i;
6265 const struct txdds_ent *txdds;
6266
6267 if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
6268 qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
6269 "Unknown mezzanine card type\n",
6270 ppd->dd->unit, ppd->port);
6271 txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds :
6272 &qme_qdr_txdds;
6273
6274 /*
6275 * set values in case link comes up
6276 * before table is written to driver.
6277 */
6278 cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH :
6279 H1_FORCE_QME;
6280 for (i = 0; i < SERDES_CHANS; i++) {
6281 cp->amp[i] = txdds->amp;
6282 cp->pre[i] = txdds->pre;
6283 cp->mainv[i] = txdds->main;
6284 cp->post[i] = txdds->post;
6285 }
6286 } else
6287 cp->h1_val = H1_FORCE_VAL;
6288
6289 /* Avoid writes to chip for mini_init */
6290 if (!qib_mini_init)
6291 write_7322_init_portregs(ppd);
6292
6293 init_timer(&cp->chase_timer);
6294 cp->chase_timer.function = reenable_chase;
6295 cp->chase_timer.data = (unsigned long)ppd;
6296
6297 ppd++;
6298 }
6299
6300 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
6301 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
6302 dd->rhf_offset =
6303 dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6304
6305 /* we always allocate at least 2048 bytes for eager buffers */
6306 dd->rcvegrbufsize = max(mtu, 2048);
6307
6308 qib_7322_tidtemplate(dd);
6309
6310 /*
6311 * We can request a receive interrupt for 1 or
6312 * more packets from current offset.
6313 */
6314 dd->rhdrhead_intr_off =
6315 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6316
6317 /* setup the stats timer; the add_timer is done at end of init */
6318 init_timer(&dd->stats_timer);
6319 dd->stats_timer.function = qib_get_7322_faststats;
6320 dd->stats_timer.data = (unsigned long) dd;
6321
6322 dd->ureg_align = 0x10000; /* 64KB alignment */
6323
6324 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6325
6326 qib_7322_config_ctxts(dd);
6327 qib_set_ctxtcnt(dd);
6328
6329 if (qib_wc_pat) {
6330 ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
6331 if (ret)
6332 goto bail;
6333 }
6334 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6335
6336 ret = 0;
6337 if (qib_mini_init)
6338 goto bail;
6339 if (!dd->num_pports) {
6340 qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6341 goto bail; /* no error, so can still figure out why err */
6342 }
6343
6344 write_7322_initregs(dd);
6345 ret = qib_create_ctxts(dd);
6346 init_7322_cntrnames(dd);
6347
6348 updthresh = 8U; /* update threshold */
6349
6350 /* Use all of the 4KB buffers for kernel SDMA (zero if SDMA is disabled).
6351 * Reserve the update threshold amount of buffers for other kernel
6352 * use, such as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6353 * unless we aren't enabling SDMA, in which case we want to use
6354 * all the 4k bufs for the kernel.
6355 * If the reserve were less than the update threshold, we could wait
6356 * a long time for an update. Coded this way because we
6357 * sometimes change the update threshold for various reasons,
6358 * and we want this to remain robust.
6359 */
6360 if (dd->flags & QIB_HAS_SEND_DMA) {
6361 dd->cspec->sdmabufcnt = dd->piobcnt4k;
6362 sbufs = updthresh > 3 ? updthresh : 3;
6363 } else {
6364 dd->cspec->sdmabufcnt = 0;
6365 sbufs = dd->piobcnt4k;
6366 }
6367 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6368 dd->cspec->sdmabufcnt;
6369 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6370 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6371 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6372 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6373
6374 /*
6375 * If we have 16 user contexts, we will have 7 sbufs
6376 * per context, so reduce the update threshold to match. We
6377 * want to update before we actually run out, at low pbufs/ctxt
6378 * so give ourselves some margin.
6379 */
6380 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6381 updthresh = dd->pbufsctxt - 2;
6382 dd->cspec->updthresh_dflt = updthresh;
6383 dd->cspec->updthresh = updthresh;
6384
6385 /* before full enable, no interrupts, no locking needed */
6386 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6387 << SYM_LSB(SendCtrl, AvailUpdThld)) |
6388 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6389
6390 dd->psxmitwait_supported = 1;
6391 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6392bail:
6393 if (!dd->ctxtcnt)
6394 dd->ctxtcnt = 1; /* for other initialization code */
6395
6396 return ret;
6397}
6398
6399static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6400 u32 *pbufnum)
6401{
6402 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6403 struct qib_devdata *dd = ppd->dd;
6404
6405 /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6406 if (pbc & PBC_7322_VL15_SEND) {
6407 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6408 last = first;
6409 } else {
6410 if ((plen + 1) > dd->piosize2kmax_dwords)
6411 first = dd->piobcnt2k;
6412 else
6413 first = 0;
6414 last = dd->cspec->lastbuf_for_pio;
6415 }
6416 return qib_getsendbuf_range(dd, pbufnum, first, last);
6417}
6418
6419static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6420 u32 start)
6421{
6422 qib_write_kreg_port(ppd, krp_psinterval, intv);
6423 qib_write_kreg_port(ppd, krp_psstart, start);
6424}
6425
6426/*
6427 * Must be called with sdma_lock held, or before init finished.
6428 */
6429static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6430{
6431 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6432}
6433
6434static struct sdma_set_state_action sdma_7322_action_table[] = {
6435 [qib_sdma_state_s00_hw_down] = {
6436 .go_s99_running_tofalse = 1,
6437 .op_enable = 0,
6438 .op_intenable = 0,
6439 .op_halt = 0,
6440 .op_drain = 0,
6441 },
6442 [qib_sdma_state_s10_hw_start_up_wait] = {
6443 .op_enable = 0,
6444 .op_intenable = 1,
6445 .op_halt = 1,
6446 .op_drain = 0,
6447 },
6448 [qib_sdma_state_s20_idle] = {
6449 .op_enable = 1,
6450 .op_intenable = 1,
6451 .op_halt = 1,
6452 .op_drain = 0,
6453 },
6454 [qib_sdma_state_s30_sw_clean_up_wait] = {
6455 .op_enable = 0,
6456 .op_intenable = 1,
6457 .op_halt = 1,
6458 .op_drain = 0,
6459 },
6460 [qib_sdma_state_s40_hw_clean_up_wait] = {
6461 .op_enable = 1,
6462 .op_intenable = 1,
6463 .op_halt = 1,
6464 .op_drain = 0,
6465 },
6466 [qib_sdma_state_s50_hw_halt_wait] = {
6467 .op_enable = 1,
6468 .op_intenable = 1,
6469 .op_halt = 1,
6470 .op_drain = 1,
6471 },
6472 [qib_sdma_state_s99_running] = {
6473 .op_enable = 1,
6474 .op_intenable = 1,
6475 .op_halt = 0,
6476 .op_drain = 0,
6477 .go_s99_running_totrue = 1,
6478 },
6479};
6480
6481static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6482{
6483 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6484}
6485
6486static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6487{
6488 struct qib_devdata *dd = ppd->dd;
6489 unsigned lastbuf, erstbuf;
6490 u64 senddmabufmask[3] = { 0 };
6491 int n, ret = 0;
6492
6493 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6494 qib_sdma_7322_setlengen(ppd);
6495 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6496 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6497 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6498 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6499
6500 if (dd->num_pports)
6501 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6502 else
6503 n = dd->cspec->sdmabufcnt; /* failsafe for init */
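 /*
  * The SDMA buffers occupy the top of the PIO buffer space; with two
  * ports, port 1 takes the first half of that region and port 2 the
  * second half, so erstbuf below is the first buffer this port owns.
  */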
6504 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6505 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6506 dd->cspec->sdmabufcnt);
6507 lastbuf = erstbuf + n;
6508
6509 ppd->sdma_state.first_sendbuf = erstbuf;
6510 ppd->sdma_state.last_sendbuf = lastbuf;
6511 for (; erstbuf < lastbuf; ++erstbuf) {
6512 unsigned word = erstbuf / BITS_PER_LONG;
6513 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6514
6515 BUG_ON(word >= 3);
6516 senddmabufmask[word] |= 1ULL << bit;
6517 }
6518 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6519 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6520 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6521 return ret;
6522}
6523
6524/* sdma_lock must be held */
6525static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6526{
6527 struct qib_devdata *dd = ppd->dd;
6528 int sane;
6529 int use_dmahead;
6530 u16 swhead;
6531 u16 swtail;
6532 u16 cnt;
6533 u16 hwhead;
6534
6535 use_dmahead = __qib_sdma_running(ppd) &&
6536 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6537retry:
6538 hwhead = use_dmahead ?
6539 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6540 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6541
6542 swhead = ppd->sdma_descq_head;
6543 swtail = ppd->sdma_descq_tail;
6544 cnt = ppd->sdma_descq_cnt;
6545
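 /*
  * Sanity-check the head the hardware reports: it must lie between the
  * software head and tail, allowing for ring wrap-around.  If it does
  * not, retry once straight from the register (when the DMA'd copy was
  * used), then assume no progress was made.
  */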
6546 if (swhead < swtail)
6547 /* not wrapped */
6548 sane = (hwhead >= swhead) & (hwhead <= swtail);
6549 else if (swhead > swtail)
6550 /* wrapped around */
6551 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6552 (hwhead <= swtail);
6553 else
6554 /* empty */
6555 sane = (hwhead == swhead);
6556
6557 if (unlikely(!sane)) {
6558 if (use_dmahead) {
6559 /* try one more time, directly from the register */
6560 use_dmahead = 0;
6561 goto retry;
6562 }
6563 /* proceed as if no progress */
6564 hwhead = swhead;
6565 }
6566
6567 return hwhead;
6568}
6569
6570static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6571{
6572 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6573
6574 return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6575 (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6576 !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6577 !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6578}
6579
6580/*
6581 * Compute the amount of delay before sending the next packet if the
6582 * port's send rate differs from the static rate set for the QP.
6583 * The delay affects the next packet and the amount of the delay is
6584 * based on the length of this packet.
6585 */
6586static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6587 u8 srate, u8 vl)
6588{
6589 u8 snd_mult = ppd->delay_mult;
6590 u8 rcv_mult = ib_rate_to_delay[srate];
6591 u32 ret;
6592
6593 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6594
6595 /* Indicate VL15, else set the VL in the control word */
6596 if (vl == 15)
6597 ret |= PBC_7322_VL15_SEND_CTRL;
6598 else
6599 ret |= vl << PBC_VL_NUM_LSB;
6600 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6601
6602 return ret;
6603}
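
/*
 * Example of the rate-delay calculation above (hypothetical multipliers;
 * the real values come from delay_mult and ib_rate_to_delay[]): with
 * snd_mult = 2 and rcv_mult = 8, i.e. the QP static rate slower than the
 * port, a packet of plen = 64 gets a delay field of
 * ((64 + 1) >> 1) * 2 = 64; when rcv_mult <= snd_mult no delay is added.
 */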
6604
6605/*
6606 * Enable the per-port VL15 send buffers for use.
6607 * They follow the rest of the buffers, without a config parameter.
6608 * This was in initregs, but that is done before the shadow
6609 * is set up, and this has to be done after the shadow is
6610 * set up.
6611 */
6612static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6613{
6614 unsigned vl15bufs;
6615
6616 vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6617 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6618 TXCHK_CHG_TYPE_KERN, NULL);
6619}
6620
6621static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6622{
6623 if (rcd->ctxt < NUM_IB_PORTS) {
6624 if (rcd->dd->num_pports > 1) {
6625 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6626 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6627 } else {
6628 rcd->rcvegrcnt = KCTXT0_EGRCNT;
6629 rcd->rcvegr_tid_base = 0;
6630 }
6631 } else {
6632 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6633 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6634 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6635 }
6636}
6637
6638#define QTXSLEEPS 5000
6639static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6640 u32 len, u32 which, struct qib_ctxtdata *rcd)
6641{
6642 int i;
6643 const int last = start + len - 1;
6644 const int lastr = last / BITS_PER_LONG;
6645 u32 sleeps = 0;
6646 int wait = rcd != NULL;
6647 unsigned long flags;
6648
6649 while (wait) {
6650 unsigned long shadow;
6651 int cstart, previ = -1;
6652
6653 /*
6654 * when flipping from kernel to user, we can't change
6655 * the checking type if the buffer is allocated to the
6656 * driver. It's OK the other direction, because it's
6657		 * from close, and we have just disarmed all the
6658 * buffers. All the kernel to kernel changes are also
6659 * OK.
6660 */
6661 for (cstart = start; cstart <= last; cstart++) {
6662 i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6663 / BITS_PER_LONG;
6664 if (i != previ) {
6665 shadow = (unsigned long)
6666 le64_to_cpu(dd->pioavailregs_dma[i]);
6667 previ = i;
6668 }
6669 if (test_bit(((2 * cstart) +
6670 QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6671 % BITS_PER_LONG, &shadow))
6672 break;
6673 }
6674
6675 if (cstart > last)
6676 break;
6677
6678 if (sleeps == QTXSLEEPS)
6679 break;
6680 /* make sure we see an updated copy next time around */
6681 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6682 sleeps++;
6683 msleep(1);
6684 }
6685
6686 switch (which) {
6687 case TXCHK_CHG_TYPE_DIS1:
6688 /*
6689 * disable checking on a range; used by diags; just
6690 * one buffer, but still written generically
6691 */
6692 for (i = start; i <= last; i++)
6693 clear_bit(i, dd->cspec->sendchkenable);
6694 break;
6695
6696 case TXCHK_CHG_TYPE_ENAB1:
6697 /*
6698 * (re)enable checking on a range; used by diags; just
6699 * one buffer, but still written generically; read
6700 * scratch to be sure buffer actually triggered, not
6701 * just flushed from processor.
6702 */
6703 qib_read_kreg32(dd, kr_scratch);
6704 for (i = start; i <= last; i++)
6705 set_bit(i, dd->cspec->sendchkenable);
6706 break;
6707
6708 case TXCHK_CHG_TYPE_KERN:
6709 /* usable by kernel */
6710 for (i = start; i <= last; i++) {
6711 set_bit(i, dd->cspec->sendibchk);
6712 clear_bit(i, dd->cspec->sendgrhchk);
6713 }
6714 spin_lock_irqsave(&dd->uctxt_lock, flags);
6715 /* see if we need to raise avail update threshold */
6716 for (i = dd->first_user_ctxt;
6717 dd->cspec->updthresh != dd->cspec->updthresh_dflt
6718 && i < dd->cfgctxts; i++)
6719 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6720 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6721 < dd->cspec->updthresh_dflt)
6722 break;
6723 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6724 if (i == dd->cfgctxts) {
6725 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6726 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6727 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6728 dd->sendctrl |= (dd->cspec->updthresh &
6729 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6730 SYM_LSB(SendCtrl, AvailUpdThld);
6731 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6732 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6733 }
6734 break;
6735
6736 case TXCHK_CHG_TYPE_USER:
6737 /* for user process */
6738 for (i = start; i <= last; i++) {
6739 clear_bit(i, dd->cspec->sendibchk);
6740 set_bit(i, dd->cspec->sendgrhchk);
6741 }
6742 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6743 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6744 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6745 dd->cspec->updthresh = (rcd->piocnt /
6746 rcd->subctxt_cnt) - 1;
6747 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6748 dd->sendctrl |= (dd->cspec->updthresh &
6749 SYM_RMASK(SendCtrl, AvailUpdThld))
6750 << SYM_LSB(SendCtrl, AvailUpdThld);
6751 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6752 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6753 } else
6754 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6755 break;
6756
6757 default:
6758 break;
6759 }
6760
6761 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6762 qib_write_kreg(dd, kr_sendcheckmask + i,
6763 dd->cspec->sendchkenable[i]);
6764
6765 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6766 qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6767 dd->cspec->sendgrhchk[i]);
6768 qib_write_kreg(dd, kr_sendibpktmask + i,
6769 dd->cspec->sendibchk[i]);
6770 }
6771
6772 /*
6773 * Be sure whatever we did was seen by the chip and acted upon,
6774 * before we return. Mostly important for which >= 2.
6775 */
6776 qib_read_kreg32(dd, kr_scratch);
6777}
6778
6779
6780/* useful for trigger analyzers, etc. */
6781static void writescratch(struct qib_devdata *dd, u32 val)
6782{
6783 qib_write_kreg(dd, kr_scratch, val);
6784}
6785
6786/* Dummy for now, use chip regs soon */
6787static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
6788{
6789 return -ENXIO;
6790}
6791
6792/**
6793 * qib_init_iba7322_funcs - set up the chip-specific function pointers
6794 * @dev: the pci_dev for qlogic_ib device
6795 * @ent: pci_device_id struct for this dev
6796 *
6797 * Also allocates, inits, and returns the devdata struct for this
6798 * device instance
6799 *
6800 * This is global, and is called directly at init to set up the
6801 * chip-specific function pointers for later use.
6802 */
6803struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6804 const struct pci_device_id *ent)
6805{
6806 struct qib_devdata *dd;
6807 int ret, i;
6808 u32 tabsize, actual_cnt = 0;
6809
6810 dd = qib_alloc_devdata(pdev,
6811 NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6812 sizeof(struct qib_chip_specific) +
6813 NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6814 if (IS_ERR(dd))
6815 goto bail;
6816
6817 dd->f_bringup_serdes = qib_7322_bringup_serdes;
6818 dd->f_cleanup = qib_setup_7322_cleanup;
6819 dd->f_clear_tids = qib_7322_clear_tids;
6820 dd->f_free_irq = qib_7322_free_irq;
6821 dd->f_get_base_info = qib_7322_get_base_info;
6822 dd->f_get_msgheader = qib_7322_get_msgheader;
6823 dd->f_getsendbuf = qib_7322_getsendbuf;
6824 dd->f_gpio_mod = gpio_7322_mod;
6825 dd->f_eeprom_wen = qib_7322_eeprom_wen;
6826 dd->f_hdrqempty = qib_7322_hdrqempty;
6827 dd->f_ib_updown = qib_7322_ib_updown;
6828 dd->f_init_ctxt = qib_7322_init_ctxt;
6829 dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
6830 dd->f_intr_fallback = qib_7322_intr_fallback;
6831 dd->f_late_initreg = qib_late_7322_initreg;
6832 dd->f_setpbc_control = qib_7322_setpbc_control;
6833 dd->f_portcntr = qib_portcntr_7322;
6834 dd->f_put_tid = qib_7322_put_tid;
6835 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
6836 dd->f_rcvctrl = rcvctrl_7322_mod;
6837 dd->f_read_cntrs = qib_read_7322cntrs;
6838 dd->f_read_portcntrs = qib_read_7322portcntrs;
6839 dd->f_reset = qib_do_7322_reset;
6840 dd->f_init_sdma_regs = init_sdma_7322_regs;
6841 dd->f_sdma_busy = qib_sdma_7322_busy;
6842 dd->f_sdma_gethead = qib_sdma_7322_gethead;
6843 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
6844 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6845 dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
6846 dd->f_sendctrl = sendctrl_7322_mod;
6847 dd->f_set_armlaunch = qib_set_7322_armlaunch;
6848 dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
6849 dd->f_iblink_state = qib_7322_iblink_state;
6850 dd->f_ibphys_portstate = qib_7322_phys_portstate;
6851 dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
6852 dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
6853 dd->f_set_ib_loopback = qib_7322_set_loopback;
6854 dd->f_get_ib_table = qib_7322_get_ib_table;
6855 dd->f_set_ib_table = qib_7322_set_ib_table;
6856 dd->f_set_intr_state = qib_7322_set_intr_state;
6857 dd->f_setextled = qib_setup_7322_setextled;
6858 dd->f_txchk_change = qib_7322_txchk_change;
6859 dd->f_update_usrhead = qib_update_7322_usrhead;
6860 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
6861 dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
6862 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
6863 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
6864 dd->f_sdma_init_early = qib_7322_sdma_init_early;
6865 dd->f_writescratch = writescratch;
6866 dd->f_tempsense_rd = qib_7322_tempsense_rd;
6867 /*
6868 * Do remaining PCIe setup and save PCIe values in dd.
6869 * Any error printing is already done by the init code.
6870 * On return, we have the chip mapped, but chip registers
6871 * are not set up until start of qib_init_7322_variables.
6872 */
6873 ret = qib_pcie_ddinit(dd, pdev, ent);
6874 if (ret < 0)
6875 goto bail_free;
6876
6877 /* initialize chip-specific variables */
6878 ret = qib_init_7322_variables(dd);
6879 if (ret)
6880 goto bail_cleanup;
6881
6882 if (qib_mini_init || !dd->num_pports)
6883 goto bail;
6884
6885 /*
6886 * Determine number of vectors we want; depends on port count
6887 * and number of configured kernel receive queues actually used.
6888 * Should also depend on whether sdma is enabled or not, but
6889 * that's such a rare testing case it's not worth worrying about.
6890 */
6891 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6892 for (i = 0; i < tabsize; i++)
6893 if ((i < ARRAY_SIZE(irq_table) &&
6894 irq_table[i].port <= dd->num_pports) ||
6895 (i >= ARRAY_SIZE(irq_table) &&
6896 dd->rcd[i - ARRAY_SIZE(irq_table)]))
6897 actual_cnt++;
6898 tabsize = actual_cnt;
6899 dd->cspec->msix_entries = kmalloc(tabsize *
6900 sizeof(struct msix_entry), GFP_KERNEL);
6901 dd->cspec->msix_arg = kmalloc(tabsize *
6902 sizeof(void *), GFP_KERNEL);
6903 if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
6904 qib_dev_err(dd, "No memory for MSIx table\n");
6905 tabsize = 0;
6906 }
6907 for (i = 0; i < tabsize; i++)
6908 dd->cspec->msix_entries[i].entry = i;
6909
6910 if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6911 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
6912 "continuing anyway\n");
6913 /* may be less than we wanted, if not enough available */
6914 dd->cspec->num_msix_entries = tabsize;
6915
6916 /* setup interrupt handler */
6917 qib_setup_7322_interrupt(dd, 1);
6918
6919 /* clear diagctrl register, in case diags were running and crashed */
6920 qib_write_kreg(dd, kr_hwdiagctrl, 0);
6921
6922#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6923 ret = dca_add_requester(&pdev->dev);
6924 if (!ret) {
6925 dd->flags |= QIB_DCA_ENABLED;
6926 qib_setup_dca(dd);
6927 }
6928#endif
6929 goto bail;
6930
6931bail_cleanup:
6932 qib_pcie_ddcleanup(dd);
6933bail_free:
6934 qib_free_devdata(dd);
6935 dd = ERR_PTR(ret);
6936bail:
6937 return dd;
6938}
6939
6940/*
6941 * Set the table entry at the specified index from the table specified.
6942 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
6943 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
6944 * 'idx' below addresses the correct entry, while its 4 LSBs select the
6945 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
6946 */
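/*
 * Indexing example for the layout described above: with TXDDS_TABLE_SZ
 * of 16 (the size of the tables below), SDR entries occupy idx 0..15,
 * DDR idx 16..31 and QDR idx 32..47, so the QDR entry for attenuation
 * slot 5 is written with set_txdds(ppd, 2 * TXDDS_TABLE_SZ + 5, tp).
 */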
6947#define DDS_ENT_AMP_LSB 14
6948#define DDS_ENT_MAIN_LSB 9
6949#define DDS_ENT_POST_LSB 5
6950#define DDS_ENT_PRE_XTRA_LSB 3
6951#define DDS_ENT_PRE_LSB 0
6952
6953/*
6954 * Set one entry in the TxDDS table for spec'd port
6955 * ridx picks one of the entries, while tp points
6956 * to the appropriate table entry.
6957 */
6958static void set_txdds(struct qib_pportdata *ppd, int ridx,
6959 const struct txdds_ent *tp)
6960{
6961 struct qib_devdata *dd = ppd->dd;
6962 u32 pack_ent;
6963 int regidx;
6964
6965 /* Get correct offset in chip-space, and in source table */
6966 regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6967 /*
6968 * We do not use qib_write_kreg_port() because it was intended
6969 * only for registers in the lower "port specific" pages.
6970 * So do index calculation by hand.
6971 */
6972 if (ppd->hw_pidx)
6973 regidx += (dd->palign / sizeof(u64));
6974
6975 pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6976 pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6977 pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6978 pack_ent |= tp->post << DDS_ENT_POST_LSB;
6979 qib_write_kreg(dd, regidx, pack_ent);
6980 /* Prevent back-to-back writes by hitting scratch */
6981 qib_write_kreg(ppd->dd, kr_scratch, 0);
6982}
6983
6984static const struct vendor_txdds_ent vendor_txdds[] = {
6985 { /* Amphenol 1m 30awg NoEq */
6986 { 0x41, 0x50, 0x48 }, "584470002 ",
6987 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
6988 },
6989 { /* Amphenol 3m 28awg NoEq */
6990 { 0x41, 0x50, 0x48 }, "584470004 ",
6991 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
6992 },
6993 { /* Finisar 3m OM2 Optical */
6994 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
6995 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
6996 },
6997 { /* Finisar 30m OM2 Optical */
6998 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
6999 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
7000 },
7001 { /* Finisar Default OM2 Optical */
7002 { 0x00, 0x90, 0x65 }, NULL,
7003 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
7004 },
7005 { /* Gore 1m 30awg NoEq */
7006 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
7007 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
7008 },
7009 { /* Gore 2m 30awg NoEq */
7010 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
7011 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
7012 },
7013 { /* Gore 1m 28awg NoEq */
7014 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
7015 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
7016 },
7017 { /* Gore 3m 28awg NoEq */
7018 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
7019 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
7020 },
7021 { /* Gore 5m 24awg Eq */
7022 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
7023 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
7024 },
7025 { /* Gore 7m 24awg Eq */
7026 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
7027 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
7028 },
7029 { /* Gore 5m 26awg Eq */
7030 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
7031 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
7032 },
7033 { /* Gore 7m 26awg Eq */
7034 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
7035 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
7036 },
7037 { /* Intersil 12m 24awg Active */
7038 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7039 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
7040 },
7041 { /* Intersil 10m 28awg Active */
7042 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7043 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
7044 },
7045 { /* Intersil 7m 30awg Active */
7046 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7047 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
7048 },
7049 { /* Intersil 5m 32awg Active */
7050 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7051 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
7052 },
7053 { /* Intersil Default Active */
7054 { 0x00, 0x30, 0xB4 }, NULL,
7055 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
7056 },
7057 { /* Luxtera 20m Active Optical */
7058 { 0x00, 0x25, 0x63 }, NULL,
7059 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
7060 },
7061 { /* Molex 1M Cu loopback */
7062 { 0x00, 0x09, 0x3A }, "74763-0025 ",
7063 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
7064 },
7065 { /* Molex 2m 28awg NoEq */
7066 { 0x00, 0x09, 0x3A }, "74757-2201 ",
7067 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
7068 },
7069};
7070
7071static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7072 /* amp, pre, main, post */
7073 { 2, 2, 15, 6 }, /* Loopback */
7074 { 0, 0, 0, 1 }, /* 2 dB */
7075 { 0, 0, 0, 2 }, /* 3 dB */
7076 { 0, 0, 0, 3 }, /* 4 dB */
7077 { 0, 0, 0, 4 }, /* 5 dB */
7078 { 0, 0, 0, 5 }, /* 6 dB */
7079 { 0, 0, 0, 6 }, /* 7 dB */
7080 { 0, 0, 0, 7 }, /* 8 dB */
7081 { 0, 0, 0, 8 }, /* 9 dB */
7082 { 0, 0, 0, 9 }, /* 10 dB */
7083 { 0, 0, 0, 10 }, /* 11 dB */
7084 { 0, 0, 0, 11 }, /* 12 dB */
7085 { 0, 0, 0, 12 }, /* 13 dB */
7086 { 0, 0, 0, 13 }, /* 14 dB */
7087 { 0, 0, 0, 14 }, /* 15 dB */
7088 { 0, 0, 0, 15 }, /* 16 dB */
7089};
7090
7091static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7092 /* amp, pre, main, post */
7093 { 2, 2, 15, 6 }, /* Loopback */
7094 { 0, 0, 0, 8 }, /* 2 dB */
7095 { 0, 0, 0, 8 }, /* 3 dB */
7096 { 0, 0, 0, 9 }, /* 4 dB */
7097 { 0, 0, 0, 9 }, /* 5 dB */
7098 { 0, 0, 0, 10 }, /* 6 dB */
7099 { 0, 0, 0, 10 }, /* 7 dB */
7100 { 0, 0, 0, 11 }, /* 8 dB */
7101 { 0, 0, 0, 11 }, /* 9 dB */
7102 { 0, 0, 0, 12 }, /* 10 dB */
7103 { 0, 0, 0, 12 }, /* 11 dB */
7104 { 0, 0, 0, 13 }, /* 12 dB */
7105 { 0, 0, 0, 13 }, /* 13 dB */
7106 { 0, 0, 0, 14 }, /* 14 dB */
7107 { 0, 0, 0, 14 }, /* 15 dB */
7108 { 0, 0, 0, 15 }, /* 16 dB */
7109};
7110
7111static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7112 /* amp, pre, main, post */
7113 { 2, 2, 15, 6 }, /* Loopback */
7114 { 0, 1, 0, 7 }, /* 2 dB */
7115 { 0, 1, 0, 9 }, /* 3 dB */
7116 { 0, 1, 0, 11 }, /* 4 dB */
7117 { 0, 1, 0, 13 }, /* 5 dB */
7118 { 0, 1, 0, 15 }, /* 6 dB */
7119 { 0, 1, 3, 15 }, /* 7 dB */
7120 { 0, 1, 7, 15 }, /* 8 dB */
7121 { 0, 1, 7, 15 }, /* 9 dB */
7122 { 0, 1, 8, 15 }, /* 10 dB */
7123 { 0, 1, 9, 15 }, /* 11 dB */
7124 { 0, 1, 10, 15 }, /* 12 dB */
7125 { 0, 2, 6, 15 }, /* 13 dB */
7126 { 0, 2, 7, 15 }, /* 14 dB */
7127 { 0, 2, 8, 15 }, /* 15 dB */
7128 { 0, 2, 9, 15 }, /* 16 dB */
7129};
7130
7131static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7132 unsigned atten)
7133{
7134 /*
7135 * The attenuation table starts at 2dB for entry 1,
7136 * with entry 0 being the loopback entry.
7137 */
7138 if (atten <= 2)
7139 atten = 1;
7140 else if (atten > TXDDS_TABLE_SZ)
7141 atten = TXDDS_TABLE_SZ - 1;
7142 else
7143 atten--;
7144 return txdds + atten;
7145}
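
/*
 * Mapping examples for get_atten_table() (follow directly from the code
 * above): a cable reporting 2 dB or less maps to entry 1, 5 dB maps to
 * entry 4, and anything above TXDDS_TABLE_SZ dB is clamped to the last
 * usable entry (TXDDS_TABLE_SZ - 1); entry 0 is reserved for loopback.
 */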
7146
7147/*
7148 * if override is set, the module parameter cable_atten has a value
7149 * for this specific port, so use it, rather than our normal mechanism.
7150 */
7151static void find_best_ent(struct qib_pportdata *ppd,
7152 const struct txdds_ent **sdr_dds,
7153 const struct txdds_ent **ddr_dds,
7154 const struct txdds_ent **qdr_dds, int override)
7155{
7156 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7157 int idx;
7158
7159 /* Search table of known cables */
7160 for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7161 const struct vendor_txdds_ent *v = vendor_txdds + idx;
7162
7163 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7164 (!v->partnum ||
7165 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7166 *sdr_dds = &v->sdr;
7167 *ddr_dds = &v->ddr;
7168 *qdr_dds = &v->qdr;
7169 return;
7170 }
7171 }
7172
7173 /* Lookup serdes setting by cable type and attenuation */
7174 if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7175 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7176 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7177 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7178 return;
7179 }
7180
7181 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7182 qd->atten[1])) {
7183 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7184 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7185 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7186 return;
7187 } else {
7188 /*
7189 * If we have no (or incomplete) data from the cable
7190 * EEPROM, or no QSFP, use the module parameter value
7191		 * to index into the attenuation table.
7192 */
7193 *sdr_dds = &txdds_sdr[ppd->cpspec->no_eep];
7194 *ddr_dds = &txdds_ddr[ppd->cpspec->no_eep];
7195 *qdr_dds = &txdds_qdr[ppd->cpspec->no_eep];
7196 }
7197}
7198
7199static void init_txdds_table(struct qib_pportdata *ppd, int override)
7200{
7201 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7202 struct txdds_ent *dds;
7203 int idx;
7204 int single_ent = 0;
7205
7206 if (IS_QMH(ppd->dd)) {
7207 /* normally will be overridden, via setup_qmh() */
7208 sdr_dds = &qmh_sdr_txdds;
7209 ddr_dds = &qmh_ddr_txdds;
7210 qdr_dds = &qmh_qdr_txdds;
7211 single_ent = 1;
7212 } else if (IS_QME(ppd->dd)) {
7213 sdr_dds = &qme_sdr_txdds;
7214 ddr_dds = &qme_ddr_txdds;
7215 qdr_dds = &qme_qdr_txdds;
7216 single_ent = 1;
7217 } else
7218 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7219
7220 /* Fill in the first entry with the best entry found. */
7221 set_txdds(ppd, 0, sdr_dds);
7222 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7223 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7224
7225 /*
7226 * for our current speed, also write that value into the
7227 * tx serdes registers.
7228 */
7229 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7230 qdr_dds : (ppd->link_speed_active ==
7231 QIB_IB_DDR ? ddr_dds : sdr_dds));
7232 write_tx_serdes_param(ppd, dds);
7233
7234 /* Fill in the remaining entries with the default table values. */
7235 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7236 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7237 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7238 single_ent ? ddr_dds : txdds_ddr + idx);
7239 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7240 single_ent ? qdr_dds : txdds_qdr + idx);
7241 }
7242}
7243
7244#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7245#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7246#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7247#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7248#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7249#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7250#define AHB_TRANS_TRIES 10
7251
7252/*
7253 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7254 * 5=subsystem, which is why most calls have "(chan + (chan >> 1))"
7255 * for the channel argument.
7256 */
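/*
 * The resulting channel mapping (derived from the expression above):
 * chan 0 -> 0, chan 1 -> 1, chan 2 -> 3, chan 3 -> 4; the per-lane
 * channels skip over AHB address 2, which is the PLL.
 */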
7257static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7258 u32 data, u32 mask)
7259{
7260 u32 rd_data, wr_data, sz_mask;
7261 u64 trans, acc, prev_acc;
7262 u32 ret = 0xBAD0BAD;
7263 int tries;
7264
7265 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7266 /* From this point on, make sure we return access */
7267 acc = (quad << 1) | 1;
7268 qib_write_kreg(dd, KR_AHB_ACC, acc);
7269
7270 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7271 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7272 if (trans & AHB_TRANS_RDY)
7273 break;
7274 }
7275 if (tries >= AHB_TRANS_TRIES) {
7276 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7277 goto bail;
7278 }
7279
7280 /* If mask is not all 1s, we need to read, but different SerDes
7281 * entities have different sizes
7282 */
7283 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7284 wr_data = data & mask & sz_mask;
7285 if ((~mask & sz_mask) != 0) {
7286 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7287 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7288
7289 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7290 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7291 if (trans & AHB_TRANS_RDY)
7292 break;
7293 }
7294 if (tries >= AHB_TRANS_TRIES) {
7295 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7296 AHB_TRANS_TRIES);
7297 goto bail;
7298 }
7299 /* Re-read in case host split reads and read data first */
7300 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7301 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7302 wr_data |= (rd_data & ~mask & sz_mask);
7303 }
7304
7305 /* If mask is not zero, we need to write. */
7306 if (mask & sz_mask) {
7307 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7308 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7309 trans |= AHB_WR;
7310 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7311
7312 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7313 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7314 if (trans & AHB_TRANS_RDY)
7315 break;
7316 }
7317 if (tries >= AHB_TRANS_TRIES) {
7318 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7319 AHB_TRANS_TRIES);
7320 goto bail;
7321 }
7322 }
7323 ret = wr_data;
7324bail:
7325 qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7326 return ret;
7327}
7328
7329static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7330 unsigned mask)
7331{
7332 struct qib_devdata *dd = ppd->dd;
7333 int chan;
7334 u32 rbc;
7335
7336 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7337 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7338 data, mask);
7339 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7340 addr, 0, 0);
7341 }
7342}
7343
7344static int serdes_7322_init(struct qib_pportdata *ppd)
7345{
7346 u64 data;
7347 u32 le_val;
7348
7349 /*
7350 * Initialize the Tx DDS tables. Also done every QSFP event,
7351 * for adapters with QSFP
7352 */
7353 init_txdds_table(ppd, 0);
7354
7355 /* Patch some SerDes defaults to "Better for IB" */
7356 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7357 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7358
7359 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7360 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7361 /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7362 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7363
7364 /* May be overridden in qsfp_7322_event */
7365 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7366 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7367
7368 /* enable LE1 adaptation for all but QME, which is disabled */
7369 le_val = IS_QME(ppd->dd) ? 0 : 1;
7370 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7371
7372 /* Clear cmode-override, may be set from older driver */
7373 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7374
7375 /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7376 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7377
7378 /* setup LoS params; these are subsystem, so chan == 5 */
7379 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7380 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7381 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7382 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7383 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7384
7385 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7386 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7387 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7388 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7389 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7390
7391 /* LoS filter select enabled */
7392 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7393
7394 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7395 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7396 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7397 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7398
7399 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7400 qib_write_kreg_port(ppd, krp_serdesctrl, data |
7401 SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7402
7403	/* rxbistena; set to 0 to avoid effects if it switches later */
7404 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7405
7406 /* Configure 4 DFE taps, and only they adapt */
7407 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7408
7409 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7410 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7411 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7412
7413 /*
7414 * Set receive adaptation mode. SDR and DDR adaptation are
7415 * always on, and QDR is initially enabled; later disabled.
7416 */
7417 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7418 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7419 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7420 ppd->dd->cspec->r1 ?
7421 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7422 ppd->cpspec->qdr_dfe_on = 1;
7423
7424	/* FLoop LOS gate: PPM filter enabled */
7425 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7426
7427 /* rx offset center enabled */
7428 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7429
7430 if (!ppd->dd->cspec->r1) {
7431 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7432 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7433 }
7434
7435 /* Set the frequency loop bandwidth to 15 */
7436 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7437
7438 return 0;
7439}
7440
7441/* start adjust QMH serdes parameters */
7442
7443static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7444{
7445 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7446 9, code << 9, 0x3f << 9);
7447}
7448
7449static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7450 int enable, u32 tapenable)
7451{
7452 if (enable)
7453 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7454 1, 3 << 10, 0x1f << 10);
7455 else
7456 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7457 1, 0, 0x1f << 10);
7458}
7459
7460/* Set clock to 1, 0, 1, 0 */
7461static void clock_man(struct qib_pportdata *ppd, int chan)
7462{
7463 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7464 4, 0x4000, 0x4000);
7465 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7466 4, 0, 0x4000);
7467 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7468 4, 0x4000, 0x4000);
7469 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7470 4, 0, 0x4000);
7471}
7472
7473/*
7474 * write the current Tx serdes pre,post,main,amp settings into the serdes.
7475 * The caller must pass the settings appropriate for the current speed,
7476 * or not care if they are correct for the current speed.
7477 */
7478static void write_tx_serdes_param(struct qib_pportdata *ppd,
7479 struct txdds_ent *txdds)
7480{
7481 u64 deemph;
7482
7483 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7484 /* field names for amp, main, post, pre, respectively */
7485 deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7486 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7487 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7488 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7489 deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7490 tx_override_deemphasis_select);
7491 deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7492 txampcntl_d2a);
7493 deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7494 txc0_ena);
7495 deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7496 txcp1_ena);
7497 deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7498 txcn1_ena);
7499 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7500}
7501
7502/*
7503 * set per-bay, per channel parameters. For now, we ignore
7504 * do_tx, and always set tx parameters, and set them with the same value
7505 * for all channels, using the channel 0 value. We may switch to
7506 * per-channel settings in the future, and that method only needs
7507 * to be done once.
7508 * Because this also writes the IBC txdds table with a single set
7509 * of values, it should be called only for cases where we want to completely
7510 * force a specific setting, typically only for mez cards.
7511 */
7512static void adj_tx_serdes(struct qib_pportdata *ppd)
7513{
7514 struct txdds_ent txdds;
7515 int i;
7516 u8 *amp, *pre, *mainv, *post;
7517
7518 /*
7519 * Because we use TX_DEEMPHASIS_OVERRIDE, we need to
7520 * always do tx side, just like H1, since it is cleared
7521 * by link down
7522 */
7523 amp = ppd->cpspec->amp;
7524 pre = ppd->cpspec->pre;
7525 mainv = ppd->cpspec->mainv;
7526 post = ppd->cpspec->post;
7527
7528 amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7529 txampcntl_d2a);
7530 mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7531 txc0_ena);
7532 post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7533 txcp1_ena);
7534 pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7535 txcn1_ena);
7536
7537 /*
7538	 * For now, use only the channel zero values for
7539	 * all channels
7540 */
7541 txdds.amp = amp[0];
7542 txdds.pre = pre[0];
7543 txdds.main = mainv[0];
7544 txdds.post = post[0];
7545
7546 /* write the QDR table for IBC use, as backup for link down */
7547 for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i)
7548 set_txdds(ppd, i + 32, &txdds);
7549
7550 write_tx_serdes_param(ppd, &txdds);
7551}
7552
7553/* set QDR forced value for H1, if needed */
7554static void force_h1(struct qib_pportdata *ppd)
7555{
7556 int chan;
7557
7558 ppd->cpspec->qdr_reforce = 0;
7559 if (!ppd->dd->cspec->r1)
7560 return;
7561
7562 for (chan = 0; chan < SERDES_CHANS; chan++) {
7563 set_man_mode_h1(ppd, chan, 1, 0);
7564 set_man_code(ppd, chan, ppd->cpspec->h1_val);
7565 clock_man(ppd, chan);
7566 set_man_mode_h1(ppd, chan, 0, 0);
7567 }
7568}
7569
7570/*
7571 * Parse the parameters for the QMH7342, to get rx and tx serdes
7572 * settings for that Bay, for both possible mez connectors (PCIe bus)
7573 * and IB link (one link on mez1, two possible on mez2).
7574 *
7575 * Data is comma or white space separated.
7576 *
7577 * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values,
7578 * one per IB lane (serdes channel).
7579 * The groups are Bay, bus#, H1 rcv, and the amp, pre, post, main Tx values (QDR).
7580 * The Bay # is used only for debugging currently.
7581 * H1 values are set whenever the link goes down, or is at cfg_test or
7582 * cfg_wait_enh. Tx values are programmed once, when this routine is called
7583 * (and with default values at chip initialization). Values are any base, in
7584 * strtoul style, and values are separated by comma, or any white space
7585 * (space, tab, newline).
7586 *
7587 * An example set might look like this (white space vs
7588 * comma used for human ease of reading)
7589 * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1,
7590 * repeat for mez2 IB1, then mez2 IB2.
7591 *
7592 * B B H1:0 amp:0 pre:0 post: 0 main:0
7593 * a u H1: 1 amp: 1 pre: 1 post: 1 main: 1
7594 * y s H1: 2 amp: 2 pre: 2 post: 2 main: 2
7595 * H1: 4 amp: 3 pre: 3 post: 3 main: 3
7596 * 1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7597 * 1 6 7,6,6,7 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7598 * 1 6 9,7,7,8 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7599 */
7600#define N_QMH_FIELDS 22
7601static int setup_qmh_params(const char *str, struct kernel_param *kp)
7602{
7603 char *abuf, *v, *nv, *nvp;
7604 struct qib_devdata *dd;
7605 struct qib_pportdata *ppd;
7606 u32 mez, vlen, nf, port, bay;
7607 int ret = 0, found = 0;
7608
7609 vlen = strlen(str) + 1;
7610 abuf = kmalloc(vlen, GFP_KERNEL);
7611 if (!abuf) {
7612 printk(KERN_INFO QIB_DRV_NAME
7613 " Unable to allocate QMH param buffer; ignoring\n");
7614 return 0;
7615 }
7616 memcpy(abuf, str, vlen);
7617 v = abuf;
7618
7619 /* these 3 are because gcc can't know they are set before used */
7620 port = 1;
7621 mez = 1; /* used only for debugging */
7622 bay = 0; /* used only for debugging */
7623 ppd = NULL;
7624 for (nf = 0; (nv = strsep(&v, ", \t\n\r")) &&
7625 nf < (N_QMH_FIELDS * 3);) {
7626 u32 val;
7627
7628 if (!*nv)
7629 /* allow for multiple separators */
7630 continue;
7631
7632 val = simple_strtoul(nv, &nvp, 0);
7633 if (nv == nvp) {
7634 printk(KERN_INFO QIB_DRV_NAME
7635 " Bay%u, mez%u IB%u non-numeric value (%s) "
7636 "field #%u, ignoring rest\n", bay, mez,
7637 port, nv, nf % (N_QMH_FIELDS * 3));
7638 ret = -EINVAL;
7639 goto bail;
7640 }
7641 if (!(nf % N_QMH_FIELDS)) {
7642 ppd = NULL;
7643 bay = val;
7644 if (!bay || bay > 16) {
7645 printk(KERN_INFO QIB_DRV_NAME
7646 " Invalid bay # %u, field %u, "
7647 "ignoring rest\n", bay, nf);
7648 ret = -EINVAL;
7649 goto bail;
7650 }
7651 } else if ((nf % N_QMH_FIELDS) == 1) {
7652 u32 bus = val;
7653 if (nf == 1) {
7654 mez = 1;
7655 port = 1;
7656 } else if (nf == (N_QMH_FIELDS + 1)) {
7657 mez = 2;
7658 port = 1;
7659 } else {
7660 mez = 2;
7661 port = 2;
7662 }
7663 list_for_each_entry(dd, &qib_dev_list, list) {
7664 if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
7665 || !IS_QMH(dd))
7666 continue; /* only for QMH cards */
7667 if (dd->pcidev->bus->number == bus) {
7668 found++;
7669 ppd = &dd->pport[port - 1];
7670 }
7671 }
7672 } else if (ppd) {
7673 u32 parm = (nf % N_QMH_FIELDS) - 2;
7674 if (parm < SERDES_CHANS && !(parm % SERDES_CHANS))
7675 ppd->cpspec->h1_val = val;
7676 else if (parm < (2 * SERDES_CHANS))
7677 ppd->cpspec->amp[parm % SERDES_CHANS] = val;
7678 else if (parm < (3 * SERDES_CHANS))
7679 ppd->cpspec->pre[parm % SERDES_CHANS] = val;
7680 else if (parm < (4 * SERDES_CHANS))
7681 ppd->cpspec->post[parm % SERDES_CHANS] = val;
7682 else {
7683 ppd->cpspec->mainv[parm % SERDES_CHANS] = val;
7684 /* At the end of a port, set params */
7685 if (parm == ((5 * SERDES_CHANS) - 1))
7686 adj_tx_serdes(ppd);
7687 }
7688 }
7689 nf++;
7690 }
7691 if (!found) {
7692 printk(KERN_ERR QIB_DRV_NAME
7693 ": No match found for qmh_serdes_setup parameter\n");
7694 ret = -EINVAL;
7695 }
7696bail:
7697 kfree(abuf);
7698 return ret;
7699}
7700
7701/*
7702 * Similarly for QME7342, but the format is simpler, values are the
7703 * same for all mez card positions in a blade (2 or 4 per blade), but
7704 * are different for some blades vs others, and we don't need to
7705 * specify different parameters for different serdes channels or different
7706 * IB ports.
7707 * Format is: h1 amp,pre,post,main
7708 * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main
7709 */
7710#define N_QME_FIELDS 5
7711static int setup_qme_params(const char *str, struct kernel_param *kp)
7712{
7713 char *abuf, *v, *nv, *nvp;
7714 struct qib_devdata *dd;
7715 u32 vlen, nf, port = 0;
7716 u8 h1, tx[4]; /* amp, pre, post, main */
7717 int ret = -EINVAL;
7718 char *seplist;
7719
7720 vlen = strlen(str) + 1;
7721 abuf = kmalloc(vlen, GFP_KERNEL);
7722 if (!abuf) {
7723 printk(KERN_INFO QIB_DRV_NAME
7724 " Unable to allocate QME param buffer; ignoring\n");
7725 return 0;
7726 }
7727 strncpy(abuf, str, vlen);
7728
7729 v = abuf;
7730 seplist = " \t";
7731 h1 = H1_FORCE_QME; /* gcc can't figure out always set before used */
7732
7733 for (nf = 0; (nv = strsep(&v, seplist)); ) {
7734 u32 val;
7735
7736 if (!*nv)
7737 /* allow for multiple separators */
7738 continue;
7739
7740 if (!nf && *nv == 'P') {
7741 /* alternate format with port */
7742 val = simple_strtoul(++nv, &nvp, 0);
7743 if (nv == nvp || port >= NUM_IB_PORTS) {
7744 printk(KERN_INFO QIB_DRV_NAME
7745 " %s: non-numeric port value (%s) "
7746 "ignoring rest\n", __func__, nv);
7747 goto done;
7748 }
7749 port = val;
7750 continue; /* without incrementing nf */
7751 }
7752 val = simple_strtoul(nv, &nvp, 0);
7753 if (nv == nvp) {
7754 printk(KERN_INFO QIB_DRV_NAME
7755 " %s: non-numeric value (%s) "
7756 "field #%u, ignoring rest\n", __func__,
7757 nv, nf);
7758 goto done;
7759 }
7760 if (!nf) {
7761 h1 = val;
7762 seplist = ",";
7763 } else
7764 tx[nf - 1] = val;
7765 if (++nf == N_QME_FIELDS) {
7766 list_for_each_entry(dd, &qib_dev_list, list) {
7767 int pidx, i;
7768 if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
7769 || !IS_QME(dd))
7770 continue; /* only for QME cards */
7771 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
7772 struct qib_pportdata *ppd;
7773 ppd = &dd->pport[pidx];
7774 if ((port && ppd->port != port) ||
7775 !ppd->link_speed_supported)
7776 continue;
7777 ppd->cpspec->h1_val = h1;
7778 for (i = 0; i < SERDES_CHANS; i++) {
7779 ppd->cpspec->amp[i] = tx[0];
7780 ppd->cpspec->pre[i] = tx[1];
7781 ppd->cpspec->post[i] = tx[2];
7782 ppd->cpspec->mainv[i] = tx[3];
7783 }
7784 adj_tx_serdes(ppd);
7785 }
7786 }
7787 ret = 0;
7788 goto done;
7789 }
7790 }
7791 printk(KERN_INFO QIB_DRV_NAME
7792 " %s: Only %u of %u fields provided, skipping\n",
7793 __func__, nf, N_QME_FIELDS);
7794done:
7795 kfree(abuf);
7796 return ret;
7797}
7798
7799#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7800#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7801
7802#define R_OPCODE_LSB 3
7803#define R_OP_NOP 0
7804#define R_OP_SHIFT 2
7805#define R_OP_UPDATE 3
7806#define R_TDI_LSB 2
7807#define R_TDO_LSB 1
7808#define R_RDY 1
7809
7810static int qib_r_grab(struct qib_devdata *dd)
7811{
7812 u64 val;
7813 val = SJA_EN;
7814 qib_write_kreg(dd, kr_r_access, val);
7815 qib_read_kreg32(dd, kr_scratch);
7816 return 0;
7817}
7818
7819/* qib_r_wait_for_rdy() not only waits for the ready bit, it
7820 * also returns the current state of R_TDO
7821 */
7822static int qib_r_wait_for_rdy(struct qib_devdata *dd)
7823{
7824 u64 val;
7825 int timeout;
7826 for (timeout = 0; timeout < 100 ; ++timeout) {
7827 val = qib_read_kreg32(dd, kr_r_access);
7828 if (val & R_RDY)
7829 return (val >> R_TDO_LSB) & 1;
7830 }
7831 return -1;
7832}
7833
7834static int qib_r_shift(struct qib_devdata *dd, int bisten,
7835 int len, u8 *inp, u8 *outp)
7836{
7837 u64 valbase, val;
7838 int ret, pos;
7839
7840 valbase = SJA_EN | (bisten << BISTEN_LSB) |
7841 (R_OP_SHIFT << R_OPCODE_LSB);
7842 ret = qib_r_wait_for_rdy(dd);
7843 if (ret < 0)
7844 goto bail;
7845 for (pos = 0; pos < len; ++pos) {
7846 val = valbase;
7847 if (outp) {
7848 outp[pos >> 3] &= ~(1 << (pos & 7));
7849 outp[pos >> 3] |= (ret << (pos & 7));
7850 }
7851 if (inp) {
7852 int tdi = inp[pos >> 3] >> (pos & 7);
7853 val |= ((tdi & 1) << R_TDI_LSB);
7854 }
7855 qib_write_kreg(dd, kr_r_access, val);
7856 qib_read_kreg32(dd, kr_scratch);
7857 ret = qib_r_wait_for_rdy(dd);
7858 if (ret < 0)
7859 break;
7860 }
7861 /* Restore to NOP between operations. */
7862 val = SJA_EN | (bisten << BISTEN_LSB);
7863 qib_write_kreg(dd, kr_r_access, val);
7864 qib_read_kreg32(dd, kr_scratch);
7865 ret = qib_r_wait_for_rdy(dd);
7866
7867 if (ret >= 0)
7868 ret = pos;
7869bail:
7870 return ret;
7871}
7872
7873static int qib_r_update(struct qib_devdata *dd, int bisten)
7874{
7875 u64 val;
7876 int ret;
7877
7878 val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
7879 ret = qib_r_wait_for_rdy(dd);
7880 if (ret >= 0) {
7881 qib_write_kreg(dd, kr_r_access, val);
7882 qib_read_kreg32(dd, kr_scratch);
7883 }
7884 return ret;
7885}
7886
7887#define BISTEN_PORT_SEL 15
7888#define LEN_PORT_SEL 625
7889#define BISTEN_AT 17
7890#define LEN_AT 156
7891#define BISTEN_ETM 16
7892#define LEN_ETM 632
7893
7894#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
7895
7896/* these are common for all IB port use cases. */
7897static u8 reset_at[BIT2BYTE(LEN_AT)] = {
7898 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7899 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7900};
7901static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
7902 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7903 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7904 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7905 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7906 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7907 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7908 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7909 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7910};
7911static u8 at[BIT2BYTE(LEN_AT)] = {
7912 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7913 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7914};
7915
7916/* used for IB1 or IB2, only one in use */
7917static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
7918 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7919 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7920 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7921 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7922 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7923 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7924 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7925 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7926};
7927
7928/* used when both IB1 and IB2 are in use */
7929static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
7930 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7931 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7932 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7933 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7934 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7935 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7936 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7937 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
7938};
7939
7940/* used when only IB1 is in use */
7941static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
7942 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7943 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7944 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7945 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7946 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7947 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7948 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7949 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7950};
7951
7952/* used when only IB2 is in use */
7953static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
7954 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
7955 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
7956 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7957 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7958 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
7959 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7960 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7961 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
7962};
7963
7964/* used when both IB1 and IB2 are in use */
7965static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
7966 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7967 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7968 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7969 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7970 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7971 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
7972 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7973 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7974};
7975
7976/*
7977 * Do setup to properly handle IB link recovery; if 'both' is set, we
7978 * are initializing to cover both ports; otherwise we are initializing
7979 * to cover a single port card, or the port has reached INIT and we may
7980 * need to switch coverage types.
7981 */
7982static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
7983{
7984 u8 *portsel, *etm;
7985 struct qib_devdata *dd = ppd->dd;
7986
7987 if (!ppd->dd->cspec->r1)
7988 return;
7989 if (!both) {
7990 dd->cspec->recovery_ports_initted++;
7991 ppd->cpspec->recovery_init = 1;
7992 }
7993 if (!both && dd->cspec->recovery_ports_initted == 1) {
7994 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
7995 etm = atetm_1port;
7996 } else {
7997 portsel = portsel_2port;
7998 etm = atetm_2port;
7999 }
8000
8001 if (qib_r_grab(dd) < 0 ||
8002 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8003 qib_r_update(dd, BISTEN_ETM) < 0 ||
8004 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8005 qib_r_update(dd, BISTEN_AT) < 0 ||
8006 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8007 portsel, NULL) < 0 ||
8008 qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8009 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8010 qib_r_update(dd, BISTEN_AT) < 0 ||
8011 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8012 qib_r_update(dd, BISTEN_ETM) < 0)
8013 qib_dev_err(dd, "Failed IB link recovery setup\n");
8014}
8015
8016static void check_7322_rxe_status(struct qib_pportdata *ppd)
8017{
8018 struct qib_devdata *dd = ppd->dd;
8019 u64 fmask;
8020
8021 if (dd->cspec->recovery_ports_initted != 1)
8022 return; /* rest doesn't apply to dualport */
8023 qib_write_kreg(dd, kr_control, dd->control |
8024 SYM_MASK(Control, FreezeMode));
8025 (void)qib_read_kreg64(dd, kr_scratch);
8026 udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8027 fmask = qib_read_kreg64(dd, kr_act_fmask);
8028 if (!fmask) {
8029 /*
8030 * require a powercycle before we'll work again, and make
8031 * sure we get no more interrupts, and don't turn off
8032 * freeze.
8033 */
8034 ppd->dd->cspec->stay_in_freeze = 1;
8035 qib_7322_set_intr_state(ppd->dd, 0);
8036 qib_write_kreg(dd, kr_fmask, 0ULL);
8037 qib_dev_err(dd, "HCA unusable until powercycled\n");
8038 return; /* eventually reset */
8039 }
8040
8041 qib_write_kreg(ppd->dd, kr_hwerrclear,
8042 SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8043
8044 /* don't do the full clear_freeze(), not needed for this */
8045 qib_write_kreg(dd, kr_control, dd->control);
8046 qib_read_kreg32(dd, kr_scratch);
8047 /* take IBC out of reset */
8048 if (ppd->link_speed_supported) {
8049 ppd->cpspec->ibcctrl_a &=
8050 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8051 qib_write_kreg_port(ppd, krp_ibcctrl_a,
8052 ppd->cpspec->ibcctrl_a);
8053 qib_read_kreg32(dd, kr_scratch);
8054 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8055 qib_set_ib_7322_lstate(ppd, 0,
8056 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8057 }
8058}