Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r-- drivers/infiniband/hw/qib/Kconfig 7
-rw-r--r-- drivers/infiniband/hw/qib/Makefile 15
-rw-r--r-- drivers/infiniband/hw/qib/qib.h 1439
-rw-r--r-- drivers/infiniband/hw/qib/qib_6120_regs.h 977
-rw-r--r-- drivers/infiniband/hw/qib/qib_7220.h 156
-rw-r--r-- drivers/infiniband/hw/qib/qib_7220_regs.h 1496
-rw-r--r-- drivers/infiniband/hw/qib/qib_7322_regs.h 3163
-rw-r--r-- drivers/infiniband/hw/qib/qib_common.h 758
-rw-r--r-- drivers/infiniband/hw/qib/qib_cq.c 484
-rw-r--r-- drivers/infiniband/hw/qib/qib_diag.c 894
-rw-r--r-- drivers/infiniband/hw/qib/qib_dma.c 182
-rw-r--r-- drivers/infiniband/hw/qib/qib_driver.c 665
-rw-r--r-- drivers/infiniband/hw/qib/qib_eeprom.c 451
-rw-r--r-- drivers/infiniband/hw/qib/qib_file_ops.c 2317
-rw-r--r-- drivers/infiniband/hw/qib/qib_fs.c 616
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba6120.c 3576
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba7220.c 4618
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba7322.c 7645
-rw-r--r-- drivers/infiniband/hw/qib/qib_init.c 1586
-rw-r--r-- drivers/infiniband/hw/qib/qib_intr.c 236
-rw-r--r-- drivers/infiniband/hw/qib/qib_keys.c 328
-rw-r--r-- drivers/infiniband/hw/qib/qib_mad.c 2173
-rw-r--r-- drivers/infiniband/hw/qib/qib_mad.h 373
-rw-r--r-- drivers/infiniband/hw/qib/qib_mmap.c 174
-rw-r--r-- drivers/infiniband/hw/qib/qib_mr.c 503
-rw-r--r-- drivers/infiniband/hw/qib/qib_pcie.c 738
-rw-r--r-- drivers/infiniband/hw/qib/qib_pio_copy.c 64
-rw-r--r-- drivers/infiniband/hw/qib/qib_qp.c 1255
-rw-r--r-- drivers/infiniband/hw/qib/qib_qsfp.c 564
-rw-r--r-- drivers/infiniband/hw/qib/qib_qsfp.h 184
-rw-r--r-- drivers/infiniband/hw/qib/qib_rc.c 2288
-rw-r--r-- drivers/infiniband/hw/qib/qib_ruc.c 817
-rw-r--r-- drivers/infiniband/hw/qib/qib_sd7220.c 1413
-rw-r--r-- drivers/infiniband/hw/qib/qib_sd7220_img.c 1081
-rw-r--r-- drivers/infiniband/hw/qib/qib_sdma.c 973
-rw-r--r-- drivers/infiniband/hw/qib/qib_srq.c 375
-rw-r--r-- drivers/infiniband/hw/qib/qib_sysfs.c 691
-rw-r--r-- drivers/infiniband/hw/qib/qib_twsi.c 498
-rw-r--r-- drivers/infiniband/hw/qib/qib_tx.c 557
-rw-r--r-- drivers/infiniband/hw/qib/qib_uc.c 555
-rw-r--r-- drivers/infiniband/hw/qib/qib_ud.c 607
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_pages.c 157
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_sdma.c 897
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_sdma.h 52
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs.c 2248
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs.h 1100
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs_mcast.c 368
-rw-r--r-- drivers/infiniband/hw/qib/qib_wc_ppc64.c 62
-rw-r--r-- drivers/infiniband/hw/qib/qib_wc_x86_64.c 171
49 files changed, 52547 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
new file mode 100644
index 000000000000..7c03a70c55a2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -0,0 +1,7 @@
1config INFINIBAND_QIB
2 tristate "QLogic PCIe HCA support"
3 depends on 64BIT && NET
4 ---help---
5 This is a low-level driver for QLogic PCIe QLE InfiniBand host
6 channel adapters. This driver does not support the QLogic
7 HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
new file mode 100644
index 000000000000..c6515a1b9a6a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -0,0 +1,15 @@
1obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
2
3ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
4 qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
5 qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
6 qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
7 qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
8 qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
9 qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
10
11# 6120 has no fallback if no MSI interrupts, others can do INTx
12ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
13
14ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
15ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
new file mode 100644
index 000000000000..32d9208efcff
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -0,0 +1,1439 @@
1#ifndef _QIB_KERNEL_H
2#define _QIB_KERNEL_H
3/*
4 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
5 * All rights reserved.
6 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37/*
38 * This header file is the base header file for qlogic_ib kernel code
39 * qib_user.h serves a similar purpose for user code.
40 */
41
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/list.h>
47#include <linux/scatterlist.h>
48#include <linux/io.h>
49#include <linux/fs.h>
50#include <linux/completion.h>
51#include <linux/kref.h>
52#include <linux/sched.h>
53
54#include "qib_common.h"
55#include "qib_verbs.h"
56
57/* only s/w major version of QLogic_IB we can handle */
58#define QIB_CHIP_VERS_MAJ 2U
59
60/* don't care about this except printing */
61#define QIB_CHIP_VERS_MIN 0U
62
63/* The Organization Unique Identifier (Mfg code), and its position in GUID */
64#define QIB_OUI 0x001175
65#define QIB_OUI_LSB 40
66
67/*
68 * per-driver stats: either not specific to any device or port, or
69 * summed over all of the devices and ports.
70 * They are described by name via ipathfs filesystem, so layout
71 * and number of elements can change without breaking compatibility.
72 * If members are added or deleted qib_statnames[] in qib_fs.c must
73 * change to match.
74 */
75struct qlogic_ib_stats {
76 __u64 sps_ints; /* number of interrupts handled */
77 __u64 sps_errints; /* number of error interrupts */
78 __u64 sps_txerrs; /* tx-related packet errors */
79 __u64 sps_rcverrs; /* non-crc rcv packet errors */
80 __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
81 __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
82 __u64 sps_ctxts; /* number of contexts currently open */
83 __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
84 __u64 sps_buffull;
85 __u64 sps_hdrfull;
86};
87
88extern struct qlogic_ib_stats qib_stats;
89extern struct pci_error_handlers qib_pci_err_handler;
90extern struct pci_driver qib_driver;
91
92#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
93/*
94 * First-cut criterion for "device is active" is
95 * two thousand dwords combined Tx, Rx traffic per
96 * 5-second interval. SMA packets are 64 dwords,
97 * and occur "a few per second", presumably each way.
98 */
99#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
100
101/*
102 * Struct used to indicate which errors are logged in each of the
103 * error-counters that are logged to EEPROM. A counter is incremented
104 * _once_ (saturating at 255) for each event with any bits set in
105 * the error or hwerror register masks below.
106 */
107#define QIB_EEP_LOG_CNT (4)
108struct qib_eep_log_mask {
109 u64 errs_to_log;
110 u64 hwerrs_to_log;
111};
112
113/*
114 * Below contains all data related to a single context (formerly called port).
115 */
116struct qib_ctxtdata {
117 void **rcvegrbuf;
118 dma_addr_t *rcvegrbuf_phys;
119 /* rcvhdrq base, needs mmap before useful */
120 void *rcvhdrq;
121 /* kernel virtual address where hdrqtail is updated */
122 void *rcvhdrtail_kvaddr;
123 /*
124 * temp buffer for expected send setup, allocated at open, instead
125 * of each setup call
126 */
127 void *tid_pg_list;
128 /*
129 * Shared page for kernel to signal user processes that send buffers
130 * need disarming. The process should call QIB_CMD_DISARM_BUFS
131 * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
132 */
133 unsigned long *user_event_mask;
134 /* when waiting for rcv or pioavail */
135 wait_queue_head_t wait;
136 /*
137 * rcvegr bufs base, physical, must fit
138 * in 44 bits (so mmap64 from 32-bit programs works with 44-bit addresses)
139 */
140 dma_addr_t rcvegr_phys;
141 /* mmap of hdrq, must fit in 44 bits */
142 dma_addr_t rcvhdrq_phys;
143 dma_addr_t rcvhdrqtailaddr_phys;
144
145 /*
146 * number of opens (including slave sub-contexts) on this instance
147 * (ignoring forks, dup, etc. for now)
148 */
149 int cnt;
150 /*
151 * how much space to leave at start of eager TID entries for
152 * protocol use, on each TID
153 */
154 /* instead of calculating it */
155 unsigned ctxt;
156 /* non-zero if ctxt is being shared. */
157 u16 subctxt_cnt;
158 /* non-zero if ctxt is being shared. */
159 u16 subctxt_id;
160 /* number of eager TID entries. */
161 u16 rcvegrcnt;
162 /* index of first eager TID entry. */
163 u16 rcvegr_tid_base;
164 /* number of pio bufs for this ctxt (all procs, if shared) */
165 u32 piocnt;
166 /* first pio buffer for this ctxt */
167 u32 pio_base;
168 /* chip offset of PIO buffers for this ctxt */
169 u32 piobufs;
170 /* how many alloc_pages() chunks in rcvegrbuf_pages */
171 u32 rcvegrbuf_chunks;
172 /* how many egrbufs per chunk */
173 u32 rcvegrbufs_perchunk;
174 /* order for rcvegrbuf_pages */
175 size_t rcvegrbuf_size;
176 /* rcvhdrq size (for freeing) */
177 size_t rcvhdrq_size;
178 /* per-context flags for fileops/intr communication */
179 unsigned long flag;
180 /* next expected TID to check when looking for free */
181 u32 tidcursor;
182 /* WAIT_RCV that timed out, no interrupt */
183 u32 rcvwait_to;
184 /* WAIT_PIO that timed out, no interrupt */
185 u32 piowait_to;
186 /* WAIT_RCV already happened, no wait */
187 u32 rcvnowait;
188 /* WAIT_PIO already happened, no wait */
189 u32 pionowait;
190 /* total number of polled urgent packets */
191 u32 urgent;
192 /* saved total number of polled urgent packets for poll edge trigger */
193 u32 urgent_poll;
194 /* pid of process using this ctxt */
195 pid_t pid;
196 pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
197 /* same size as task_struct .comm[], command that opened context */
198 char comm[16];
199 /* pkeys set by this use of this ctxt */
200 u16 pkeys[4];
201 /* so file ops can get at unit */
202 struct qib_devdata *dd;
203 /* so funcs that need physical port can get it easily */
204 struct qib_pportdata *ppd;
205 /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
206 void *subctxt_uregbase;
207 /* An array of pages for the eager receive buffers * N */
208 void *subctxt_rcvegrbuf;
209 /* An array of pages for the eager header queue entries * N */
210 void *subctxt_rcvhdr_base;
211 /* The version of the library which opened this ctxt */
212 u32 userversion;
213 /* Bitmask of active slaves */
214 u32 active_slaves;
215 /* Type of packets or conditions we want to poll for */
216 u16 poll_type;
217 /* receive packet sequence counter */
218 u8 seq_cnt;
219 u8 redirect_seq_cnt;
220 /* ctxt rcvhdrq head offset */
221 u32 head;
222 u32 pkt_count;
223 /* QPs waiting for context processing */
224 struct list_head qp_wait_list;
225};
226
227struct qib_sge_state;
228
229struct qib_sdma_txreq {
230 int flags;
231 int sg_count;
232 dma_addr_t addr;
233 void (*callback)(struct qib_sdma_txreq *, int);
234 u16 start_idx; /* sdma private */
235 u16 next_descq_idx; /* sdma private */
236 struct list_head list; /* sdma private */
237};
238
239struct qib_sdma_desc {
240 __le64 qw[2];
241};
242
243struct qib_verbs_txreq {
244 struct qib_sdma_txreq txreq;
245 struct qib_qp *qp;
246 struct qib_swqe *wqe;
247 u32 dwords;
248 u16 hdr_dwords;
249 u16 hdr_inx;
250 struct qib_pio_header *align_buf;
251 struct qib_mregion *mr;
252 struct qib_sge_state *ss;
253};
254
255#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
256#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
257#define QIB_SDMA_TXREQ_F_INTREQ 0x4
258#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
259#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
260
261#define QIB_SDMA_TXREQ_S_OK 0
262#define QIB_SDMA_TXREQ_S_SENDERROR 1
263#define QIB_SDMA_TXREQ_S_ABORTED 2
264#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
265
266/*
267 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
268 * Mostly for MADs that set or query link parameters, also ipath
269 * config interfaces
270 */
271#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
272#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
273#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
274#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
275#define QIB_IB_CFG_SPD 5 /* current Link spd */
276#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
277#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
278#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
279#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
280#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
281#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
282#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
283#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
284#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
285#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
286#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
287#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
288#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
289#define QIB_IB_CFG_VL_HIGH_LIMIT 19
290#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
291#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
292
293/*
294 * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
295 * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
296 * QIB_IB_CFG_LINKDEFAULT cmd
297 */
298#define IB_LINKCMD_DOWN (0 << 16)
299#define IB_LINKCMD_ARMED (1 << 16)
300#define IB_LINKCMD_ACTIVE (2 << 16)
301#define IB_LINKINITCMD_NOP 0
302#define IB_LINKINITCMD_POLL 1
303#define IB_LINKINITCMD_SLEEP 2
304#define IB_LINKINITCMD_DISABLE 3
305
306/*
307 * valid states passed to qib_set_linkstate() user call
308 */
309#define QIB_IB_LINKDOWN 0
310#define QIB_IB_LINKARM 1
311#define QIB_IB_LINKACTIVE 2
312#define QIB_IB_LINKDOWN_ONLY 3
313#define QIB_IB_LINKDOWN_SLEEP 4
314#define QIB_IB_LINKDOWN_DISABLE 5
315
316/*
317 * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
318 * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
319 * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
320 * are also the possible values for qib_link_speed_enabled and active.
321 * The values were chosen to match values used within the IB spec.
322 */
323#define QIB_IB_SDR 1
324#define QIB_IB_DDR 2
325#define QIB_IB_QDR 4
326
327#define QIB_DEFAULT_MTU 4096
328
329/*
330 * Possible IB config parameters for f_get/set_ib_table()
331 */
332#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
333#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
334
335/*
336 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
337 * these are bits so they can be combined, e.g.
338 * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
339 */
340#define QIB_RCVCTRL_TAILUPD_ENB 0x01
341#define QIB_RCVCTRL_TAILUPD_DIS 0x02
342#define QIB_RCVCTRL_CTXT_ENB 0x04
343#define QIB_RCVCTRL_CTXT_DIS 0x08
344#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
345#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
346#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
347#define QIB_RCVCTRL_PKEY_DIS 0x80
348#define QIB_RCVCTRL_BP_ENB 0x0100
349#define QIB_RCVCTRL_BP_DIS 0x0200
350#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
351#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
352
353/*
354 * Possible "operations" for f_sendctrl(ppd, op, var)
355 * these are bits so they can be combined, e.g.
356 * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
357 * Some operations (e.g. DISARM, ABORT) are known to
358 * be "one-shot", so do not modify shadow.
359 */
360#define QIB_SENDCTRL_DISARM (0x1000)
361#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
362 /* available (0x2000) */
363#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
364#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
365#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
366#define QIB_SENDCTRL_SEND_DIS (0x20000)
367#define QIB_SENDCTRL_SEND_ENB (0x40000)
368#define QIB_SENDCTRL_FLUSH (0x80000)
369#define QIB_SENDCTRL_CLEAR (0x100000)
370#define QIB_SENDCTRL_DISARM_ALL (0x200000)
371
372/*
373 * These are the generic indices for requesting per-port
374 * counter values via the f_portcntr function. They
375 * are always returned as 64 bit values, although most
376 * are 32 bit counters.
377 */
378/* send-related counters */
379#define QIBPORTCNTR_PKTSEND 0U
380#define QIBPORTCNTR_WORDSEND 1U
381#define QIBPORTCNTR_PSXMITDATA 2U
382#define QIBPORTCNTR_PSXMITPKTS 3U
383#define QIBPORTCNTR_PSXMITWAIT 4U
384#define QIBPORTCNTR_SENDSTALL 5U
385/* receive-related counters */
386#define QIBPORTCNTR_PKTRCV 6U
387#define QIBPORTCNTR_PSRCVDATA 7U
388#define QIBPORTCNTR_PSRCVPKTS 8U
389#define QIBPORTCNTR_RCVEBP 9U
390#define QIBPORTCNTR_RCVOVFL 10U
391#define QIBPORTCNTR_WORDRCV 11U
392/* IB link related error counters */
393#define QIBPORTCNTR_RXLOCALPHYERR 12U
394#define QIBPORTCNTR_RXVLERR 13U
395#define QIBPORTCNTR_ERRICRC 14U
396#define QIBPORTCNTR_ERRVCRC 15U
397#define QIBPORTCNTR_ERRLPCRC 16U
398#define QIBPORTCNTR_BADFORMAT 17U
399#define QIBPORTCNTR_ERR_RLEN 18U
400#define QIBPORTCNTR_IBSYMBOLERR 19U
401#define QIBPORTCNTR_INVALIDRLEN 20U
402#define QIBPORTCNTR_UNSUPVL 21U
403#define QIBPORTCNTR_EXCESSBUFOVFL 22U
404#define QIBPORTCNTR_ERRLINK 23U
405#define QIBPORTCNTR_IBLINKDOWN 24U
406#define QIBPORTCNTR_IBLINKERRRECOV 25U
407#define QIBPORTCNTR_LLI 26U
408/* other error counters */
409#define QIBPORTCNTR_RXDROPPKT 27U
410#define QIBPORTCNTR_VL15PKTDROP 28U
411#define QIBPORTCNTR_ERRPKEY 29U
412#define QIBPORTCNTR_KHDROVFL 30U
413/* sampling counters (these are actually control registers) */
414#define QIBPORTCNTR_PSINTERVAL 31U
415#define QIBPORTCNTR_PSSTART 32U
416#define QIBPORTCNTR_PSSTAT 33U
417
418/* how often we check for packet activity for "power on hours" (in seconds) */
419#define ACTIVITY_TIMER 5
420
421/* Below is an opaque struct. Each chip (device) can maintain
422 * private data needed for its operation, but not germane to the
423 * rest of the driver. For convenience, we define another that
424 * is chip-specific, per-port
425 */
426struct qib_chip_specific;
427struct qib_chipport_specific;
428
429enum qib_sdma_states {
430 qib_sdma_state_s00_hw_down,
431 qib_sdma_state_s10_hw_start_up_wait,
432 qib_sdma_state_s20_idle,
433 qib_sdma_state_s30_sw_clean_up_wait,
434 qib_sdma_state_s40_hw_clean_up_wait,
435 qib_sdma_state_s50_hw_halt_wait,
436 qib_sdma_state_s99_running,
437};
438
439enum qib_sdma_events {
440 qib_sdma_event_e00_go_hw_down,
441 qib_sdma_event_e10_go_hw_start,
442 qib_sdma_event_e20_hw_started,
443 qib_sdma_event_e30_go_running,
444 qib_sdma_event_e40_sw_cleaned,
445 qib_sdma_event_e50_hw_cleaned,
446 qib_sdma_event_e60_hw_halted,
447 qib_sdma_event_e70_go_idle,
448 qib_sdma_event_e7220_err_halted,
449 qib_sdma_event_e7322_err_halted,
450 qib_sdma_event_e90_timer_tick,
451};
452
453extern char *qib_sdma_state_names[];
454extern char *qib_sdma_event_names[];
455
456struct sdma_set_state_action {
457 unsigned op_enable:1;
458 unsigned op_intenable:1;
459 unsigned op_halt:1;
460 unsigned op_drain:1;
461 unsigned go_s99_running_tofalse:1;
462 unsigned go_s99_running_totrue:1;
463};
464
465struct qib_sdma_state {
466 struct kref kref;
467 struct completion comp;
468 enum qib_sdma_states current_state;
469 struct sdma_set_state_action *set_state_action;
470 unsigned current_op;
471 unsigned go_s99_running;
472 unsigned first_sendbuf;
473 unsigned last_sendbuf; /* really last +1 */
474 /* debugging/devel */
475 enum qib_sdma_states previous_state;
476 unsigned previous_op;
477 enum qib_sdma_events last_event;
478};
479
480struct xmit_wait {
481 struct timer_list timer;
482 u64 counter;
483 u8 flags;
484 struct cache {
485 u64 psxmitdata;
486 u64 psrcvdata;
487 u64 psxmitpkts;
488 u64 psrcvpkts;
489 u64 psxmitwait;
490 } counter_cache;
491};
492
493/*
494 * The structure below encapsulates data relevant to a physical IB Port.
495 * Current chips support only one such port, but the separation
496 * clarifies things a bit. Note that to conform to IB conventions,
497 * port-numbers are one-based. The first or only port is port1.
498 */
499struct qib_pportdata {
500 struct qib_ibport ibport_data;
501
502 struct qib_devdata *dd;
503 struct qib_chippport_specific *cpspec; /* chip-specific per-port */
504 struct kobject pport_kobj;
505 struct kobject sl2vl_kobj;
506 struct kobject diagc_kobj;
507
508 /* GUID for this interface, in network order */
509 __be64 guid;
510
511 /* QIB_POLL, etc. link-state specific flags, per port */
512 u32 lflags;
513 /* qib_lflags driver is waiting for */
514 u32 state_wanted;
515 spinlock_t lflags_lock;
516 /* number of (port-specific) interrupts for this port -- saturates... */
517 u32 int_counter;
518
519 /* ref count for each pkey */
520 atomic_t pkeyrefs[4];
521
522 /*
523 * this address is mapped readonly into user processes so they can
524 * get status cheaply, whenever they want. One qword of status per port
525 */
526 u64 *statusp;
527
528 /* SendDMA related entries */
529 spinlock_t sdma_lock;
530 struct qib_sdma_state sdma_state;
531 unsigned long sdma_buf_jiffies;
532 struct qib_sdma_desc *sdma_descq;
533 u64 sdma_descq_added;
534 u64 sdma_descq_removed;
535 u16 sdma_descq_cnt;
536 u16 sdma_descq_tail;
537 u16 sdma_descq_head;
538 u16 sdma_next_intr;
539 u16 sdma_reset_wait;
540 u8 sdma_generation;
541 struct tasklet_struct sdma_sw_clean_up_task;
542 struct list_head sdma_activelist;
543
544 dma_addr_t sdma_descq_phys;
545 volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
546 dma_addr_t sdma_head_phys;
547
548 wait_queue_head_t state_wait; /* for state_wanted */
549
550 /* HoL blocking for SMP replies */
551 unsigned hol_state;
552 struct timer_list hol_timer;
553
554 /*
555 * Shadow copies of registers; size indicates read access size.
556 * Most of them are readonly, but some are write-only registers,
557 * where we manipulate the bits in the shadow copy, and then write
558 * the shadow copy to qlogic_ib.
559 *
560 * We deliberately make most of these 32 bits, since they have
561 * restricted range. For any that we read, we want to generate 32
562 * bit accesses, since Opteron will generate 2 separate 32 bit HT
563 * transactions for a 64 bit read, and we want to avoid unnecessary
564 * bus transactions.
565 */
566
567 /* This is the 64 bit group */
568 /* last ibcstatus. opaque outside chip-specific code */
569 u64 lastibcstat;
570
571 /* these are the "32 bit" regs */
572
573 /*
574 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
575 * all expect bit fields to be "unsigned long"
576 */
577 unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
578 unsigned long p_sendctrl; /* shadow per-port sendctrl */
579
580 u32 ibmtu; /* The MTU programmed for this unit */
581 /*
582 * Current max size IB packet (in bytes) including IB headers, that
583 * we can send. Changes when ibmtu changes.
584 */
585 u32 ibmaxlen;
586 /*
587 * ibmaxlen at init time, limited by chip and by receive buffer
588 * size. Not changed after init.
589 */
590 u32 init_ibmaxlen;
591 /* LID programmed for this instance */
592 u16 lid;
593 /* list of pkeys programmed; 0 if not set */
594 u16 pkeys[4];
595 /* LID mask control */
596 u8 lmc;
597 u8 link_width_supported;
598 u8 link_speed_supported;
599 u8 link_width_enabled;
600 u8 link_speed_enabled;
601 u8 link_width_active;
602 u8 link_speed_active;
603 u8 vls_supported;
604 u8 vls_operational;
605 /* Rx Polarity inversion (compensate for ~tx on partner) */
606 u8 rx_pol_inv;
607
608 u8 hw_pidx; /* physical port index */
609 u8 port; /* IB port number and index into dd->pports - 1 */
610
611 u8 delay_mult;
612
613 /* used to override LED behavior */
614 u8 led_override; /* Substituted for normal value, if non-zero */
615 u16 led_override_timeoff; /* delta to next timer event */
616 u8 led_override_vals[2]; /* Alternates per blink-frame */
617 u8 led_override_phase; /* Just counts, LSB picks from vals[] */
618 atomic_t led_override_timer_active;
619 /* Used to flash LEDs in override mode */
620 struct timer_list led_override_timer;
621 struct xmit_wait cong_stats;
622 struct timer_list symerr_clear_timer;
623};
624
625/* Observers. Not to be taken lightly, possibly not to ship. */
626/*
627 * If a diag read or write is to (bottom <= offset <= top),
628 * the "hoook" is called, allowing, e.g. shadows to be
629 * updated in sync with the driver. struct diag_observer
630 * is the "visible" part.
631 */
632struct diag_observer;
633
634typedef int (*diag_hook) (struct qib_devdata *dd,
635 const struct diag_observer *op,
636 u32 offs, u64 *data, u64 mask, int only_32);
637
638struct diag_observer {
639 diag_hook hook;
640 u32 bottom;
641 u32 top;
642};
643
644extern int qib_register_observer(struct qib_devdata *dd,
645 const struct diag_observer *op);
646
647/* Only declared here, not defined. Private to diags */
648struct diag_observer_list_elt;
649
650/* device data struct now contains only "general per-device" info.
651 * fields related to a physical IB port are in a qib_pportdata struct,
652 * described above, while fields only used by a particular chip-type are in
653 * a qib_chipdata struct, whose contents are opaque to this file.
654 */
655struct qib_devdata {
656 struct qib_ibdev verbs_dev; /* must be first */
657 struct list_head list;
658 /* pointers to related structs for this device */
659 /* pci access data structure */
660 struct pci_dev *pcidev;
661 struct cdev *user_cdev;
662 struct cdev *diag_cdev;
663 struct device *user_device;
664 struct device *diag_device;
665
666 /* mem-mapped pointer to base of chip regs */
667 u64 __iomem *kregbase;
668 /* end of mem-mapped chip space excluding sendbuf and user regs */
669 u64 __iomem *kregend;
670 /* physical address of chip for io_remap, etc. */
671 resource_size_t physaddr;
672 /* qib_cfgctxts pointers */
673 struct qib_ctxtdata **rcd; /* Receive Context Data */
674
675 /* qib_pportdata, points to array of (physical) port-specific
676 * data structs, indexed by pidx (0..n-1)
677 */
678 struct qib_pportdata *pport;
679 struct qib_chip_specific *cspec; /* chip-specific */
680
681 /* kvirt address of 1st 2k pio buffer */
682 void __iomem *pio2kbase;
683 /* kvirt address of 1st 4k pio buffer */
684 void __iomem *pio4kbase;
685 /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
686 void __iomem *piobase;
687 /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
688 u64 __iomem *userbase;
689 /*
690 * points to area where PIOavail registers will be DMA'ed.
691 * Has to be on a page of its own, because the page will be
692 * mapped into user program space. This copy is *ONLY* ever
693 * written by DMA, not by the driver! Need a copy per device
694 * when we get to multiple devices
695 */
696 volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
697 /* physical address where updates occur */
698 dma_addr_t pioavailregs_phys;
699
700 /* device-specific implementations of functions needed by
701 * common code. Contrary to previous consensus, we can't
702 * really just point to a device-specific table, because we
703 * may need to "bend", e.g. *_f_put_tid
704 */
705 /* fallback to alternate interrupt type if possible */
706 int (*f_intr_fallback)(struct qib_devdata *);
707 /* hard reset chip */
708 int (*f_reset)(struct qib_devdata *);
709 void (*f_quiet_serdes)(struct qib_pportdata *);
710 int (*f_bringup_serdes)(struct qib_pportdata *);
711 int (*f_early_init)(struct qib_devdata *);
712 void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
713 void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
714 u32, unsigned long);
715 void (*f_cleanup)(struct qib_devdata *);
716 void (*f_setextled)(struct qib_pportdata *, u32);
717 /* fill out chip-specific fields */
718 int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
719 /* free irq */
720 void (*f_free_irq)(struct qib_devdata *);
721 struct qib_message_header *(*f_get_msgheader)
722 (struct qib_devdata *, __le32 *);
723 void (*f_config_ctxts)(struct qib_devdata *);
724 int (*f_get_ib_cfg)(struct qib_pportdata *, int);
725 int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
726 int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
727 int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
728 int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
729 u32 (*f_iblink_state)(u64);
730 u8 (*f_ibphys_portstate)(u64);
731 void (*f_xgxs_reset)(struct qib_pportdata *);
732 /* per chip actions needed for IB Link up/down changes */
733 int (*f_ib_updown)(struct qib_pportdata *, int, u64);
734 u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
735 /* Read/modify/write of GPIO pins (potentially chip-specific) */
736 int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
737 u32 mask);
738 /* Enable writes to config EEPROM (if supported) */
739 int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
740 /*
741 * modify rcvctrl shadow[s] and write to appropriate chip-regs.
742 * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
743 * (ctxt == -1) means "all contexts", only meaningful for
744 * clearing. Could remove if chip_spec shutdown properly done.
745 */
746 void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
747 int ctxt);
748 /* Read/modify/write sendctrl appropriately for op and port. */
749 void (*f_sendctrl)(struct qib_pportdata *, u32 op);
750 void (*f_set_intr_state)(struct qib_devdata *, u32);
751 void (*f_set_armlaunch)(struct qib_devdata *, u32);
752 void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
753 int (*f_late_initreg)(struct qib_devdata *);
754 int (*f_init_sdma_regs)(struct qib_pportdata *);
755 u16 (*f_sdma_gethead)(struct qib_pportdata *);
756 int (*f_sdma_busy)(struct qib_pportdata *);
757 void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
758 void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
759 void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
760 void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
761 void (*f_sdma_hw_start_up)(struct qib_pportdata *);
762 void (*f_sdma_init_early)(struct qib_pportdata *);
763 void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
764 void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
765 u32 (*f_hdrqempty)(struct qib_ctxtdata *);
766 u64 (*f_portcntr)(struct qib_pportdata *, u32);
767 u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
768 u64 **);
769 u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
770 char **, u64 **);
771 u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
772 void (*f_initvl15_bufs)(struct qib_devdata *);
773 void (*f_init_ctxt)(struct qib_ctxtdata *);
774 void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
775 struct qib_ctxtdata *);
776 void (*f_writescratch)(struct qib_devdata *, u32);
777 int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
778
779 char *boardname; /* human readable board info */
780
781 /* template for writing TIDs */
782 u64 tidtemplate;
783 /* value to write to free TIDs */
784 u64 tidinvalid;
785
786 /* number of registers used for pioavail */
787 u32 pioavregs;
788 /* device (not port) flags, basically device capabilities */
789 u32 flags;
790 /* last buffer for user use */
791 u32 lastctxt_piobuf;
792
793 /* saturating counter of (non-port-specific) device interrupts */
794 u32 int_counter;
795
796 /* pio bufs allocated per ctxt */
797 u32 pbufsctxt;
798 /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
799 u32 ctxts_extrabuf;
800 /*
801 * number of ctxts configured as max; zero is set to number chip
802 * supports, less gives more pio bufs/ctxt, etc.
803 */
804 u32 cfgctxts;
805
806 /*
807 * hint that we should update pioavailshadow before
808 * looking for a PIO buffer
809 */
810 u32 upd_pio_shadow;
811
812 /* internal debugging stats */
813 u32 maxpkts_call;
814 u32 avgpkts_call;
815 u64 nopiobufs;
816
817 /* PCI Vendor ID (here for NodeInfo) */
818 u16 vendorid;
819 /* PCI Device ID (here for NodeInfo) */
820 u16 deviceid;
821 /* for write combining settings */
822 unsigned long wc_cookie;
823 unsigned long wc_base;
824 unsigned long wc_len;
825
826 /* shadow copy of struct page *'s for exp tid pages */
827 struct page **pageshadow;
828 /* shadow copy of dma handles for exp tid pages */
829 dma_addr_t *physshadow;
830 u64 __iomem *egrtidbase;
831 spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
832 /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
833 spinlock_t uctxt_lock; /* rcd and user context changes */
834 /*
835 * per unit status, see also portdata statusp
836 * mapped readonly into user processes so they can get unit and
837 * IB link status cheaply
838 */
839 u64 *devstatusp;
840 char *freezemsg; /* freeze msg if hw error put chip in freeze */
841 u32 freezelen; /* max length of freezemsg */
842 /* timer used to prevent stats overflow, error throttling, etc. */
843 struct timer_list stats_timer;
844
845 /* timer to verify interrupts work, and fallback if possible */
846 struct timer_list intrchk_timer;
847 unsigned long ureg_align; /* user register alignment */
848
849 /*
850 * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
851 * pio_writing.
852 */
853 spinlock_t pioavail_lock;
854
855 /*
856 * Shadow copies of registers; size indicates read access size.
857 * Most of them are readonly, but some are write-only registers,
858 * where we manipulate the bits in the shadow copy, and then write
859 * the shadow copy to qlogic_ib.
860 *
861 * We deliberately make most of these 32 bits, since they have
862 * restricted range. For any that we read, we want to generate 32
863 * bit accesses, since Opteron will generate 2 separate 32 bit HT
864 * transactions for a 64 bit read, and we want to avoid unnecessary
865 * bus transactions.
866 */
867
868 /* This is the 64 bit group */
869
870 unsigned long pioavailshadow[6];
871 /* bitmap of send buffers available for the kernel to use with PIO. */
872 unsigned long pioavailkernel[6];
873 /* bitmap of send buffers which need to be disarmed. */
874 unsigned long pio_need_disarm[3];
875 /* bitmap of send buffers which are being written to. */
876 unsigned long pio_writing[3];
877 /* kr_revision shadow */
878 u64 revision;
879 /* Base GUID for device (from eeprom, network order) */
880 __be64 base_guid;
881
882 /*
883 * kr_sendpiobufbase value (chip offset of pio buffers), and the
884 * base of the 2KB buffers (user processes only use 2K)
885 */
886 u64 piobufbase;
887 u32 pio2k_bufbase;
888
889 /* these are the "32 bit" regs */
890
891 /* number of GUIDs in the flash for this interface */
892 u32 nguid;
893 /*
894 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
895 * all expect bit fields to be "unsigned long"
896 */
897 unsigned long rcvctrl; /* shadow per device rcvctrl */
898 unsigned long sendctrl; /* shadow per device sendctrl */
899
900 /* value we put in kr_rcvhdrcnt */
901 u32 rcvhdrcnt;
902 /* value we put in kr_rcvhdrsize */
903 u32 rcvhdrsize;
904 /* value we put in kr_rcvhdrentsize */
905 u32 rcvhdrentsize;
906 /* kr_ctxtcnt value */
907 u32 ctxtcnt;
908 /* kr_pagealign value */
909 u32 palign;
910 /* number of "2KB" PIO buffers */
911 u32 piobcnt2k;
912 /* size in bytes of "2KB" PIO buffers */
913 u32 piosize2k;
914 /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
915 u32 piosize2kmax_dwords;
916 /* number of "4KB" PIO buffers */
917 u32 piobcnt4k;
918 /* size in bytes of "4KB" PIO buffers */
919 u32 piosize4k;
920 /* kr_rcvegrbase value */
921 u32 rcvegrbase;
922 /* kr_rcvtidbase value */
923 u32 rcvtidbase;
924 /* kr_rcvtidcnt value */
925 u32 rcvtidcnt;
926 /* kr_userregbase */
927 u32 uregbase;
928 /* shadow the control register contents */
929 u32 control;
930
931 /* chip address space used by 4k pio buffers */
932 u32 align4k;
933 /* size of each rcvegrbuffer */
934 u32 rcvegrbufsize;
935 /* localbus width (1, 2, 4, 8, 16, 32) from config space */
936 u32 lbus_width;
937 /* localbus speed in MHz */
938 u32 lbus_speed;
939 int unit; /* unit # of this chip */
940
941 /* start of CHIP_SPEC move to chipspec, but need code changes */
942 /* low and high portions of MSI capability/vector */
943 u32 msi_lo;
944 /* saved after PCIe init for restore after reset */
945 u32 msi_hi;
946 /* MSI data (vector) saved for restore */
947 u16 msi_data;
948 /* so we can rewrite it after a chip reset */
949 u32 pcibar0;
950 /* so we can rewrite it after a chip reset */
951 u32 pcibar1;
952 u64 rhdrhead_intr_off;
953
954 /*
955 * ASCII serial number, from flash, large enough for original
956 * all digit strings, and longer QLogic serial number format
957 */
958 u8 serial[16];
959 /* human readable board version */
960 u8 boardversion[96];
961 u8 lbus_info[32]; /* human readable localbus info */
962 /* chip major rev, from qib_revision */
963 u8 majrev;
964 /* chip minor rev, from qib_revision */
965 u8 minrev;
966
967 /* Misc small ints */
968 /* Number of physical ports available */
969 u8 num_pports;
970 /* Lowest context number which can be used by user processes */
971 u8 first_user_ctxt;
972 u8 n_krcv_queues;
973 u8 qpn_mask;
974 u8 skip_kctxt_mask;
975
976 u16 rhf_offset; /* offset of RHF within receive header entry */
977
978 /*
979 * GPIO pins for twsi-connected devices, and device code for eeprom
980 */
981 u8 gpio_sda_num;
982 u8 gpio_scl_num;
983 u8 twsi_eeprom_dev;
984 u8 board_atten;
985
986 /* Support (including locks) for EEPROM logging of errors and time */
987 /* control access to actual counters, timer */
988 spinlock_t eep_st_lock;
989 /* control high-level access to EEPROM */
990 struct mutex eep_lock;
991 uint64_t traffic_wds;
992 /* active time is kept in seconds, but logged in hours */
993 atomic_t active_time;
994 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
995 uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
996 uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
997 uint16_t eep_hrs;
998 /*
999 * masks for which bits of errs, hwerrs that cause
1000 * each of the counters to increment.
1001 */
1002 struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
1003 struct qib_diag_client *diag_client;
1004 spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
1005 struct diag_observer_list_elt *diag_observer_list;
1006
1007 u8 psxmitwait_supported;
1008 /* cycle length of PS* counters in HW (in picoseconds) */
1009 u16 psxmitwait_check_rate;
1010};
1011
1012/* hol_state values */
1013#define QIB_HOL_UP 0
1014#define QIB_HOL_INIT 1
1015
1016#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1017#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1018#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1019#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1020#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1021
1022/* operation types for f_txchk_change() */
1023#define TXCHK_CHG_TYPE_DIS1 3
1024#define TXCHK_CHG_TYPE_ENAB1 2
1025#define TXCHK_CHG_TYPE_KERN 1
1026#define TXCHK_CHG_TYPE_USER 0
1027
1028#define QIB_CHASE_TIME msecs_to_jiffies(145)
1029#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1030
1031/* Private data for file operations */
1032struct qib_filedata {
1033 struct qib_ctxtdata *rcd;
1034 unsigned subctxt;
1035 unsigned tidcursor;
1036 struct qib_user_sdma_queue *pq;
1037 int rec_cpu_num; /* for cpu affinity; -1 if none */
1038};
1039
1040extern struct list_head qib_dev_list;
1041extern spinlock_t qib_devs_lock;
1042extern struct qib_devdata *qib_lookup(int unit);
1043extern u32 qib_cpulist_count;
1044extern unsigned long *qib_cpulist;
1045
1046extern unsigned qib_wc_pat;
1047int qib_init(struct qib_devdata *, int);
1048int init_chip_wc_pat(struct qib_devdata *dd, u32);
1049int qib_enable_wc(struct qib_devdata *dd);
1050void qib_disable_wc(struct qib_devdata *dd);
1051int qib_count_units(int *npresentp, int *nupp);
1052int qib_count_active_units(void);
1053
1054int qib_cdev_init(int minor, const char *name,
1055 const struct file_operations *fops,
1056 struct cdev **cdevp, struct device **devp);
1057void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
1058int qib_dev_init(void);
1059void qib_dev_cleanup(void);
1060
1061int qib_diag_add(struct qib_devdata *);
1062void qib_diag_remove(struct qib_devdata *);
1063void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
1064void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
1065
1066int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
1067void qib_bad_intrstatus(struct qib_devdata *);
1068void qib_handle_urcv(struct qib_devdata *, u64);
1069
1070/* clean up any per-chip chip-specific stuff */
1071void qib_chip_cleanup(struct qib_devdata *);
1072/* clean up any chip type-specific stuff */
1073void qib_chip_done(void);
1074
1075/* check to see if we have to force ordering for write combining */
1076int qib_unordered_wc(void);
1077void qib_pio_copy(void __iomem *to, const void *from, size_t count);
1078
1079void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
1080int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
1081void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
1082void qib_cancel_sends(struct qib_pportdata *);
1083
1084int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
1085int qib_setup_eagerbufs(struct qib_ctxtdata *);
1086void qib_set_ctxtcnt(struct qib_devdata *);
1087int qib_create_ctxts(struct qib_devdata *dd);
1088struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
1089void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
1090void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
1091
1092u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
1093int qib_reset_device(int);
1094int qib_wait_linkstate(struct qib_pportdata *, u32, int);
1095int qib_set_linkstate(struct qib_pportdata *, u8);
1096int qib_set_mtu(struct qib_pportdata *, u16);
1097int qib_set_lid(struct qib_pportdata *, u32, u8);
1098void qib_hol_down(struct qib_pportdata *);
1099void qib_hol_init(struct qib_pportdata *);
1100void qib_hol_up(struct qib_pportdata *);
1101void qib_hol_event(unsigned long);
1102void qib_disable_after_error(struct qib_devdata *);
1103int qib_set_uevent_bits(struct qib_pportdata *, const int);
1104
1105/* for use in system calls, where we want to know device type, etc. */
1106#define ctxt_fp(fp) \
1107 (((struct qib_filedata *)(fp)->private_data)->rcd)
1108#define subctxt_fp(fp) \
1109 (((struct qib_filedata *)(fp)->private_data)->subctxt)
1110#define tidcursor_fp(fp) \
1111 (((struct qib_filedata *)(fp)->private_data)->tidcursor)
1112#define user_sdma_queue_fp(fp) \
1113 (((struct qib_filedata *)(fp)->private_data)->pq)
1114
1115static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
1116{
1117 return ppd->dd;
1118}
1119
1120static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
1121{
1122 return container_of(dev, struct qib_devdata, verbs_dev);
1123}
1124
1125static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
1126{
1127 return dd_from_dev(to_idev(ibdev));
1128}
1129
1130static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
1131{
1132 return container_of(ibp, struct qib_pportdata, ibport_data);
1133}
1134
1135static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
1136{
1137 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1138 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
1139
1140 WARN_ON(pidx >= dd->num_pports);
1141 return &dd->pport[pidx].ibport_data;
1142}
1143
1144/*
1145 * values for dd->flags (_device_ related flags)
1146 */
1147#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
1148#define QIB_INITTED 0x2 /* chip and driver up and initted */
1149#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
1150#define QIB_PRESENT 0x8 /* chip accesses can be done */
1151#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
1152#define QIB_HAS_THRESH_UPDATE 0x40
1153#define QIB_HAS_SDMA_TIMEOUT 0x80
1154#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
1155#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA enabled */
1156#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
1157#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
1158#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
1159#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
1160#define QIB_BADINTR 0x8000 /* severe interrupt problems */
1161#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
1162#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
1163
1164/*
1165 * values for ppd->lflags (_ib_port_ related flags)
1166 */
1167#define QIBL_LINKV 0x1 /* IB link state valid */
1168#define QIBL_LINKDOWN 0x8 /* IB link is down */
1169#define QIBL_LINKINIT 0x10 /* IB link level is up */
1170#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
1171#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
1172/* leave a gap for more IB-link state */
1173#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
1174#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
1175#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
1176 * Do not try to bring up */
1177#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
1178
1179/* IB dword length mask in PBC (lower 11 bits); same for all chips */
1180#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1181
1182
1183/* ctxt_flag bit offsets */
1184 /* waiting for a packet to arrive */
1185#define QIB_CTXT_WAITING_RCV 2
1186 /* master has not finished initializing */
1187#define QIB_CTXT_MASTER_UNINIT 4
1188 /* waiting for an urgent packet to arrive */
1189#define QIB_CTXT_WAITING_URG 5
1190
1191/* free up any allocated data at closes */
1192void qib_free_data(struct qib_ctxtdata *dd);
1193void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
1194 u32, struct qib_ctxtdata *);
1195struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
1196 const struct pci_device_id *);
1197struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
1198 const struct pci_device_id *);
1199struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
1200 const struct pci_device_id *);
1201void qib_free_devdata(struct qib_devdata *);
1202struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
1203
1204#define QIB_TWSI_NO_DEV 0xFF
1205/* Below qib_twsi_ functions must be called with eep_lock held */
1206int qib_twsi_reset(struct qib_devdata *dd);
1207int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1208 int len);
1209int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1210 const void *buffer, int len);
1211void qib_get_eeprom_info(struct qib_devdata *);
1212int qib_update_eeprom_log(struct qib_devdata *dd);
1213void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1214void qib_dump_lookup_output_queue(struct qib_devdata *);
1215void qib_force_pio_avail_update(struct qib_devdata *);
1216void qib_clear_symerror_on_linkup(unsigned long opaque);
1217
1218/*
1219 * Set LED override, only the two LSBs have "public" meaning, but
1220 * any non-zero value substitutes them for the Link and LinkTrain
1221 * LED states.
1222 */
1223#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
1224#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
1225void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1226
1227/* send dma routines */
1228int qib_setup_sdma(struct qib_pportdata *);
1229void qib_teardown_sdma(struct qib_pportdata *);
1230void __qib_sdma_intr(struct qib_pportdata *);
1231void qib_sdma_intr(struct qib_pportdata *);
1232int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
1233 u32, struct qib_verbs_txreq *);
1234/* ppd->sdma_lock should be locked before calling this. */
1235int qib_sdma_make_progress(struct qib_pportdata *dd);
1236
1237/* must be called under qib_sdma_lock */
1238static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
1239{
1240 return ppd->sdma_descq_cnt -
1241 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
1242}
1243
1244static inline int __qib_sdma_running(struct qib_pportdata *ppd)
1245{
1246 return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
1247}
1248int qib_sdma_running(struct qib_pportdata *);
1249
1250void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1251void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1252
1253/*
1254 * number of words used for protocol header if not set by qib_userinit();
1255 */
1256#define QIB_DFLT_RCVHDRSIZE 9
1257
1258/*
1259 * We need to be able to handle an IB header of at least 24 dwords.
1260 * We need the rcvhdrq large enough to handle largest IB header, but
1261 * still have room for a 2KB MTU standard IB packet.
1262 * Additionally, some processor/memory controller combinations
1263 * benefit quite strongly from having the DMA'ed data be cacheline
1264 * aligned and a cacheline multiple, so we set the size to 32 dwords
1265 * (2 64-byte primary cachelines for pretty much all processors of
1266 * interest). The alignment hurts nothing, other than using somewhat
1267 * more memory.
1268 */
1269#define QIB_RCVHDR_ENTSIZE 32
1270
1271int qib_get_user_pages(unsigned long, size_t, struct page **);
1272void qib_release_user_pages(struct page **, size_t);
1273int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
1274int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
1275u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
1276void qib_sendbuf_done(struct qib_devdata *, unsigned);
1277
1278static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
1279{
1280 *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
1281}
1282
1283static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
1284{
1285 /*
1286 * volatile because it's a DMA target from the chip, routine is
1287 * inlined, and don't want register caching or reordering.
1288 */
1289 return (u32) le64_to_cpu(
1290 *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
1291}
1292
1293static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
1294{
1295 const struct qib_devdata *dd = rcd->dd;
1296 u32 hdrqtail;
1297
1298 if (dd->flags & QIB_NODMA_RTAIL) {
1299 __le32 *rhf_addr;
1300 u32 seq;
1301
1302 rhf_addr = (__le32 *) rcd->rcvhdrq +
1303 rcd->head + dd->rhf_offset;
1304 seq = qib_hdrget_seq(rhf_addr);
1305 hdrqtail = rcd->head;
1306 if (seq == rcd->seq_cnt)
1307 hdrqtail++;
1308 } else
1309 hdrqtail = qib_get_rcvhdrtail(rcd);
1310
1311 return hdrqtail;
1312}
1313
1314/*
1315 * sysfs interface.
1316 */
1317
1318extern const char ib_qib_version[];
1319
1320int qib_device_create(struct qib_devdata *);
1321void qib_device_remove(struct qib_devdata *);
1322
1323int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
1324 struct kobject *kobj);
1325int qib_verbs_register_sysfs(struct qib_devdata *);
1326void qib_verbs_unregister_sysfs(struct qib_devdata *);
1327/* Hook for sysfs read of QSFP */
1328extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
1329
1330int __init qib_init_qibfs(void);
1331int __exit qib_exit_qibfs(void);
1332
1333int qibfs_add(struct qib_devdata *);
1334int qibfs_remove(struct qib_devdata *);
1335
1336int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
1337int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
1338 const struct pci_device_id *);
1339void qib_pcie_ddcleanup(struct qib_devdata *);
1340int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
1341int qib_reinit_intr(struct qib_devdata *);
1342void qib_enable_intx(struct pci_dev *);
1343void qib_nomsi(struct qib_devdata *);
1344void qib_nomsix(struct qib_devdata *);
1345void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
1346void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
1347
1348/*
1349 * dma_addr wrappers - all 0's invalid for hw
1350 */
1351dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
1352 size_t, int);
1353const char *qib_get_unit_name(int unit);
1354
1355/*
1356 * Flush write combining store buffers (if present) and perform a write
1357 * barrier.
1358 */
1359#if defined(CONFIG_X86_64)
1360#define qib_flush_wc() asm volatile("sfence" : : : "memory")
1361#else
1362#define qib_flush_wc() wmb() /* no reorder around wc flush */
1363#endif
1364
1365/* global module parameter variables */
1366extern unsigned qib_ibmtu;
1367extern ushort qib_cfgctxts;
1368extern ushort qib_num_cfg_vls;
1369extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
1370extern unsigned qib_n_krcv_queues;
1371extern unsigned qib_sdma_fetch_arb;
1372extern unsigned qib_compat_ddr_negotiate;
1373extern int qib_special_trigger;
1374
1375extern struct mutex qib_mutex;
1376
1377/* Number of seconds before our card status check... */
1378#define STATUS_TIMEOUT 60
1379
1380#define QIB_DRV_NAME "ib_qib"
1381#define QIB_USER_MINOR_BASE 0
1382#define QIB_TRACE_MINOR 127
1383#define QIB_DIAGPKT_MINOR 128
1384#define QIB_DIAG_MINOR_BASE 129
1385#define QIB_NMINORS 255
1386
1387#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1388#define PCI_VENDOR_ID_QLOGIC 0x1077
1389#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1390#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1391#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1392
1393/*
1394 * qib_early_err is used (only!) to print early errors before devdata is
1395 * allocated, or when dd->pcidev may not be valid, and at the tail end of
1396 * cleanup when devdata may have been freed, etc. qib_dev_porterr is
1397 * the same as qib_dev_err, but is used when the message really needs
1398 * the IB port# to be definitive as to what's happening.
1399 * All of these go to the trace log, and the trace log entry is done
1400 * first to avoid possible serial port delays from printk.
1401 */
1402#define qib_early_err(dev, fmt, ...) \
1403 do { \
1404 dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
1405 } while (0)
1406
1407#define qib_dev_err(dd, fmt, ...) \
1408 do { \
1409 dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
1410 qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
1411 } while (0)
1412
1413#define qib_dev_porterr(dd, port, fmt, ...) \
1414 do { \
1415 dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
1416 qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
1417 ##__VA_ARGS__); \
1418 } while (0)
1419
1420#define qib_devinfo(pcidev, fmt, ...) \
1421 do { \
1422 dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
1423 } while (0)
1424
1425/*
1426 * this is used for formatting hw error messages...
1427 */
1428struct qib_hwerror_msgs {
1429 u64 mask;
1430 const char *msg;
1431};
1432
1433#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1434
1435/* in qib_intr.c... */
1436void qib_format_hwerrors(u64 hwerrs,
1437 const struct qib_hwerror_msgs *hwerrmsgs,
1438 size_t nhwerrmsgs, char *msg, size_t lmsg);
1439#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
new file mode 100644
index 000000000000..e16cb6f7de2c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_6120_regs.h
@@ -0,0 +1,977 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
34
35#define QIB_6120_Revision_OFFS 0x0
36#define QIB_6120_Revision_R_Simulator_LSB 0x3F
37#define QIB_6120_Revision_R_Simulator_RMASK 0x1
38#define QIB_6120_Revision_Reserved_LSB 0x28
39#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
40#define QIB_6120_Revision_BoardID_LSB 0x20
41#define QIB_6120_Revision_BoardID_RMASK 0xFF
42#define QIB_6120_Revision_R_SW_LSB 0x18
43#define QIB_6120_Revision_R_SW_RMASK 0xFF
44#define QIB_6120_Revision_R_Arch_LSB 0x10
45#define QIB_6120_Revision_R_Arch_RMASK 0xFF
46#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
47#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
48#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
49#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
50
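Each field in this file is described by an _LSB (starting bit position) and an _RMASK (right-justified mask) pair. A hedged sketch of the shift-and-mask idiom that naming implies, using the Revision fields above:

/* Assumed usage pattern: shift down to the LSB, then apply the RMASK. */
static inline u64 example_get_boardid(u64 revision)
{
	return (revision >> QIB_6120_Revision_BoardID_LSB) &
		QIB_6120_Revision_BoardID_RMASK;
}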
51#define QIB_6120_Control_OFFS 0x8
52#define QIB_6120_Control_TxLatency_LSB 0x4
53#define QIB_6120_Control_TxLatency_RMASK 0x1
54#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
55#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
56#define QIB_6120_Control_LinkEn_LSB 0x2
57#define QIB_6120_Control_LinkEn_RMASK 0x1
58#define QIB_6120_Control_FreezeMode_LSB 0x1
59#define QIB_6120_Control_FreezeMode_RMASK 0x1
60#define QIB_6120_Control_SyncReset_LSB 0x0
61#define QIB_6120_Control_SyncReset_RMASK 0x1
62
63#define QIB_6120_PageAlign_OFFS 0x10
64
65#define QIB_6120_PortCnt_OFFS 0x18
66
67#define QIB_6120_SendRegBase_OFFS 0x30
68
69#define QIB_6120_UserRegBase_OFFS 0x38
70
71#define QIB_6120_CntrRegBase_OFFS 0x40
72
73#define QIB_6120_Scratch_OFFS 0x48
74#define QIB_6120_Scratch_TopHalf_LSB 0x20
75#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
76#define QIB_6120_Scratch_BottomHalf_LSB 0x0
77#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
78
79#define QIB_6120_IntBlocked_OFFS 0x60
80#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
81#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
82#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
83#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
84#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
85#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
86#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
87#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
88#define QIB_6120_IntBlocked_Reserved_LSB 0xF
89#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
90#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
91#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
92#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
93#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
94#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
95#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
96#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
97#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
98#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
99#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
100#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
101#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
102#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
103#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
104#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
105#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
106#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
107#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
108#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
109#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
110#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
111#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
112
113#define QIB_6120_IntMask_OFFS 0x68
114#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
115#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
116#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
117#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
118#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
119#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
120#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
121#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
122#define QIB_6120_IntMask_Reserved_LSB 0x11
123#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
124#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
125#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
126#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
127#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
128#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
129#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
130#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
131#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
132#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
133#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
134#define QIB_6120_IntMask_Reserved1_LSB 0x5
135#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
136#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
137#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
138#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
139#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
140#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
141#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
142#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
143#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
144#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
145#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
146
147#define QIB_6120_IntStatus_OFFS 0x70
148#define QIB_6120_IntStatus_Error_LSB 0x1F
149#define QIB_6120_IntStatus_Error_RMASK 0x1
150#define QIB_6120_IntStatus_PioSent_LSB 0x1E
151#define QIB_6120_IntStatus_PioSent_RMASK 0x1
152#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
153#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
154#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
155#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
156#define QIB_6120_IntStatus_Reserved_LSB 0xF
157#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
158#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
159#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
160#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
161#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
162#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
163#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
164#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
165#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
166#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
167#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
168#define QIB_6120_IntStatus_Reserved1_LSB 0x5
169#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
170#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
171#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
172#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
173#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
174#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
175#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
176#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
177#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
178#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
179#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
180
181#define QIB_6120_IntClear_OFFS 0x78
182#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
183#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
184#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
185#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
186#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
187#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
188#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
189#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
190#define QIB_6120_IntClear_Reserved_LSB 0xF
191#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
192#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
193#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
194#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
195#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
196#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
197#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
198#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
199#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
200#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
201#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
202#define QIB_6120_IntClear_Reserved1_LSB 0x5
203#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
204#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
205#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
206#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
207#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
208#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
209#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
210#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
211#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
212#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
213#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
214
215#define QIB_6120_ErrMask_OFFS 0x80
216#define QIB_6120_ErrMask_Reserved_LSB 0x34
217#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
218#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
219#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
220#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
221#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
222#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
223#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
224#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
225#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
226#define QIB_6120_ErrMask_Reserved1_LSB 0x26
227#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
228#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
229#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
230#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
231#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
232#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
233#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
234#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
235#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
236#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
237#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
238#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
239#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
240#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
241#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
242#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
243#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
244#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
245#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
246#define QIB_6120_ErrMask_Reserved2_LSB 0x12
247#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
248#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
249#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
250#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
251#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
252#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
253#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
254#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
255#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
256#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
257#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
258#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
259#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
260#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
261#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
262#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
263#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
264#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
265#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
266#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
267#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
268#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
269#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
270#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
271#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
272#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
273#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
274#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
275#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
276#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
277#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
278#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
279#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
280#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
281#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
282#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
283#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
284
285#define QIB_6120_ErrStatus_OFFS 0x88
286#define QIB_6120_ErrStatus_Reserved_LSB 0x34
287#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
288#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
289#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
290#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
291#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
292#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
293#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
294#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
295#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
296#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
297#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
298#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
299#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
300#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
301#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
302#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
303#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
304#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
305#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
306#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
307#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
308#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
309#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
310#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
311#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
312#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
313#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
314#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
315#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
316#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
317#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
318#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
319#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
320#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
321#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
322#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
323#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
324#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
325#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
326#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
327#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
328#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
329#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
330#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
331#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
332#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
333#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
334#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
335#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
336#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
337#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
338#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
339#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
340#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
341#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
342#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
343#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
344#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
345#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
346#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
347#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
348#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
349#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
350#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
351#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
352#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
353#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
354
355#define QIB_6120_ErrClear_OFFS 0x90
356#define QIB_6120_ErrClear_Reserved_LSB 0x34
357#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
358#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
359#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
360#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
361#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
362#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
363#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
364#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
365#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
366#define QIB_6120_ErrClear_Reserved1_LSB 0x26
367#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
368#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
369#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
370#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
371#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
372#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
373#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
374#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
375#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
376#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
377#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
378#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
379#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
380#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
381#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
382#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
383#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
384#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
385#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
386#define QIB_6120_ErrClear_Reserved2_LSB 0x12
387#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
388#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
389#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
390#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
391#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
392#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
393#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
394#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
395#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
396#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
397#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
398#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
399#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
400#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
401#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
402#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
403#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
404#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
405#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
406#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
407#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
408#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
409#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
410#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
411#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
412#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
413#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
414#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
415#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
416#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
417#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
418#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
419#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
420#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
421#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
422#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
423#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
424
425#define QIB_6120_HwErrMask_OFFS 0x98
426#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
427#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
428#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
429#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
430#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
431#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
432#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
433#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
434#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
435#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
436#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
437#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
438#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
439#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
440#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
441#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
442#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
443#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
444#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
445#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
446#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
447#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
448#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
449#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
450#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
451#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
452#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
453#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
454#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
455#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
456#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
457#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
458#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
459#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
460#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
461#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
462#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
463#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
464
465#define QIB_6120_HwErrStatus_OFFS 0xA0
466#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
467#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
468#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
469#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
470#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
471#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
472#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
473#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
474#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
475#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
476#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
477#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
478#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
479#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
480#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
481#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
482#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
483#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
484#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
485#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
486#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
487#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
488#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
489#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
490#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
491#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
492#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
493#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
494#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
495#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
496#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
497#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
498#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
499#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
500#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
501#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
502#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
503#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
504
505#define QIB_6120_HwErrClear_OFFS 0xA8
506#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
507#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
508#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
509#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
510#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
511#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
512#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
513#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
514#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
515#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
516#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
517#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
518#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
519#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
520#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
521#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
522#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
523#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
524#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
525#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
526#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
527#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
528#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
529#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
530#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
531#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
532#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
533#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
534#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
535#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
536#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
537#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
538#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
539#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
540#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
541#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
542#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
543#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
544
545#define QIB_6120_HwDiagCtrl_OFFS 0xB0
546#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
547#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
548#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
549#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
550#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
551#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
552#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
553#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
554#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
555#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
556#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
557#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
558#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
559#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
560#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
561#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
562#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
563#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
564#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
565#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
566#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
567#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
568
569#define QIB_6120_IBCStatus_OFFS 0xC0
570#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
571#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
572#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
573#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
574#define QIB_6120_IBCStatus_Reserved_LSB 0x7
575#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
576#define QIB_6120_IBCStatus_LinkState_LSB 0x4
577#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
578#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
579#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
580
581#define QIB_6120_IBCCtrl_OFFS 0xC8
582#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
583#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
584#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
585#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
586#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
587#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
588#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
589#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
590#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
591#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
592#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
593#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
594#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
595#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
596#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
597#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
598#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
599#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
600#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
601#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
602#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
603#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
604#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
605#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
606
607#define QIB_6120_EXTStatus_OFFS 0xD0
608#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
609#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
610#define QIB_6120_EXTStatus_Reserved_LSB 0x20
611#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
612#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
613#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
614#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
615#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
616#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
617#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
618#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
619#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
620
621#define QIB_6120_EXTCtrl_OFFS 0xD8
622#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
623#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
624#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
625#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
626#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
627#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
628#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
629#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
630#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
631#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
632#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
633#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
634#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
635#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
636
637#define QIB_6120_GPIOOut_OFFS 0xE0
638
639#define QIB_6120_GPIOMask_OFFS 0xE8
640
641#define QIB_6120_GPIOStatus_OFFS 0xF0
642
643#define QIB_6120_GPIOClear_OFFS 0xF8
644
645#define QIB_6120_RcvCtrl_OFFS 0x100
646#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
647#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
648#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
649#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
650#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
651#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
652#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
653#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
654#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
655#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
656#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
657#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
658#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
659#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
660
661#define QIB_6120_RcvBTHQP_OFFS 0x108
662#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
663#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
664#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
665#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
666#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
667#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
668
669#define QIB_6120_RcvHdrSize_OFFS 0x110
670
671#define QIB_6120_RcvHdrCnt_OFFS 0x118
672
673#define QIB_6120_RcvHdrEntSize_OFFS 0x120
674
675#define QIB_6120_RcvTIDBase_OFFS 0x128
676
677#define QIB_6120_RcvTIDCnt_OFFS 0x130
678
679#define QIB_6120_RcvEgrBase_OFFS 0x138
680
681#define QIB_6120_RcvEgrCnt_OFFS 0x140
682
683#define QIB_6120_RcvBufBase_OFFS 0x148
684
685#define QIB_6120_RcvBufSize_OFFS 0x150
686
687#define QIB_6120_RxIntMemBase_OFFS 0x158
688
689#define QIB_6120_RxIntMemSize_OFFS 0x160
690
691#define QIB_6120_RcvPartitionKey_OFFS 0x168
692
693#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
694#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
695#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
696#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
697#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
698
699#define QIB_6120_SendCtrl_OFFS 0x1C0
700#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
701#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
702#define QIB_6120_SendCtrl_Reserved_LSB 0x17
703#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
704#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
705#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
706#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
707#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
708#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
709#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
710#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
711#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
712#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
713#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
714#define QIB_6120_SendCtrl_Abort_LSB 0x0
715#define QIB_6120_SendCtrl_Abort_RMASK 0x1
716
717#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
718#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
719#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
720#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
721#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
722#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
723#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
724#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
725#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
726
727#define QIB_6120_SendPIOSize_OFFS 0x1D0
728#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
729#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
730#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
731#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
732#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
733#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
734#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
735#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
736
737#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
738#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
739#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
740#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
741#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
742#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
743#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
744#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
745#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
746
747#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
748#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
749#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
750#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
751#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
752
753#define QIB_6120_SendBufErr0_OFFS 0x240
754#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
755#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
756
757#define QIB_6120_RcvHdrAddr0_OFFS 0x280
758#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
759#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
760#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
761#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
762
763#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
764#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
765#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
766#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
767#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
768
769#define QIB_6120_SerdesCfg0_OFFS 0x3C0
770#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
771#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
772#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
773#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
774#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
775#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
776#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
777#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
778#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
779#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
780#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
781#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
782#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
783#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
784#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
785#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
786#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
787#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
788#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
789#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
790#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
791#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
792#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
793#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
794#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
795#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
796#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
797#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
798#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
799#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
800#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
801#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
802#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
803#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
804#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
805#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
806#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
807#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
808#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
809#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
810#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
811#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
812#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
813#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
814#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
815#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
816#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
817#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
818#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
819#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
820#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
821#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
822#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
823#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
824#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
825#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
826#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
827#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
828#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
829#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
830#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
831#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
832#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
833#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
834
835#define QIB_6120_SerdesStat_OFFS 0x3D0
836#define QIB_6120_SerdesStat_Reserved_LSB 0xC
837#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
838#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
839#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
840#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
841#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
842#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
843#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
844#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
845#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
846#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
847#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
848#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
849#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
850#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
851#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
852#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
853#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
854#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
855#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
856#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
857#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
858#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
859#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
860#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
861#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
862
863#define QIB_6120_XGXSCfg_OFFS 0x3D8
864#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
865#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
866#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
867#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
868#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
869#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
870#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
871#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
872#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
873#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
874#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
875#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
876#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
877#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
878#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
879#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
880#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
881#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
882
883#define QIB_6120_LBIntCnt_OFFS 0x12000
884
885#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
886
887#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
888
889#define QIB_6120_TxDataPktCnt_OFFS 0x12020
890
891#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
892
893#define QIB_6120_TxDwordCnt_OFFS 0x12030
894
895#define QIB_6120_TxLenErrCnt_OFFS 0x12038
896
897#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
898
899#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
900
901#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
902
903#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
904
905#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
906
907#define QIB_6120_RxDataPktCnt_OFFS 0x12068
908
909#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
910
911#define QIB_6120_RxDwordCnt_OFFS 0x12078
912
913#define QIB_6120_RxLenErrCnt_OFFS 0x12080
914
915#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
916
917#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
918
919#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
920
921#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
922
923#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
924
925#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
926
927#define QIB_6120_RxEBPCnt_OFFS 0x120B8
928
929#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
930
931#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
932
933#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
934
935#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
936
937#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
938
939#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
940
941#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
942
943#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
944
945#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
946
947#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
948
949#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
950
951#define QIB_6120_RcvEgrArray0_OFFS 0x14000
952
953#define QIB_6120_RcvTIDArray0_OFFS 0x54000
954
955#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
956
957#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
958
959#define QIB_6120_RcvBuf1_OFFS 0x72000
960
961#define QIB_6120_RcvBuf2_OFFS 0x75000
962
963#define QIB_6120_RcvFlags_OFFS 0x77000
964
965#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
966
967#define QIB_6120_RcvDMABuf_OFFS 0x7B000
968
969#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
970
971#define QIB_6120_PCIERcvBuf_OFFS 0x80000
972
973#define QIB_6120_PCIERetryBuf_OFFS 0x82000
974
975#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
976
977#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
new file mode 100644
index 000000000000..ea0bfd896f92
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -0,0 +1,156 @@
1#ifndef _QIB_7220_H
2#define _QIB_7220_H
3/*
4 * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/* grab register-defs auto-generated by HW */
36#include "qib_7220_regs.h"
37
38/* The number of eager receive TIDs for context zero. */
39#define IBA7220_KRCVEGRCNT 2048U
40
41#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
42#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
43#define IB_7220_LT_STATE_TXREVLANES 0x0d
44#define IB_7220_LT_STATE_CFGENH 0x10
45
46struct qib_chip_specific {
47 u64 __iomem *cregbase;
48 u64 *cntrs;
49 u64 *portcntrs;
50 spinlock_t sdepb_lock; /* serdes EPB bus */
51 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
52 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
53 u64 hwerrmask;
54 u64 errormask;
55 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
56 u64 gpio_mask; /* shadow the gpio mask register */
57 u64 extctrl; /* shadow the gpio output enable, etc... */
58 u32 ncntrs;
59 u32 nportcntrs;
60 u32 cntrnamelen;
61 u32 portcntrnamelen;
62 u32 numctxts;
63 u32 rcvegrcnt;
64 u32 autoneg_tries;
65 u32 serdes_first_init_done;
66 u32 sdmabufcnt;
67 u32 lastbuf_for_pio;
68 u32 updthresh; /* current AvailUpdThld */
69 u32 updthresh_dflt; /* default AvailUpdThld */
70 int irq;
71 u8 presets_needed;
72 u8 relock_timer_active;
73 char emsgbuf[128];
74 char sdmamsgbuf[192];
75 char bitsmsgbuf[64];
76 struct timer_list relock_timer;
77 unsigned int relock_interval; /* in jiffies */
78};
79
80struct qib_chippport_specific {
81 struct qib_pportdata pportdata;
82 wait_queue_head_t autoneg_wait;
83 struct delayed_work autoneg_work;
84 struct timer_list chase_timer;
85 /*
86 * These 5 fields are used to establish deltas for IB symbol
87 * errors and link recovery errors. They can be reported on
88 * some chips during link negotiation prior to INIT, and with
89 * DDR when faking DDR negotiations with non-IBTA switches.
90 * The chip counters are adjusted at driver unload if there is
91 * a non-zero delta.
92 */
93 u64 ibdeltainprog;
94 u64 ibsymdelta;
95 u64 ibsymsnap;
96 u64 iblnkerrdelta;
97 u64 iblnkerrsnap;
98 u64 ibcctrl; /* kr_ibcctrl shadow */
99 u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
100 u64 chase_end;
101 u32 last_delay_mult;
102};
103
104/*
105 * This header file provides the declarations and common definitions
106 * for (mostly) manipulation of the SerDes blocks within the IBA7220.
107 * The functions declared here should only be called from within other
108 * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
109 */
110int qib_sd7220_presets(struct qib_devdata *dd);
111int qib_sd7220_init(struct qib_devdata *dd);
112int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
113 int len, int offset);
114int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
115 int len, int offset);
116void qib_sd7220_clr_ibpar(struct qib_devdata *);
117/*
118 * Used below for the sdnum parameter, selecting one of the two sections
119 * used for PCIe, or the single SerDes used for IB, which is the
120 * only one currently used.
121 */
122#define IB_7220_SERDES 2
123
124int qib_sd7220_ib_load(struct qib_devdata *dd);
125int qib_sd7220_ib_vfy(struct qib_devdata *dd);
126
127static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
128 const u16 regno)
129{
130 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
131 return -1;
132 return readl((u32 __iomem *)&dd->kregbase[regno]);
133}
134
135static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
136 const u16 regno)
137{
138 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
139 return -1;
140
141 return readq(&dd->kregbase[regno]);
142}
143
144static inline void qib_write_kreg(const struct qib_devdata *dd,
145 const u16 regno, u64 value)
146{
147 if (dd->kregbase)
148 writeq(value, &dd->kregbase[regno]);
149}
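A hedged read-modify-write sketch built on the accessors above; regno is a placeholder register index rather than a real kreg name:

/* Illustrative only: OR bits into a 64-bit kernel register. */
static inline void example_kreg_set_bits(struct qib_devdata *dd, u16 regno,
					 u64 bits)
{
	u64 val = qib_read_kreg64(dd, regno);

	qib_write_kreg(dd, regno, val | bits);
}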
150
151void set_7220_relock_poll(struct qib_devdata *, int);
152void shutdown_7220_relock_poll(struct qib_devdata *);
153void toggle_7220_rclkrls(struct qib_devdata *);
154
155
156#endif /* _QIB_7220_H */
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
new file mode 100644
index 000000000000..0da5bb750e52
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220_regs.h
@@ -0,0 +1,1496 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
36
37#define QIB_7220_Revision_OFFS 0x0
38#define QIB_7220_Revision_R_Simulator_LSB 0x3F
39#define QIB_7220_Revision_R_Simulator_RMASK 0x1
40#define QIB_7220_Revision_R_Emulation_LSB 0x3E
41#define QIB_7220_Revision_R_Emulation_RMASK 0x1
42#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
43#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
44#define QIB_7220_Revision_BoardID_LSB 0x20
45#define QIB_7220_Revision_BoardID_RMASK 0xFF
46#define QIB_7220_Revision_R_SW_LSB 0x18
47#define QIB_7220_Revision_R_SW_RMASK 0xFF
48#define QIB_7220_Revision_R_Arch_LSB 0x10
49#define QIB_7220_Revision_R_Arch_RMASK 0xFF
50#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
51#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
52#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
53#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
54
55#define QIB_7220_Control_OFFS 0x8
56#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
57#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
58#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
59#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
60#define QIB_7220_Control_Reserved_LSB 0x5
61#define QIB_7220_Control_Reserved_RMASK 0x1
62#define QIB_7220_Control_TxLatency_LSB 0x4
63#define QIB_7220_Control_TxLatency_RMASK 0x1
64#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
65#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
66#define QIB_7220_Control_LinkEn_LSB 0x2
67#define QIB_7220_Control_LinkEn_RMASK 0x1
68#define QIB_7220_Control_FreezeMode_LSB 0x1
69#define QIB_7220_Control_FreezeMode_RMASK 0x1
70#define QIB_7220_Control_SyncReset_LSB 0x0
71#define QIB_7220_Control_SyncReset_RMASK 0x1
72
73#define QIB_7220_PageAlign_OFFS 0x10
74
75#define QIB_7220_PortCnt_OFFS 0x18
76
77#define QIB_7220_SendRegBase_OFFS 0x30
78
79#define QIB_7220_UserRegBase_OFFS 0x38
80
81#define QIB_7220_CntrRegBase_OFFS 0x40
82
83#define QIB_7220_Scratch_OFFS 0x48
84
85#define QIB_7220_IntMask_OFFS 0x68
86#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
87#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
88#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
89#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
90#define QIB_7220_IntMask_Reserved_LSB 0x31
91#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
92#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
93#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
94#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
95#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
96#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
97#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
98#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
99#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
100#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
101#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
102#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
103#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
104#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
105#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
106#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
107#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
108#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
109#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
110#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
111#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
112#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
113#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
114#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
115#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
116#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
117#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
118#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
119#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
120#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
121#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
122#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
123#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
124#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
125#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
126#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
127#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
128#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
129#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
130#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
131#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
132#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
133#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
134#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
135#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
136#define QIB_7220_IntMask_JIntMask_LSB 0x1A
137#define QIB_7220_IntMask_JIntMask_RMASK 0x1
138#define QIB_7220_IntMask_Reserved1_LSB 0x11
139#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
140#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
141#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
142#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
143#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
144#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
145#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
146#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
147#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
148#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
149#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
150#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
151#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
152#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
153#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
154#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
155#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
156#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
157#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
158#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
159#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
160#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
161#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
162#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
163#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
164#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
165#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
166#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
167#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
168#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
169#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
170#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
171#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
172#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
173#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
174
175#define QIB_7220_IntStatus_OFFS 0x70
176#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
177#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
178#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
179#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
180#define QIB_7220_IntStatus_Reserved_LSB 0x31
181#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
182#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
183#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
184#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
185#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
186#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
187#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
188#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
189#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
190#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
191#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
192#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
193#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
194#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
195#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
196#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
197#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
198#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
199#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
200#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
201#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
202#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
203#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
204#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
205#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
206#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
207#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
208#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
209#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
210#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
211#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
212#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
213#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
214#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
215#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
216#define QIB_7220_IntStatus_Error_LSB 0x1F
217#define QIB_7220_IntStatus_Error_RMASK 0x1
218#define QIB_7220_IntStatus_PioSent_LSB 0x1E
219#define QIB_7220_IntStatus_PioSent_RMASK 0x1
220#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
221#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
222#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
223#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
224#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
225#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
226#define QIB_7220_IntStatus_JInt_LSB 0x1A
227#define QIB_7220_IntStatus_JInt_RMASK 0x1
228#define QIB_7220_IntStatus_Reserved1_LSB 0x11
229#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
230#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
231#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
232#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
233#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
234#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
235#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
236#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
237#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
238#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
239#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
240#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
241#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
242#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
243#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
244#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
245#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
246#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
247#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
248#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
249#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
250#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
251#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
252#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
253#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
254#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
255#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
256#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
257#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
258#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
259#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
260#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
261#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
262#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
263#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
264
265#define QIB_7220_IntClear_OFFS 0x78
266#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
267#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
268#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
269#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
270#define QIB_7220_IntClear_Reserved_LSB 0x31
271#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
272#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
273#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
274#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
275#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
276#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
277#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
278#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
279#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
280#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
281#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
282#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
283#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
284#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
285#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
286#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
287#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
288#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
289#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
290#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
291#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
292#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
293#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
294#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
295#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
296#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
297#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
298#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
299#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
300#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
301#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
302#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
303#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
304#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
305#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
306#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
307#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
308#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
309#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
310#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
311#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
312#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
313#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
314#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
315#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
316#define QIB_7220_IntClear_JIntClear_LSB 0x1A
317#define QIB_7220_IntClear_JIntClear_RMASK 0x1
318#define QIB_7220_IntClear_Reserved1_LSB 0x11
319#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
320#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
321#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
322#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
323#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
324#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
325#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
326#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
327#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
328#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
329#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
330#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
331#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
332#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
333#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
334#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
335#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
336#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
337#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
338#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
339#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
340#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
341#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
342#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
343#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
344#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
345#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
346#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
347#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
348#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
349#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
350#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
351#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
352#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
353#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
354
355#define QIB_7220_ErrMask_OFFS 0x80
356#define QIB_7220_ErrMask_Reserved_LSB 0x36
357#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
358#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
359#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
360#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
361#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
362#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
363#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
364#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
365#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
366#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
367#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
368#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
369#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
370#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
371#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
372#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
373#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
374#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
375#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
376#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
377#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
378#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
379#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
380#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
381#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
382#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
383#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
384#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
385#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
386#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
387#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
388#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
389#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
390#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
391#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
392#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
393#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
394#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
395#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
396#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
397#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
398#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
399#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
400#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
401#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
402#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
403#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
404#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
405#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
406#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
407#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
408#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
409#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
410#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
411#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
412#define QIB_7220_ErrMask_Reserved1_LSB 0x12
413#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
414#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
415#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
416#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
417#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
418#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
419#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
420#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
421#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
422#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
423#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
424#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
425#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
426#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
427#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
428#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
429#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
430#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
431#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
432#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
433#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
434#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
435#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
436#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
437#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
438#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
439#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
440#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
441#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
442#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
443#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
444#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
445#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
446#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
447#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
448#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
449#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
450
451#define QIB_7220_ErrStatus_OFFS 0x88
452#define QIB_7220_ErrStatus_Reserved_LSB 0x36
453#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
454#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
455#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
456#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
457#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
458#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
459#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
460#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
461#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
462#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
463#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
464#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
465#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
466#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
467#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
468#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
469#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
470#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
471#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
472#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
473#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
474#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
475#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
476#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
477#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
478#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
479#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
480#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
481#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
482#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
483#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
484#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
485#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
486#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
487#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
488#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
489#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
490#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
491#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
492#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
493#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
494#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
495#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
496#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
497#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
498#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
499#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
500#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
501#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
502#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
503#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
504#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
505#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
506#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
507#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
508#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
509#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
510#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
511#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
512#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
513#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
514#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
515#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
516#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
517#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
518#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
519#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
520#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
521#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
522#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
523#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
524#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
525#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
526#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
527#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
528#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
529#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
530#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
531#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
532#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
533#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
534#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
535#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
536#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
537#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
538#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
539#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
540#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
541#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
542#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
543#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
544#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
545#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
546
547#define QIB_7220_ErrClear_OFFS 0x90
548#define QIB_7220_ErrClear_Reserved_LSB 0x36
549#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
550#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
551#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
552#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
553#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
554#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
555#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
556#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
557#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
558#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
559#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
560#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
561#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
562#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
563#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
564#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
565#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
566#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
567#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
568#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
569#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
570#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
571#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
572#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
573#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
574#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
575#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
576#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
577#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
578#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
579#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
580#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
581#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
582#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
583#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
584#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
585#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
586#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
587#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
588#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
589#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
590#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
591#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
592#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
593#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
594#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
595#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
596#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
597#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
598#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
599#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
600#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
601#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
602#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
603#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
604#define QIB_7220_ErrClear_Reserved1_LSB 0x12
605#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
606#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
607#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
608#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
609#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
610#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
611#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
612#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
613#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
614#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
615#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
616#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
617#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
618#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
619#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
620#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
621#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
622#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
623#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
624#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
625#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
626#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
627#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
628#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
629#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
630#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
631#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
632#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
633#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
634#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
635#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
636#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
637#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
638#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
639#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
640#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
641#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
642
643#define QIB_7220_HwErrMask_OFFS 0x98
644#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
645#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
646#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
647#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
648#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
649#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
650#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
651#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
652#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
653#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
654#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
655#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
656#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
657#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
658#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
659#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
660#define QIB_7220_HwErrMask_Reserved_LSB 0x37
661#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
662#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
663#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
664#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
665#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
666#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
667#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
668#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
669#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
670#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
671#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
672#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
673#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
674#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
675#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
676#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
677#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
678#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
679#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
680#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
681#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
682#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
683#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
684#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
685#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
686#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
687#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
688#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
689#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
690#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
691#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
692
693#define QIB_7220_HwErrStatus_OFFS 0xA0
694#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
695#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
696#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
697#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
698#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
699#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
700#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
701#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
702#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
703#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
704#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
705#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
706#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
707#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
708#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
709#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
710#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
711#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
712#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
713#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
714#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
715#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
716#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
717#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
718#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
719#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
720#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
721#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
722#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
723#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
724#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
725#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
726#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
727#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
728#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
729#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
730#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
731#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
732#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
733#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
734#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
735#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
736#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
737#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
738#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
739#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
740#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
741#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
742
743#define QIB_7220_HwErrClear_OFFS 0xA8
744#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
745#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
746#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
747#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
748#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
749#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
750#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
751#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
752#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
753#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
754#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
755#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
756#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
757#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
758#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
759#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
760#define QIB_7220_HwErrClear_Reserved_LSB 0x37
761#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
762#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
763#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
764#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
765#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
766#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
767#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
768#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
769#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
770#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
771#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
772#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
773#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
774#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
775#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
776#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
777#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
778#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
779#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
780#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
781#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
782#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
783#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
784#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
785#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
786#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
787#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
788#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
789#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
790#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
791#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
792
793#define QIB_7220_HwDiagCtrl_OFFS 0xB0
794#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
795#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
796#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
797#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
798#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
799#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
800#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
801#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
802#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
803#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
804#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
805#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
806#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
807#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
808#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
809#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
810#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
811#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
812#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
813#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
814#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
815#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
816#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
817#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
818#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
819#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
820#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
821#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
822#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
823#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
824
825#define QIB_7220_REG_0000B8_OFFS 0xB8
826
827#define QIB_7220_IBCStatus_OFFS 0xC0
828#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
829#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
830#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
831#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
832#define QIB_7220_IBCStatus_Reserved_LSB 0xE
833#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
834#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
835#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
836#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
837#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
838#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
839#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
840#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
841#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
842#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
843#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
844#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
845#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
846#define QIB_7220_IBCStatus_LinkState_LSB 0x5
847#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
848#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
849#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
850
851#define QIB_7220_IBCCtrl_OFFS 0xC8
852#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
853#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
854#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
855#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
856#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
857#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
858#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
859#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
860#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
861#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
862#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
863#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
864#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
865#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
866#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
867#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
868#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
869#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
870#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
871#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
872#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
873#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
874
875#define QIB_7220_EXTStatus_OFFS 0xD0
876#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
877#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
878#define QIB_7220_EXTStatus_Reserved_LSB 0x20
879#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
880#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
881#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
882#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
883#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
884#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
885#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
886#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
887#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
888
889#define QIB_7220_EXTCtrl_OFFS 0xD8
890#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
891#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
892#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
893#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
894#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
895#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
896#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
897#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
898#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
899#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
900#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
901#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
902#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
903#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
904
905#define QIB_7220_GPIOOut_OFFS 0xE0
906
907#define QIB_7220_GPIOMask_OFFS 0xE8
908
909#define QIB_7220_GPIOStatus_OFFS 0xF0
910
911#define QIB_7220_GPIOClear_OFFS 0xF8
912
913#define QIB_7220_RcvCtrl_OFFS 0x100
914#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
915#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
916#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
917#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
918#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
919#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
920#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
921#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
922#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
923#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
924#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
925#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
926#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
927#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
928
929#define QIB_7220_RcvBTHQP_OFFS 0x108
930#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
931#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
932#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
933#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
934
935#define QIB_7220_RcvHdrSize_OFFS 0x110
936
937#define QIB_7220_RcvHdrCnt_OFFS 0x118
938
939#define QIB_7220_RcvHdrEntSize_OFFS 0x120
940
941#define QIB_7220_RcvTIDBase_OFFS 0x128
942
943#define QIB_7220_RcvTIDCnt_OFFS 0x130
944
945#define QIB_7220_RcvEgrBase_OFFS 0x138
946
947#define QIB_7220_RcvEgrCnt_OFFS 0x140
948
949#define QIB_7220_RcvBufBase_OFFS 0x148
950
951#define QIB_7220_RcvBufSize_OFFS 0x150
952
953#define QIB_7220_RxIntMemBase_OFFS 0x158
954
955#define QIB_7220_RxIntMemSize_OFFS 0x160
956
957#define QIB_7220_RcvPartitionKey_OFFS 0x168
958
959#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
960#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
961#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
962#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
963#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
964
965#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
966#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
967#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
968#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
969#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
970
971#define QIB_7220_IBCDDRCtrl_OFFS 0x180
972#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
973#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
974#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
975#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
976#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
977#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
978#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
979#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
980#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
981#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
982#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
983#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
984#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
985#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
986#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
987#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
988#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
989#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
990#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
991#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
992#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
993#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
994#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
995#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
996#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
997#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
998#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
999#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
1000#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
1001#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
1002#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
1003#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
1004#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
1005#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
1006#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
1007#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
1008#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
1009#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
1010
1011#define QIB_7220_HRTBT_GUID_OFFS 0x188
1012
1013#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
1014#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
1015#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
1016#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
1017#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
1018
1019#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
1020#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
1021#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
1022#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
1023#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
1024#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
1025#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
1026#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
1027#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
1028#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
1029#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
1030
1031#define QIB_7220_JIntReload_OFFS 0x1B0
1032#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
1033#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
1034#define QIB_7220_JIntReload_J_reload_LSB 0x0
1035#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
1036
1037#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
1038#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
1039#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
1040#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
1041#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
1042#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
1043#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
1044#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
1045#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
1046#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
1047#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
1048#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
1049#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
1050#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
1051#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
1052
1053#define QIB_7220_SendCtrl_OFFS 0x1C0
1054#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
1055#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
1056#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
1057#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
1058#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
1059#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
1060#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
1061#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
1062#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
1063#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
1064#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
1065#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
1066#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
1067#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
1068#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
1069#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
1070#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
1071#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
1072#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
1073#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
1074#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
1075#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
1076#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
1077#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
1078#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
1079#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
1080#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
1081#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
1082#define QIB_7220_SendCtrl_Abort_LSB 0x0
1083#define QIB_7220_SendCtrl_Abort_RMASK 0x1
1084
1085#define QIB_7220_SendBufBase_OFFS 0x1C8
1086#define QIB_7220_SendBufBase_Reserved_LSB 0x35
1087#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
1088#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
1089#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
1090#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
1091#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
1092#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
1093#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
1094
1095#define QIB_7220_SendBufSize_OFFS 0x1D0
1096#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
1097#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
1098#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
1099#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
1100#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
1101#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
1102#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
1103#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
1104
1105#define QIB_7220_SendBufCnt_OFFS 0x1D8
1106#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
1107#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
1108#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
1109#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
1110#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
1111#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
1112#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
1113#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
1114
1115#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
1116#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
1117#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
1118#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
1119#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
1120
1121#define QIB_7220_TxIntMemBase_OFFS 0x1E8
1122
1123#define QIB_7220_TxIntMemSize_OFFS 0x1F0
1124
1125#define QIB_7220_SendDmaBase_OFFS 0x1F8
1126#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
1127#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
1128#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
1129#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
1130
1131#define QIB_7220_SendDmaLenGen_OFFS 0x200
1132#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
1133#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
1134#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
1135#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
1136#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
1137#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
1138#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
1139
1140#define QIB_7220_SendDmaTail_OFFS 0x208
1141#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
1142#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
1143#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
1144#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
1145
1146#define QIB_7220_SendDmaHead_OFFS 0x210
1147#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
1148#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
1149#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
1150#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
1151#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
1152#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
1153#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
1154#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
1155
1156#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
1157#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
1158#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
1159#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
1160#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
1161
1162#define QIB_7220_SendDmaBufMask0_OFFS 0x220
1163#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
1164#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
1165
1166#define QIB_7220_SendDmaStatus_OFFS 0x238
1167#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
1168#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
1169#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
1170#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
1171#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
1172#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
1173#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
1174#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
1175#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
1176#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
1177#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
1178#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
1179#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
1180#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
1181#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
1182#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
1183#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
1184#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
1185#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
1186#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
1187#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
1188#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
1189#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
1190#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
1191#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
1192#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
1193#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
1194#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
1195#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
1196#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
1197#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
1198#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
1199
1200#define QIB_7220_SendBufErr0_OFFS 0x240
1201#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
1202#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
1203
1204#define QIB_7220_RcvHdrAddr0_OFFS 0x270
1205#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
1206#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
1207#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
1208#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
1209
1210#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
1211#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
1212#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
1213#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
1214#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
1215
1216#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
1217#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
1218#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
1219#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
1220#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
1221#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
1222#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
1223
1224#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
1225#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
1226#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
1227#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
1228#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
1229#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
1230#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
1231#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
1232#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
1233#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
1234#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
1235#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
1236#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
1237#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
1238#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
1239#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
1240#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
1241#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
1242#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
1243#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
1244#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
1245
1246#define QIB_7220_XGXSCfg_OFFS 0x3D8
1247#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
1248#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
1249#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
1250#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
1251#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
1252#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
1253#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
1254#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
1255#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
1256#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
1257#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
1258#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
1259#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
1260#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
1261
1262#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
1263#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
1264#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
1265#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
1266#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
1267#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
1268#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
1269#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
1270#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
1271#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
1272#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
1273#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
1274#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
1275#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
1276#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
1277#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
1278#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
1279#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
1280#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
1281#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
1282#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
1283#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
1284#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
1285#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
1286#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
1287#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
1288#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
1289#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
1290#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
1291#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
1292#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
1293
1294#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
1295#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
1296#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
1297#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
1298#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
1299#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
1300#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
1301#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
1302#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
1303
1304#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
1305#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
1306#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
1307#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
1308#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
1309#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
1310#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
1311#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
1312#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
1313#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
1314#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
1315#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
1316#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
1317#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
1318#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
1319#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
1320#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
1321#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
1322#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
1323
1324#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
1325#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
1326#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
1327#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
1328#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
1329
1330#define QIB_7220_LBIntCnt_OFFS 0x13000
1331
1332#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
1333
1334#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
1335
1336#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
1337
1338#define QIB_7220_TxDataPktCnt_OFFS 0x13020
1339
1340#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
1341
1342#define QIB_7220_TxDwordCnt_OFFS 0x13030
1343
1344#define QIB_7220_TxLenErrCnt_OFFS 0x13038
1345
1346#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
1347
1348#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
1349
1350#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
1351
1352#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
1353
1354#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
1355
1356#define QIB_7220_RxDataPktCnt_OFFS 0x13068
1357
1358#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
1359
1360#define QIB_7220_RxDwordCnt_OFFS 0x13078
1361
1362#define QIB_7220_RxLenErrCnt_OFFS 0x13080
1363
1364#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
1365
1366#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
1367
1368#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
1369
1370#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
1371
1372#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
1373
1374#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
1375
1376#define QIB_7220_RxEBPCnt_OFFS 0x130B8
1377
1378#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
1379
1380#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
1381
1382#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
1383
1384#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
1385
1386#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
1387
1388#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
1389
1390#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
1391
1392#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
1393
1394#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
1395
1396#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
1397
1398#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
1399
1400#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
1401
1402#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
1403
1404#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
1405
1406#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
1407
1408#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
1409
1410#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
1411
1412#define QIB_7220_CNT_0131C8_OFFS 0x131C8
1413
1414#define QIB_7220_PSStat_OFFS 0x13200
1415
1416#define QIB_7220_PSStart_OFFS 0x13208
1417
1418#define QIB_7220_PSInterval_OFFS 0x13210
1419
1420#define QIB_7220_PSRcvDataCount_OFFS 0x13218
1421
1422#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
1423
1424#define QIB_7220_PSXmitDataCount_OFFS 0x13228
1425
1426#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
1427
1428#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
1429
1430#define QIB_7220_CNT_013240_OFFS 0x13240
1431
1432#define QIB_7220_RcvEgrArray_OFFS 0x14000
1433
1434#define QIB_7220_MEM_038000_OFFS 0x38000
1435
1436#define QIB_7220_RcvTIDArray0_OFFS 0x53000
1437
1438#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
1439
1440#define QIB_7220_MEM_064480_OFFS 0x64480
1441
1442#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
1443
1444#define QIB_7220_MEM_064C80_OFFS 0x64C80
1445
1446#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
1447
1448#define QIB_7220_MEM_065080_OFFS 0x65080
1449
1450#define QIB_7220_ScoreBoard_OFFS 0x65400
1451
1452#define QIB_7220_MEM_065440_OFFS 0x65440
1453
1454#define QIB_7220_DescriptorFIFO_OFFS 0x65800
1455
1456#define QIB_7220_MEM_065880_OFFS 0x65880
1457
1458#define QIB_7220_RcvBuf1_OFFS 0x72000
1459
1460#define QIB_7220_MEM_074800_OFFS 0x74800
1461
1462#define QIB_7220_RcvBuf2_OFFS 0x75000
1463
1464#define QIB_7220_MEM_076400_OFFS 0x76400
1465
1466#define QIB_7220_RcvFlags_OFFS 0x77000
1467
1468#define QIB_7220_MEM_078400_OFFS 0x78400
1469
1470#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
1471
1472#define QIB_7220_MEM_07A400_OFFS 0x7A400
1473
1474#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
1475
1476#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
1477
1478#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
1479
1480#define QIB_7220_MEM_07D400_OFFS 0x7D400
1481
1482#define QIB_7220_PCIERcvBuf_OFFS 0x80000
1483
1484#define QIB_7220_PCIERetryBuf_OFFS 0x84000
1485
1486#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
1487
1488#define QIB_7220_PCIECplBuf_OFFS 0x90000
1489
1490#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
1491
1492#define QIB_7220_MEM_095000_OFFS 0x95000
1493
1494#define QIB_7220_SendBuf0_MA_OFFS 0x100000
1495
1496#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
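For orientation, the generated names above follow a fixed pattern: <reg>_OFFS is the register's byte offset from the mapped register base, <reg>_<field>_LSB is the field's starting bit position, and <reg>_<field>_RMASK is the mask to apply after shifting the field down to bit 0. A minimal sketch of that convention, assuming an ioremap()ed register base called kregbase and a hypothetical helper (neither of which is part of the driver itself):

/*
 * Illustrative only: combine the generated _OFFS/_LSB/_RMASK triplets to
 * read one field out of a 64-bit chip register.  "kregbase" stands for an
 * ioremap()ed mapping of the register BAR; the helper names are made up.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline u64 qib_7220_read_field(void __iomem *kregbase,
				      unsigned long offs, u32 lsb, u64 rmask)
{
	/* Read the register, shift the field to bit 0, mask it off. */
	return (readq(kregbase + offs) >> lsb) & rmask;
}

/* Example: is the SDMA scoreboard currently empty? */
static bool qib_7220_sdma_scb_empty(void __iomem *kregbase)
{
	return qib_7220_read_field(kregbase,
				   QIB_7220_SendDmaStatus_OFFS,
				   QIB_7220_SendDmaStatus_ScbEmpty_LSB,
				   QIB_7220_SendDmaStatus_ScbEmpty_RMASK);
}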
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
new file mode 100644
index 000000000000..a97440ba924c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -0,0 +1,3163 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
34
35#define QIB_7322_Revision_OFFS 0x0
36#define QIB_7322_Revision_DEF 0x0000000002010601
37#define QIB_7322_Revision_R_Simulator_LSB 0x3F
38#define QIB_7322_Revision_R_Simulator_MSB 0x3F
39#define QIB_7322_Revision_R_Simulator_RMASK 0x1
40#define QIB_7322_Revision_R_Emulation_LSB 0x3E
41#define QIB_7322_Revision_R_Emulation_MSB 0x3E
42#define QIB_7322_Revision_R_Emulation_RMASK 0x1
43#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
44#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
45#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
46#define QIB_7322_Revision_BoardID_LSB 0x20
47#define QIB_7322_Revision_BoardID_MSB 0x27
48#define QIB_7322_Revision_BoardID_RMASK 0xFF
49#define QIB_7322_Revision_R_SW_LSB 0x18
50#define QIB_7322_Revision_R_SW_MSB 0x1F
51#define QIB_7322_Revision_R_SW_RMASK 0xFF
52#define QIB_7322_Revision_R_Arch_LSB 0x10
53#define QIB_7322_Revision_R_Arch_MSB 0x17
54#define QIB_7322_Revision_R_Arch_RMASK 0xFF
55#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
56#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
57#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
58#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
59#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
60#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
61
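As a worked example of the field macros, the documented default value 0x0000000002010601 decodes to R_SW = 0x02, R_Arch = 0x01, R_ChipRevMajor = 0x06 and R_ChipRevMinor = 0x01. A small sketch using only the macros defined above (the helper macro and function names are illustrative, not driver code):

#include <linux/printk.h>
#include <linux/types.h>

/* Extract one Revision field by pasting the generated macro names. */
#define QIB_7322_REV_FIELD(val, field) \
	(((val) >> QIB_7322_Revision_##field##_LSB) & \
	 QIB_7322_Revision_##field##_RMASK)

static void qib_7322_decode_default_revision(void)
{
	u64 rev = QIB_7322_Revision_DEF;	/* 0x0000000002010601 */

	/* sw = 0x02, arch = 0x01, major = 0x06, minor = 0x01 */
	u8 sw    = QIB_7322_REV_FIELD(rev, R_SW);
	u8 arch  = QIB_7322_REV_FIELD(rev, R_Arch);
	u8 major = QIB_7322_REV_FIELD(rev, R_ChipRevMajor);
	u8 minor = QIB_7322_REV_FIELD(rev, R_ChipRevMinor);

	pr_info("7322 default rev: sw %u arch %u chip %u.%u\n",
		sw, arch, major, minor);
}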
62#define QIB_7322_Control_OFFS 0x8
63#define QIB_7322_Control_DEF 0x0000000000000000
64#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
65#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
66#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
67#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
68#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
69#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
70#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
71#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
72#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
73#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
74#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
75#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
76#define QIB_7322_Control_FreezeMode_LSB 0x1
77#define QIB_7322_Control_FreezeMode_MSB 0x1
78#define QIB_7322_Control_FreezeMode_RMASK 0x1
79#define QIB_7322_Control_SyncReset_LSB 0x0
80#define QIB_7322_Control_SyncReset_MSB 0x0
81#define QIB_7322_Control_SyncReset_RMASK 0x1
82
83#define QIB_7322_PageAlign_OFFS 0x10
84#define QIB_7322_PageAlign_DEF 0x0000000000001000
85
86#define QIB_7322_ContextCnt_OFFS 0x18
87#define QIB_7322_ContextCnt_DEF 0x0000000000000012
88
89#define QIB_7322_Scratch_OFFS 0x20
90#define QIB_7322_Scratch_DEF 0x0000000000000000
91
92#define QIB_7322_CntrRegBase_OFFS 0x28
93#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
94
95#define QIB_7322_SendRegBase_OFFS 0x30
96#define QIB_7322_SendRegBase_DEF 0x0000000000003000
97
98#define QIB_7322_UserRegBase_OFFS 0x38
99#define QIB_7322_UserRegBase_DEF 0x0000000000200000
100
101#define QIB_7322_IntMask_OFFS 0x68
102#define QIB_7322_IntMask_DEF 0x0000000000000000
103#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
104#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
105#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
106#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
107#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
108#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
109#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
110#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
111#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
112#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
113#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
114#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
115#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
116#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
117#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
118#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
119#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
120#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
121#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
122#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
123#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
124#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
125#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
126#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
127#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
128#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
129#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
130#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
131#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
132#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
133#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
134#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
135#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
136#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
137#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
138#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
139#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
140#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
141#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
142#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
143#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
144#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
145#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
146#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
147#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
148#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
149#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
150#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
151#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
152#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
153#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
154#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
155#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
156#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
157#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
158#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
159#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
160#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
161#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
162#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
163#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
164#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
165#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
166#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
167#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
168#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
169#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
170#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
171#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
172#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
173#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
174#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
175#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
176#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
177#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
178#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
179#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
180#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
181#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
182#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
183#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
184#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
185#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
186#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
187#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
188#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
189#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
190#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
191#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
192#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
193#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
194#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
195#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
196#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
197#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
198#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
199#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
200#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
201#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
202#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
203#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
204#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
205#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
206#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
207#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
208#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
209#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
210#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
211#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
212#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
213#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
214#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
215#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
216#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
217#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
218#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
219#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
220#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
221#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
222#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
223#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
224#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
225#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
226#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
227#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
228#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
229#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
230#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
231#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
232#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
233#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
234#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
235#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
236#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
237#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
238#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
239#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
240#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
241#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
242#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
243#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
244#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
245#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
246#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
247#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
248#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
249#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
250#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
251#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
252#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
253#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
254#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
255#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
256
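Each IntMask bit above gates one interrupt source. A minimal sketch of composing a mask value from the _LSB macros, assuming the usual convention that a 1 enables the corresponding source (the helper name is illustrative, not driver code):

#include <linux/types.h>

/* Illustrative only: enable the port-0 error interrupt and the RcvAvail
 * interrupt for receive context 0 in a single IntMask value.
 */
static u64 qib_7322_example_intmask(void)
{
	u64 mask = 0;

	mask |= 1ULL << QIB_7322_IntMask_ErrIntMask_0_LSB;	/* bit 30 */
	mask |= 1ULL << QIB_7322_IntMask_RcvAvail0IntMask_LSB;	/* bit 0  */

	return mask;	/* value that would be written to IntMask at 0x68 */
}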
257#define QIB_7322_IntStatus_OFFS 0x70
258#define QIB_7322_IntStatus_DEF 0x0000000000000000
259#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
260#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
261#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
262#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
263#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
264#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
265#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
266#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
267#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
268#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
269#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
270#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
271#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
272#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
273#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
274#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
275#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
276#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
277#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
278#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
279#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
280#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
281#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
282#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
283#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
284#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
285#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
286#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
287#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
288#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
289#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
290#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
291#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
292#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
293#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
294#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
295#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
296#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
297#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
298#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
299#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
300#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
301#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
302#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
303#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
304#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
305#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
306#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
307#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
308#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
309#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
310#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
311#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
312#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
313#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
314#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
315#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
316#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
317#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
318#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
319#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
320#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
321#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
322#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
323#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
324#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
325#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
326#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
327#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
328#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
329#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
330#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
331#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
332#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
333#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
334#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
335#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
336#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
337#define QIB_7322_IntStatus_Err_1_LSB 0x1F
338#define QIB_7322_IntStatus_Err_1_MSB 0x1F
339#define QIB_7322_IntStatus_Err_1_RMASK 0x1
340#define QIB_7322_IntStatus_Err_0_LSB 0x1E
341#define QIB_7322_IntStatus_Err_0_MSB 0x1E
342#define QIB_7322_IntStatus_Err_0_RMASK 0x1
343#define QIB_7322_IntStatus_Err_LSB 0x1D
344#define QIB_7322_IntStatus_Err_MSB 0x1D
345#define QIB_7322_IntStatus_Err_RMASK 0x1
346#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
347#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
348#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
349#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
350#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
351#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
352#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
353#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
354#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
355#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
356#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
357#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
358#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
359#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
360#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
361#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
362#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
363#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
364#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
365#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
366#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
367#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
368#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
369#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
370#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
371#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
372#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
373#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
374#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
375#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
376#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
377#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
378#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
379#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
380#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
381#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
382#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
383#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
384#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
385#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
386#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
387#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
388#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
389#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
390#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
391#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
392#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
393#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
394#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
395#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
396#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
397#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
398#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
399#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
400#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
401#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
402#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
403#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
404#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
405#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
406#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
407#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
408#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
409#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
410#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
411#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
412
413#define QIB_7322_IntClear_OFFS 0x78
414#define QIB_7322_IntClear_DEF 0x0000000000000000
415#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
416#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
417#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
418#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
419#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
420#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
421#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
422#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
423#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
424#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
425#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
426#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
427#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
428#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
429#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
430#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
431#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
432#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
433#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
434#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
435#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
436#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
437#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
438#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
439#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
440#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
441#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
442#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
443#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
444#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
445#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
446#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
447#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
448#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
449#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
450#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
451#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
452#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
453#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
454#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
455#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
456#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
457#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
458#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
459#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
460#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
461#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
462#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
463#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
464#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
465#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
466#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
467#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
468#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
469#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
470#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
471#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
472#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
473#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
474#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
475#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
476#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
477#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
478#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
479#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
480#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
481#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
482#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
483#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
484#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
485#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
486#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
487#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
488#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
489#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
490#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
491#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
492#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
493#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
494#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
495#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
496#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
497#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
498#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
499#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
500#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
501#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
502#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
503#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
504#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
505#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
506#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
507#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
508#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
509#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
510#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
511#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
512#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
513#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
514#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
515#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
516#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
517#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
518#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
519#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
520#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
521#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
522#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
523#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
524#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
525#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
526#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
527#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
528#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
529#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
530#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
531#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
532#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
533#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
534#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
535#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
536#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
537#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
538#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
539#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
540#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
541#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
542#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
543#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
544#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
545#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
546#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
547#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
548#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
549#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
550#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
551#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
552#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
553#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
554#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
555#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
556#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
557#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
558#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
559#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
560#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
561#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
562#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
563#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
564#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
565#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
566#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
567#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
568
569#define QIB_7322_ErrMask_OFFS 0x80
570#define QIB_7322_ErrMask_DEF 0x0000000000000000
571#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
572#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
573#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
574#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
575#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
576#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
577#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
578#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
579#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
580#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
581#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
582#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
583#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
584#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
585#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
586#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
587#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
588#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
589#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
590#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
591#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
592#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
593#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
594#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
595#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
596#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
597#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
598#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
599#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
600#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
601#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
602#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
603#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
604#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
605#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
606#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
607#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
608#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
609#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
610#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
611#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
612#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
613
614#define QIB_7322_ErrStatus_OFFS 0x88
615#define QIB_7322_ErrStatus_DEF 0x0000000000000000
616#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
617#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
618#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
619#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
620#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
621#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
622#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
623#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
624#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
625#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
626#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
627#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
628#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
629#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
630#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
631#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
632#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
633#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
634#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
635#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
636#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
637#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
638#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
639#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
640#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
641#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
642#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
643#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
644#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
645#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
646#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
647#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
648#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
649#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
650#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
651#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
652#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
653#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
654#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
655#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
656#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
657#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
658
659#define QIB_7322_ErrClear_OFFS 0x90
660#define QIB_7322_ErrClear_DEF 0x0000000000000000
661#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
662#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
663#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
664#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
665#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
666#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
667#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
668#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
669#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
670#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
671#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
672#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
673#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
674#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
675#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
676#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
677#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
678#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
679#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
680#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
681#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
682#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
683#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
684#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
685#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
686#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
687#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
688#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
689#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
690#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
691#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
692#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
693#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
694#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
695#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
696#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
697#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
698#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
699#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
700#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
701#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
702#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
703
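The ErrClear register above mirrors ErrStatus bit for bit. Assuming the write-1-to-clear behaviour that the *Clear naming suggests, acknowledging a single error condition could look like the following sketch (the helper and kregbase are illustrative, not driver code):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: acknowledge a "receive header queue full" error by
 * writing a 1 to its ErrClear bit.  "kregbase" stands for an ioremap()ed
 * mapping of the register BAR.
 */
static void qib_7322_clear_rcvhdrfull_err(void __iomem *kregbase)
{
	writeq(1ULL << QIB_7322_ErrClear_RcvHdrFullErrClear_LSB,
	       kregbase + QIB_7322_ErrClear_OFFS);
}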
704#define QIB_7322_HwErrMask_OFFS 0x98
705#define QIB_7322_HwErrMask_DEF 0x0000000000000000
706#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
707#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
708#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
709#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
710#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
711#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
712#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
713#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
714#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
715#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
716#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
717#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
718#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
719#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
720#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
721#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
722#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
723#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
724#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
725#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
726#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
727#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
728#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
729#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
730#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
731#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
732#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
733#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
734#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
735#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
736#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
737#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
738#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
739#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
740#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
741#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
742#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
743#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
744#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
745#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
746#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
747#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
748#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
749#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
750#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
751#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
752#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
753#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
754#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
755#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
756#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
757
758#define QIB_7322_HwErrStatus_OFFS 0xA0
759#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
760#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
761#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
762#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
763#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
764#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
765#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
766#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
767#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
768#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
769#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
770#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
771#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
772#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
773#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
774#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
775#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
776#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
777#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
778#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
779#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
780#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
781#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
782#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
783#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
784#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
785#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
786#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
787#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
788#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
789#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
790#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
791#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
792#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
793#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
794#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
795#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
796#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
797#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
798#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
799#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
800#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
801#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
802#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
803#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
804#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
805#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
806#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
807#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
808#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
809#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
810#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
811
812#define QIB_7322_HwErrClear_OFFS 0xA8
813#define QIB_7322_HwErrClear_DEF 0x0000000000000000
814#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
815#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
816#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
817#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
818#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
819#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
820#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
821#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
822#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
823#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
824#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
825#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
826#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
827#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
828#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
829#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
830#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
831#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
832#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
833#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
834#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
835#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
836#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
837#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
838#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
839#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
840#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
841#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
842#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
843#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
844#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
845#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
846#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
847#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
848#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
849#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
850#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
851#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
852#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
853#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
854#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
855#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
856#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
857#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
858#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
859#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
860#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
861#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
862#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
863#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
864#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
865
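The _LSB/_MSB/_RMASK triplets above describe bit fields within 64-bit registers: shift right by _LSB and mask with _RMASK to extract a field, shift left by _LSB to build one. A minimal sketch of that pattern for the HwErrStatus/HwErrClear pair follows; the readq/writeq accessors, the regs pointer, the function name, and the assumption that HwErrClear is write-1-to-clear are illustrative only, not taken from this patch.

	#include <linux/types.h>
	#include <linux/io.h>

	/* Sketch only: decode one bit of a HwErrStatus value and write the
	 * matching clear bit to HwErrClear, using the _LSB/_RMASK macros above.
	 */
	static int example_handle_cpl_timeout(void __iomem *regs, u64 hwerrs)
	{
		/* 1-bit field: shift down by _LSB, mask with _RMASK. */
		if (!((hwerrs >> QIB_7322_HwErrStatus_PcieCplTimeout_LSB) &
		      QIB_7322_HwErrStatus_PcieCplTimeout_RMASK))
			return 0;

		/* Assumed write-1-to-clear: set the same bit in HwErrClear. */
		writeq((u64)QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK <<
		       QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB,
		       regs + QIB_7322_HwErrClear_OFFS);
		return 1;
	}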
866#define QIB_7322_HwDiagCtrl_OFFS 0xB0
867#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
868#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
869#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
870#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
871#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
872#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
873#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
874#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
875#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
876#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
877#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
878#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
879#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
880#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
881#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
882#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
883#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
884#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
885#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
886#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
887#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
888#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
889#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
890#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
891#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
892
893#define QIB_7322_EXTStatus_OFFS 0xC0
894#define QIB_7322_EXTStatus_DEF 0x000000000000X000
895#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
896#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
897#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
898#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
899#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
900#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
901#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
902#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
903#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
904
905#define QIB_7322_EXTCtrl_OFFS 0xC8
906#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
907#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
908#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
909#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
910#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
911#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
912#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
913#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
914#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
915#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
916#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
917#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
918#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
919#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
920#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
921#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
922#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
923#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
924#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
925
926#define QIB_7322_GPIOOut_OFFS 0xE0
927#define QIB_7322_GPIOOut_DEF 0x0000000000000000
928
929#define QIB_7322_GPIOMask_OFFS 0xE8
930#define QIB_7322_GPIOMask_DEF 0x0000000000000000
931
932#define QIB_7322_GPIOStatus_OFFS 0xF0
933#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
934
935#define QIB_7322_GPIOClear_OFFS 0xF8
936#define QIB_7322_GPIOClear_DEF 0x0000000000000000
937
938#define QIB_7322_RcvCtrl_OFFS 0x100
939#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
940#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
941#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
942#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
943#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
944#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
945#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
946#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
947#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
948#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
949#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
950#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
951#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
952#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
953#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
954#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
955#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
956#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
957#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
958#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
959#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
960#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
961
962#define QIB_7322_RcvHdrSize_OFFS 0x110
963#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
964
965#define QIB_7322_RcvHdrCnt_OFFS 0x118
966#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
967
968#define QIB_7322_RcvHdrEntSize_OFFS 0x120
969#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
970
971#define QIB_7322_RcvTIDBase_OFFS 0x128
972#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
973
974#define QIB_7322_RcvTIDCnt_OFFS 0x130
975#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
976
977#define QIB_7322_RcvEgrBase_OFFS 0x138
978#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
979
980#define QIB_7322_RcvEgrCnt_OFFS 0x140
981#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
982
983#define QIB_7322_RcvBufBase_OFFS 0x148
984#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
985
986#define QIB_7322_RcvBufSize_OFFS 0x150
987#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
988
989#define QIB_7322_RxIntMemBase_OFFS 0x158
990#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
991
992#define QIB_7322_RxIntMemSize_OFFS 0x160
993#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
994
995#define QIB_7322_feature_mask_OFFS 0x190
996#define QIB_7322_feature_mask_DEF 0x00000000000000XX
997
998#define QIB_7322_active_feature_mask_OFFS 0x198
999#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
1000#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
1001#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
1002#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
1003#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
1004#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
1005#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
1006#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
1007#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
1008#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
1009#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
1010#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
1011#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
1012#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
1013#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
1014#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
1015#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
1016#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
1017#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
1018
1019#define QIB_7322_SendCtrl_OFFS 0x1C0
1020#define QIB_7322_SendCtrl_DEF 0x0000000000000000
1021#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
1022#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
1023#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
1024#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
1025#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
1026#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
1027#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
1028#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
1029#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
1030#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
1031#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
1032#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
1033#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
1034#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
1035#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
1036#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
1037#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
1038#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
1039#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
1040#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
1041#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
1042
1043#define QIB_7322_SendBufBase_OFFS 0x1C8
1044#define QIB_7322_SendBufBase_DEF 0x0018000000100000
1045#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
1046#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
1047#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
1048#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
1049#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
1050#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
1051
1052#define QIB_7322_SendBufSize_OFFS 0x1D0
1053#define QIB_7322_SendBufSize_DEF 0x0000108000000880
1054#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
1055#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
1056#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
1057#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
1058#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
1059#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
1060
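As a worked decode of a _DEF reset value against its field definitions: QIB_7322_SendBufSize_DEF is 0x0000108000000880, so (DEF >> 0x20) & 0x1FFF = 0x1080 = 4224 for Size_LargePIO and DEF & 0xFFF = 0x880 = 2176 for Size_SmallPIO. The user-space snippet below just reproduces that arithmetic, assuming the definitions above are in scope; it is an illustration, not driver code.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative decode of the SendBufSize reset value via the field
	 * macros above; prints 4224 and 2176.
	 */
	int main(void)
	{
		uint64_t def = QIB_7322_SendBufSize_DEF;
		unsigned large = (def >> QIB_7322_SendBufSize_Size_LargePIO_LSB) &
				 QIB_7322_SendBufSize_Size_LargePIO_RMASK;
		unsigned small = (def >> QIB_7322_SendBufSize_Size_SmallPIO_LSB) &
				 QIB_7322_SendBufSize_Size_SmallPIO_RMASK;

		printf("Size_LargePIO = %u, Size_SmallPIO = %u\n", large, small);
		return 0;
	}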
1061#define QIB_7322_SendBufCnt_OFFS 0x1D8
1062#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
1063#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
1064#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
1065#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
1066#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
1067#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
1068#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
1069
1070#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
1071#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
1072#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
1073#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
1074#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
1075
1076#define QIB_7322_SendBufErr0_OFFS 0x240
1077#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
1078#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
1079#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
1080#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
1081
1082#define QIB_7322_AvailUpdCount_OFFS 0x268
1083#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
1084#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
1085#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
1086#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
1087
1088#define QIB_7322_RcvHdrAddr0_OFFS 0x280
1089#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
1090#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
1091#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
1092#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
1093
1094#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
1095#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
1096#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
1097#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
1098#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
1099
1100#define QIB_7322_ahb_access_ctrl_OFFS 0x460
1101#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
1102#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
1103#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
1104#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
1105#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
1106#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
1107#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
1108
1109#define QIB_7322_ahb_transaction_reg_OFFS 0x468
1110#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
1111#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
1112#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
1113#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
1114#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
1115#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
1116#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
1117#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
1118#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
1119#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
1120#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
1121#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
1122#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
1123#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
1124#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
1125#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
1126
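The ahb_transaction_reg fields suggest a request/ready handshake: pack an 11-bit address (and, for writes, write_not_read plus data), then poll ahb_rdy and pull ahb_data back out. A hedged sketch of that sequence follows; the regs pointer, readq/writeq, the retry count, and the omission of ahb_access_ctrl target selection are assumptions for illustration, and the real driver sequence may differ.

	#include <linux/types.h>
	#include <linux/io.h>
	#include <linux/delay.h>

	/* Sketch only: compose an AHB read request from the field macros and
	 * poll ahb_rdy for the result.
	 */
	static u32 example_ahb_read(void __iomem *regs, u32 addr)
	{
		u64 req, val;
		int tries = 100;

		/* write_not_read left at 0 (read); address in bits [26:16]. */
		req = (u64)(addr & QIB_7322_ahb_transaction_reg_ahb_address_RMASK)
			<< QIB_7322_ahb_transaction_reg_ahb_address_LSB;
		writeq(req, regs + QIB_7322_ahb_transaction_reg_OFFS);

		/* Wait for ahb_rdy, then take the 32-bit ahb_data field. */
		do {
			val = readq(regs + QIB_7322_ahb_transaction_reg_OFFS);
			if ((val >> QIB_7322_ahb_transaction_reg_ahb_rdy_LSB) &
			    QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK)
				return (u32)(val >>
					QIB_7322_ahb_transaction_reg_ahb_data_LSB);
			udelay(5);
		} while (--tries);

		return 0;	/* timed out; illustrative only */
	}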
1127#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
1128#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
1129#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
1130#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
1131#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
1132#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
1133#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
1134#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
1135#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
1136#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
1137#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
1138#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
1139#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
1140#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
1141#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
1142#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
1143#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
1144#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
1145#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
1146#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
1147
1148#define QIB_7322_SendCheckMask0_OFFS 0x4C0
1149#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
1150#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
1151#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
1152#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
1153
1154#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
1155#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
1156#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
1157#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
1158#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
1159
1160#define QIB_7322_SendIBPacketMask0_OFFS 0x500
1161#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
1162#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
1163#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
1164#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
1165
1166#define QIB_7322_IntRedirect0_OFFS 0x540
1167#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
1168#define QIB_7322_IntRedirect0_vec11_LSB 0x37
1169#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
1170#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
1171#define QIB_7322_IntRedirect0_vec10_LSB 0x32
1172#define QIB_7322_IntRedirect0_vec10_MSB 0x36
1173#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
1174#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
1175#define QIB_7322_IntRedirect0_vec9_MSB 0x31
1176#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
1177#define QIB_7322_IntRedirect0_vec8_LSB 0x28
1178#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
1179#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
1180#define QIB_7322_IntRedirect0_vec7_LSB 0x23
1181#define QIB_7322_IntRedirect0_vec7_MSB 0x27
1182#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
1183#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
1184#define QIB_7322_IntRedirect0_vec6_MSB 0x22
1185#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
1186#define QIB_7322_IntRedirect0_vec5_LSB 0x19
1187#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
1188#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
1189#define QIB_7322_IntRedirect0_vec4_LSB 0x14
1190#define QIB_7322_IntRedirect0_vec4_MSB 0x18
1191#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
1192#define QIB_7322_IntRedirect0_vec3_LSB 0xF
1193#define QIB_7322_IntRedirect0_vec3_MSB 0x13
1194#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
1195#define QIB_7322_IntRedirect0_vec2_LSB 0xA
1196#define QIB_7322_IntRedirect0_vec2_MSB 0xE
1197#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
1198#define QIB_7322_IntRedirect0_vec1_LSB 0x5
1199#define QIB_7322_IntRedirect0_vec1_MSB 0x9
1200#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
1201#define QIB_7322_IntRedirect0_vec0_LSB 0x0
1202#define QIB_7322_IntRedirect0_vec0_MSB 0x4
1203#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
1204
1205#define QIB_7322_Int_Granted_OFFS 0x570
1206#define QIB_7322_Int_Granted_DEF 0x0000000000000000
1207
1208#define QIB_7322_vec_clr_without_int_OFFS 0x578
1209#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
1210
1211#define QIB_7322_DCACtrlA_OFFS 0x580
1212#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
1213#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
1214#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
1215#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
1216#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
1217#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
1218#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
1219#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
1220#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
1221#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
1222#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
1223#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
1224#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
1225#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
1226#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
1227#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
1228
1229#define QIB_7322_DCACtrlB_OFFS 0x588
1230#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
1231#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
1232#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
1233#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
1234#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
1235#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
1236#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
1237#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
1238#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
1239#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
1240#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
1241#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
1242#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
1243#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
1244#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
1245#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
1246#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
1247#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
1248#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
1249#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
1250#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
1251#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
1252#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
1253#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
1254#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
1255
1256#define QIB_7322_DCACtrlC_OFFS 0x590
1257#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
1258#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
1259#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
1260#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
1261#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
1262#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
1263#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
1264#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
1265#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
1266#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
1267#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
1268#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
1269#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
1270#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
1271#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
1272#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
1273#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
1274#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
1275#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
1276#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
1277#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
1278#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
1279#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
1280#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
1281#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
1282
1283#define QIB_7322_DCACtrlD_OFFS 0x598
1284#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
1285#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
1286#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
1287#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
1288#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
1289#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
1290#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
1291#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
1292#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
1293#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
1294#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
1295#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
1296#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
1297#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
1298#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
1299#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
1300#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
1301#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
1302#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
1303#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
1304#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
1305#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
1306#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
1307#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
1308#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
1309
1310#define QIB_7322_DCACtrlE_OFFS 0x5A0
1311#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
1312#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
1313#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
1314#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
1315#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
1316#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
1317#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
1318#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
1319#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
1320#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
1321#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
1322#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
1323#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
1324#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
1325#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
1326#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
1327#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
1328#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
1329#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
1330#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
1331#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
1332#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
1333#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
1334#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
1335#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
1336
1337#define QIB_7322_DCACtrlF_OFFS 0x5A8
1338#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
1339#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
1340#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
1341#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
1342#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
1343#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
1344#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
1345#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
1346#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
1347#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
1348#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
1349#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
1350#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
1351#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
1352#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
1353#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
1354#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
1355#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
1356#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
1357
1358#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
1359#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
1360#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
1361#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
1362#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
1363#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
1364#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
1365#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
1366
1367#define QIB_7322_CntrRegBase_0_OFFS 0x1028
1368#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
1369
1370#define QIB_7322_ErrMask_0_OFFS 0x1080
1371#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
1372#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
1373#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
1374#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
1375#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
1376#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
1377#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
1378#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
1379#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
1380#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
1381#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
1382#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
1383#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
1384#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
1385#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
1386#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
1387#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
1388#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
1389#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
1390#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
1391#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
1392#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
1393#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
1394#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
1395#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
1396#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
1397#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
1398#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
1399#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
1400#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
1401#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
1402#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
1403#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
1404#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
1405#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
1406#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
1407#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
1408#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
1409#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
1410#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
1411#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
1412#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
1413#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
1414#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
1415#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
1416#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
1417#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
1418#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
1419#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
1420#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
1421#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
1422#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
1423#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
1424#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
1425#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
1426#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
1427#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
1428#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
1429#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
1430#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
1431#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
1432#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
1433#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
1434#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
1435#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
1436#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
1437#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
1438#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
1439#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
1440#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
1441#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
1442#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
1443#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
1444#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
1445#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
1446#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
1447#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
1448#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
1449#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
1450#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
1451#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
1452#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
1453#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
1454#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
1455#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
1456#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
1457#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
1458#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
1459#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
1460#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
1461#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
1462#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
1463#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
1464#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
1465#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
1466#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
1467#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
1468#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
1469#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
1470#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
1471#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
1472#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
1473#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
1474#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
1475#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
1476#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
1477#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
1478#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
1479#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
1480#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
1481#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
1482#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
1483#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
1484#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
1485#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
1486#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
1487#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
1488#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
1489
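ErrMask_0 is a per-port mask register with one enable bit per error condition, at the same bit positions as ErrStatus_0 below. A hedged sketch of assembling a mask value from a few of the _LSB positions follows; which errors a driver actually unmasks is policy, and the regs pointer, writeq, and function name are assumptions for illustration.

	#include <linux/types.h>
	#include <linux/io.h>

	/* Sketch only: unmask a handful of port-0 error conditions by OR-ing
	 * single bits at their _LSB positions into one 64-bit write.
	 */
	static void example_set_port0_errmask(void __iomem *regs)
	{
		u64 mask = 0;

		mask |= 1ULL << QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB;
		mask |= 1ULL << QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB;
		mask |= 1ULL << QIB_7322_ErrMask_0_RcvICRCErrMask_LSB;

		writeq(mask, regs + QIB_7322_ErrMask_0_OFFS);
	}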
1490#define QIB_7322_ErrStatus_0_OFFS 0x1088
1491#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
1492#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
1493#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
1494#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
1495#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
1496#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
1497#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
1498#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
1499#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
1500#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
1501#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
1502#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
1503#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
1504#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
1505#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
1506#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
1507#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
1508#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
1509#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
1510#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
1511#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
1512#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
1513#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
1514#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
1515#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
1516#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
1517#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
1518#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
1519#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
1520#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
1521#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
1522#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
1523#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
1524#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
1525#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
1526#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
1527#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
1528#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
1529#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
1530#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
1531#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
1532#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
1533#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
1534#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
1535#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
1536#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
1537#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
1538#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
1539#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
1540#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
1541#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
1542#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
1543#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
1544#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
1545#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
1546#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
1547#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
1548#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
1549#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
1550#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
1551#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
1552#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
1553#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
1554#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
1555#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
1556#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
1557#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
1558#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
1559#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
1560#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
1561#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
1562#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
1563#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
1564#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
1565#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
1566#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
1567#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
1568#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
1569#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
1570#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
1571#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
1572#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
1573#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
1574#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
1575#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
1576#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
1577#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
1578#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
1579#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
1580#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
1581#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
1582#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
1583#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
1584#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
1585#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
1586#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
1587#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
1588#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
1589#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
1590#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
1591#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
1592#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
1593#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
1594#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
1595#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
1596#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
1597#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
1598#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
1599#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
1600#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
1601#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
1602#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
1603#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
1604#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
1605#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
1606#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
1607#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
1608#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
1609
1610#define QIB_7322_ErrClear_0_OFFS 0x1090
1611#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
1612#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
1613#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
1614#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
1615#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
1616#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
1617#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
1618#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
1619#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
1620#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
1621#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
1622#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
1623#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
1624#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
1625#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
1626#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
1627#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
1628#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
1629#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
1630#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
1631#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
1632#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
1633#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
1634#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
1635#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
1636#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
1637#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
1638#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
1639#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
1640#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
1641#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
1642#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
1643#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
1644#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
1645#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
1646#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
1647#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
1648#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
1649#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
1650#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
1651#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
1652#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
1653#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
1654#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
1655#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
1656#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
1657#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
1658#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
1659#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
1660#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
1661#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
1662#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
1663#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
1664#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
1665#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
1666#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
1667#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
1668#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
1669#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
1670#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
1671#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
1672#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
1673#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
1674#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
1675#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
1676#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
1677#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
1678#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
1679#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
1680#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
1681#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
1682#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
1683#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
1684#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
1685#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
1686#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
1687#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
1688#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
1689#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
1690#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
1691#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
1692#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
1693#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
1694#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
1695#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
1696#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
1697#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
1698#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
1699#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
1700#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
1701#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
1702#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
1703#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
1704#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
1705#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
1706#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
1707#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
1708#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
1709#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
1710#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
1711#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
1712#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
1713#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
1714#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
1715#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
1716#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
1717#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
1718#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
1719#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
1720#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
1721#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
1722#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
1723#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
1724#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
1725#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
1726#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
1727#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
1728#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
1729
1730#define QIB_7322_TXEStatus_0_OFFS 0x10B8
1731#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
1732#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
1733#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
1734#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
1735#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
1736#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
1737#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
1738#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
1739#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
1740#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
1741#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
1742#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
1743#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
1744#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
1745#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
1746#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
1747#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
1748#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
1749#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
1750#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
1751#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
1752#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
1753#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
1754#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
1755#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
1756#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
1757#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
1758#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
1759#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
1760#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
1761#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
1762#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
1763#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
1764#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
1765
1766#define QIB_7322_RcvCtrl_0_OFFS 0x1100
1767#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
1768#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
1769#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
1770#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
1771#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
1772#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
1773#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
1774#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
1775#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
1776#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
1777#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
1778#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
1779#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
1780#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
1781#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
1782#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
1783#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
1784#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
1785#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
1786
1787#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
1788#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
1789#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
1790#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
1791#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
1792
1793#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
1794#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
1795#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
1796#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
1797#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
1798#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
1799#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
1800#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
1801#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
1802#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
1803#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
1804#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
1805#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
1806#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
1807#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
1808#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
1809#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
1810#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
1811#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
1812#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
1813
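RcvQPMapTableA_0 packs six 5-bit receive-context entries into one register, with the _LSB values stepping by 5 (0x0 through 0x19); tables B through F below continue the same layout for contexts 6..31. A small sketch of building such a value follows; the function name, the regs pointer, and writeq are assumed for illustration.

	#include <linux/types.h>
	#include <linux/io.h>

	/* Sketch only: pack the first six 5-bit QP-map context entries into
	 * RcvQPMapTableA_0 using the field macros above.
	 */
	static void example_write_qpmap_a(void __iomem *regs, const u8 ctxts[6])
	{
		u64 val = 0;
		int i;

		/* Entries 0..5 are consecutive 5-bit fields starting at bit 0
		 * (Context0_LSB 0x0, Context1_LSB 0x5, ... Context5_LSB 0x19).
		 */
		for (i = 0; i < 6; i++)
			val |= (u64)(ctxts[i] &
				QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK)
				<< (i * 5);

		writeq(val, regs + QIB_7322_RcvQPMapTableA_0_OFFS);
	}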
1814#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
1815#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
1816#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
1817#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
1818#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
1819#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
1820#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
1821#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
1822#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
1823#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
1824#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
1825#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
1826#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
1827#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
1828#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
1829#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
1830#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
1831#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
1832#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
1833#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
1834
1835#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
1836#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
1837#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
1838#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
1839#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
1840#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
1841#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
1842#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
1843#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
1844#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
1845#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
1846#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
1847#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
1848#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
1849#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
1850#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
1851#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
1852#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
1853#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
1854#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
1855
1856#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
1857#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
1858#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
1859#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
1860#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
1861#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
1862#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
1863#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
1864#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
1865#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
1866#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
1867#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
1868#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
1869#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
1870#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
1871#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
1872#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
1873#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
1874#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
1875#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
1876
1877#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
1878#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
1879#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
1880#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
1881#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
1882#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
1883#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
1884#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
1885#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
1886#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
1887#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
1888#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
1889#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
1890#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
1891#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
1892#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
1893#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
1894#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
1895#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
1896#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
1897
1898#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
1899#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
1900#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
1901#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
1902#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
1903#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
1904#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
1905#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
1906
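/*
 * Usage sketch (hypothetical, not part of the qib driver): the
 * RcvQPMapTable registers above pack six 5-bit receive-context numbers
 * per 64-bit register, with TableF holding only the last two.  Assuming
 * RcvQPMapTableA_0 sits 8 bytes below RcvQPMapTableB_0, as the A..F
 * offset sequence suggests, the register offset and bit position of a
 * given map entry could be located as follows.
 */
static inline void qib_7322_qpmap_slot(unsigned int entry,
				       unsigned int *reg_offs,
				       unsigned int *shift)
{
	if (entry < 6) {
		/* entries 0..5 live in RcvQPMapTableA_0 */
		*reg_offs = QIB_7322_RcvQPMapTableB_0_OFFS - 8;
		*shift = entry * 5;
	} else {
		/* entries 6..31 span RcvQPMapTableB_0..F_0, 8 bytes apart */
		*reg_offs = QIB_7322_RcvQPMapTableB_0_OFFS +
			    ((entry - 6) / 6) * 8;
		*shift = ((entry - 6) % 6) * 5;
	}
}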
1907#define QIB_7322_PSStat_0_OFFS 0x1140
1908#define QIB_7322_PSStat_0_DEF 0x0000000000000000
1909
1910#define QIB_7322_PSStart_0_OFFS 0x1148
1911#define QIB_7322_PSStart_0_DEF 0x0000000000000000
1912
1913#define QIB_7322_PSInterval_0_OFFS 0x1150
1914#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
1915
1916#define QIB_7322_RcvStatus_0_OFFS 0x1160
1917#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
1918#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
1919#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
1920#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
1921#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
1922#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
1923#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
1924
1925#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
1926#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
1927
1928#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
1929#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
1930#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
1931#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
1932#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
1933
1934#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
1935#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
1936#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
1937#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
1938#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
1939#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
1940#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
1941#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
1942
1943#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
1944#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
1945#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
1946#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
1947#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
1948
1949#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
1950#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
1951#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
1952#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
1953#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
1954
1955#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
1956#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
1957#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
1958#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
1959#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
1960
1961#define QIB_7322_SendCtrl_0_OFFS 0x11C0
1962#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
1963#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
1964#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
1965#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
1966#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
1967#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
1968#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
1969#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
1970#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
1971#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
1972#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
1973#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
1974#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
1975#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
1976#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
1977#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
1978#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
1979#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
1980#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
1981#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
1982#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
1983#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
1984#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
1985#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
1986#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
1987#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
1988#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
1989#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
1990#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
1991#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
1992#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
1993#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
1994#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
1995#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
1996#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
1997#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
1998#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
1999
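/*
 * Usage sketch (hypothetical, not part of the qib driver): each field
 * in these per-port registers is described by an _LSB, _MSB and _RMASK
 * triple.  A value is composed by masking each field to its width and
 * shifting it to its LSB, e.g. to enable the send engine and send DMA
 * in SendCtrl_0:
 */
static inline unsigned long long qib_7322_sendctrl_sdma_on(void)
{
	unsigned long long v = 0;

	v |= (1ULL & QIB_7322_SendCtrl_0_SDmaEnable_RMASK) <<
	     QIB_7322_SendCtrl_0_SDmaEnable_LSB;
	v |= (1ULL & QIB_7322_SendCtrl_0_SendEnable_RMASK) <<
	     QIB_7322_SendCtrl_0_SendEnable_LSB;
	return v;
}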
2000#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
2001#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
2002#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
2003#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
2004#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
2005
2006#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
2007#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
2008#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
2009#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
2010#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
2011#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
2012#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
2013#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
2014
2015#define QIB_7322_SendDmaTail_0_OFFS 0x1208
2016#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
2017#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
2018#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
2019#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
2020
2021#define QIB_7322_SendDmaHead_0_OFFS 0x1210
2022#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
2023#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
2024#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
2025#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
2026#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
2027#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
2028#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
2029
2030#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
2031#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
2032#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
2033#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
2034#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
2035
2036#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
2037#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
2038#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
2039#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
2040#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
2041
2042#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
2043#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
2044#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
2045#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
2046#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
2047#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
2048#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
2049#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
2050#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
2051#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
2052#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
2053#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
2054#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
2055#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
2056#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
2057#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
2058#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
2059#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
2060#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
2061#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
2062#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
2063#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
2064#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
2065#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
2066#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
2067#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
2068#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
2069#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
2070#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
2071#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
2072#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
2073#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
2074#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
2075#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
2076#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
2077#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
2078#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
2079#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
2080#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
2081#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
2082#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
2083#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
2084#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
2085#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
2086#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
2087#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
2088#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
2089#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
2090#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
2091#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
2092
2093#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
2094#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
2095#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
2096#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
2097#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
2098
2099#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
2100#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
2101#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
2102#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
2103#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
2104#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
2105#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
2106#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
2107#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
2108#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
2109#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
2110#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
2111#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
2112#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
2113#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
2114#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
2115#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
2116#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
2117#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
2118#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
2119#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
2120#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
2121#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
2122
2123#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
2124#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
2125#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
2126#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
2127#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
2128#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
2129#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
2130#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
2131
2132#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
2133#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
2134#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
2135#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
2136#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
2137
2138#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
2139#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
2140#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
2141#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
2142#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
2143#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
2144#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
2145#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
2146#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
2147#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
2148#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
2149#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
2150#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
2151#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
2152#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
2153#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
2154#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
2155
2156#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
2157#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
2158#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
2159#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
2160#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
2161
2162#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
2163#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
2164#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
2165#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
2166#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
2167
2168#define QIB_7322_IBCStatusA_0_OFFS 0x1540
2169#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
2170#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
2171#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
2172#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
2173#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
2174#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
2175#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
2176#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
2177#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
2178#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
2179#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
2180#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
2181#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
2182#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
2183#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
2184#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
2185#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
2186#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
2187#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
2188#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
2189#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
2190#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
2191#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
2192#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
2193#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
2194#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
2195#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
2196#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
2197#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
2198#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
2199#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
2200#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
2201#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
2202#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
2203#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
2204#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
2205#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
2206#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
2207#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
2208#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
2209#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
2210#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
2211#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
2212#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
2213#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
2214#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
2215#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
2216#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
2217#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
2218#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
2219#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
2220#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
2221#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
2222#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
2223#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
2224#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
2225#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
2226#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
2227
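/*
 * Usage sketch (hypothetical, not part of the qib driver): a read-back
 * status word is decoded the same way, by shifting down to the field's
 * LSB and masking with its RMASK, e.g. to extract the link state from
 * IBCStatusA_0:
 */
static inline unsigned int qib_7322_ibcstatusa_linkstate(unsigned long long status)
{
	return (status >> QIB_7322_IBCStatusA_0_LinkState_LSB) &
	       QIB_7322_IBCStatusA_0_LinkState_RMASK;
}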
2228#define QIB_7322_IBCStatusB_0_OFFS 0x1548
2229#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
2230#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
2231#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
2232#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
2233#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
2234#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
2235#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
2236#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
2237#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
2238#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
2239#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
2240#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
2241#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
2242#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
2243#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
2244#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
2245#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
2246#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
2247#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
2248#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
2249#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
2250#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
2251#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
2252#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
2253#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
2254
2255#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
2256#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
2257#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
2258#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
2259#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
2260#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
2261#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
2262#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
2263#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
2264#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
2265#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
2266#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
2267#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
2268#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
2269#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
2270#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
2271#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
2272#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
2273#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
2274#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
2275#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
2276#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
2277#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
2278#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
2279#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
2280#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
2281#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
2282#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
2283#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
2284#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
2285#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
2286#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
2287#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
2288#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
2289#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
2290#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
2291#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
2292#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
2293
2294#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
2295#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
2296#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
2297#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
2298#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
2299#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
2300#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
2301#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
2302#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
2303#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
2304#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
2305#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
2306#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
2307#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
2308#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
2309#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
2310#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
2311#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
2312#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
2313#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
2314#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
2315#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
2316#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
2317#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
2318#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
2319#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
2320#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
2321#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
2322#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
2323#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
2324#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
2325#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
2326#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
2327#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
2328#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
2329#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
2330#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
2331#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
2332#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
2333#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
2334#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
2335#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
2336#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
2337#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
2338#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
2339#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
2340#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
2341#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
2342#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
2343#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
2344#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
2345#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
2346#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
2347#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
2348#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
2349#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
2350#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
2351#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
2352#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
2353
2354#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
2355#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
2356#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
2357#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
2358#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
2359#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
2360#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
2361#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
2362
2363#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
2364#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
2365
2366#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
2367#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
2368#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
2369#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
2370#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
2371#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
2372#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
2373#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
2374#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
2375#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
2376#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
2377#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
2378#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
2379#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
2380#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
2381#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
2382#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
2383#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
2384#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
2385#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
2386#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
2387#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
2388#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
2389#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
2390#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
2391#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
2392
2393#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
2394#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
2395#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
2396#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
2397#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
2398#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
2399#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
2400#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
2401#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
2402#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
2403#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
2404#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
2405#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
2406#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
2407#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
2408#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
2409#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
2410#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
2411#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
2412#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
2413
2414#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
2415#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
2416#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
2417#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
2418#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
2419#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
2420#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
2421#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
2422#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
2423#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
2424#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
2425#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
2426#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
2427#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
2428#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
2429#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
2430#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
2431#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
2432#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
2433#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
2434#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
2435#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
2436#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
2437#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
2438#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
2439#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
2440
2441#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
2442#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
2443
2444#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
2445#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
2446#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
2447#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
2448#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
2449#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
2450#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
2451#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
2452#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
2453#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
2454#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
2455#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
2456#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
2457#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
2458
2459#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
2460#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
2461#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
2462#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
2463#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
2464#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
2465#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
2466#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
2467#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
2468#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
2469#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
2470#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
2471#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
2472#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
2473#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
2474#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
2475#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
2476#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
2477#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
2478#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
2479#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
2480#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
2481#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
2482#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
2483#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
2484#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
2485#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
2486#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
2487#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
2488#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
2489#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
2490#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
2491#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
2492#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
2493#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
2494#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
2495#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
2496#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
2497#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
2498#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
2499#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
2500
2501#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
2502#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
2503#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
2504#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
2505#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
2506#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
2507#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
2508#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
2509#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
2510#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
2511#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
2512#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
2513#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
2514#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
2515#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
2516#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
2517#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
2518#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
2519#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
2520#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
2521#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
2522#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
2523#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
2524
2525#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
2526#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
2527#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
2528#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
2529#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
2530#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
2531#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
2532#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
2533#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
2534#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
2535#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
2536#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
2537#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
2538#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
2539#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
2540#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
2541#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
2542#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
2543#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
2544#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
2545#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
2546#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
2547#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
2548#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
2549#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
2550#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
2551#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
2552#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
2553#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
2554#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
2555#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
2556#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
2557#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
2558#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
2559#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
2560#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
2561#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
2562#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
2563
2564#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
2565#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
2566#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
2567#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
2568#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
2569#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
2570#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
2571#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
2572#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
2573#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
2574#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
2575#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
2576#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
2577#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
2578#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
2579#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
2580#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
2581#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
2582#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
2583#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
2584#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
2585#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
2586#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
2587#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
2588#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
2589#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
2590#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
2591#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
2592#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
2593#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
2594#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
2595#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
2596#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
2597#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
2598#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
2599#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
2600#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
2601#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
2602
2603#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
2604#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
2605#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
2606#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
2607#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
2608#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
2609#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
2610#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
2611#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
2612#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
2613#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
2614#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
2615#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
2616#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
2617#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
2618#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
2619#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
2620#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
2621#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
2622#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
2623#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
2624#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
2625#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
2626#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
2627#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
2628#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
2629#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
2630#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
2631#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
2632#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
2633#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
2634#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
2635#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
2636#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
2637#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
2638#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
2639#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
2640#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
2641
2642#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
2643#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
2644#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
2645#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
2646#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
2647#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
2648#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
2649#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
2650#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
2651#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
2652#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
2653#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
2654#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
2655#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
2656#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
2657#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
2658#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
2659#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
2660#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
2661#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
2662#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
2663#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
2664#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
2665#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
2666#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
2667#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
2668#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
2669#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
2670#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
2671#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
2672#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
2673#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
2674#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
2675#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
2676#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
2677#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
2678#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
2679#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
2680
2681#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
2682#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
2683#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
2684#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
2685#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
2686#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
2687#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
2688#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
2689#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
2690#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
2691#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
2692#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
2693#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
2694#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
2695#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
2696#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
2697#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
2698#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
2699#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
2700#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
2701#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
2702#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
2703#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
2704#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
2705#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
2706#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
2707#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
2708#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
2709#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
2710#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
2711#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
2712#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
2713#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
2714#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
2715#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
2716#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
2717#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
2718#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
2719
2720#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
2721#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
2722#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
2723#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
2724#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
2725#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
2726#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
2727#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
2728#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
2729#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
2730#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
2731#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
2732#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
2733#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
2734#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
2735#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
2736#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
2737#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
2738#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
2739#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
2740#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
2741#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
2742#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
2743#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
2744#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
2745#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
2746#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
2747#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
2748#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
2749#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
2750#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
2751#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
2752#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
2753#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
2754#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
2755#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
2756#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
2757#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
2758
2759#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
2760#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
2761
2762#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
2763#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
2764#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
2765#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
2766#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
2767
2768#define QIB_7322_LowPriority0_0_OFFS 0x1C00
2769#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
2770#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
2771#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
2772#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
2773#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
2774#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
2775#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
2776
2777#define QIB_7322_HighPriority0_0_OFFS 0x1E00
2778#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
2779#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
2780#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
2781#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
2782#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
2783#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
2784#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
2785
2786#define QIB_7322_CntrRegBase_1_OFFS 0x2028
2787#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
2788
2789#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
2790
2791#define QIB_7322_SendCtrl_1_OFFS 0x21C0
2792
2793#define QIB_7322_SendBufAvail0_OFFS 0x3000
2794#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
2795#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
2796#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
2797#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
2798
2799#define QIB_7322_MsixTable_OFFS 0x8000
2800#define QIB_7322_MsixTable_DEF 0x0000000000000000
2801
2802#define QIB_7322_MsixPba_OFFS 0x9000
2803#define QIB_7322_MsixPba_DEF 0x0000000000000000
2804
2805#define QIB_7322_LAMemory_OFFS 0xA000
2806#define QIB_7322_LAMemory_DEF 0x0000000000000000
2807
2808#define QIB_7322_LBIntCnt_OFFS 0x11000
2809#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
2810
2811#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
2812#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
2813
2814#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
2815#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
2816
2817#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
2818#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
2819
2820#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
2821#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
2822
2823#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
2824#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
2825
2826#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
2827#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
2828
2829#define QIB_7322_LBIntCnt_0_OFFS 0x12000
2830#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
2831
2832#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
2833#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
2834
2835#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
2836#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
2837
2838#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
2839#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
2840
2841#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
2842#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
2843
2844#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
2845#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
2846
2847#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
2848#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
2849
2850#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
2851#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
2852
2853#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
2854#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
2855
2856#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
2857#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
2858
2859#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
2860#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
2861
2862#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
2863#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
2864
2865#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
2866#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
2867
2868#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
2869#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
2870
2871#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
2872#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
2873
2874#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
2875#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
2876
2877#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
2878#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
2879
2880#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
2881#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
2882
2883#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
2884#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
2885
2886#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
2887#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
2888
2889#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
2890#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
2891
2892#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
2893#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
2894
2895#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
2896#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
2897
2898#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
2899#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
2900
2901#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
2902#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
2903
2904#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
2905#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
2906
2907#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
2908#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
2909
2910#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
2911#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
2912
2913#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
2914#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
2915
2916#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
2917#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
2918
2919#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
2920#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
2921
2922#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
2923#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
2924
2925#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
2926#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
2927
2928#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
2929#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
2930
2931#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
2932#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
2933
2934#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
2935#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
2936
2937#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
2938#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
2939
2940#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
2941#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
2942
2943#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
2944#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
2945
2946#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
2947#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
2948
2949#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
2950#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
2951
2952#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
2953#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
2954
2955#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
2956#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
2957
2958#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
2959#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
2960
2961#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
2962#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
2963
2964#define QIB_7322_LBIntCnt_1_OFFS 0x13000
2965#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
2966
2967#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
2968#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
2969
2970#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
2971#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
2972
2973#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
2974#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
2975
2976#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
2977#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
2978
2979#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
2980#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
2981
2982#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
2983#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
2984
2985#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
2986#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
2987
2988#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
2989#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
2990
2991#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
2992#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
2993
2994#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
2995#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
2996
2997#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
2998#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
2999
3000#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
3001#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
3002
3003#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
3004#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
3005
3006#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
3007#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
3008
3009#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
3010#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
3011
3012#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
3013#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
3014
3015#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
3016#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
3017
3018#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
3019#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
3020
3021#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
3022#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
3023
3024#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
3025#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
3026
3027#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
3028#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
3029
3030#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
3031#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
3032
3033#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
3034#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
3035
3036#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
3037#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
3038
3039#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
3040#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
3041
3042#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
3043#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
3044
3045#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
3046#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
3047
3048#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
3049#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
3050
3051#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
3052#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
3053
3054#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
3055#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
3056
3057#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
3058#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
3059
3060#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
3061#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
3062
3063#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
3064#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
3065
3066#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
3067#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
3068
3069#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
3070#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
3071
3072#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
3073#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
3074
3075#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
3076#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
3077
3078#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
3079#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
3080
3081#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
3082#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
3083
3084#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
3085#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
3086
3087#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
3088#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
3089
3090#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
3091#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
3092
3093#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
3094#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
3095
3096#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
3097#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
3098
3099#define QIB_7322_RcvEgrArray_OFFS 0x14000
3100#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
3101#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
3102#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
3103#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
3104#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
3105#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
3106#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
3107
3108#define QIB_7322_RcvTIDArray0_OFFS 0x50000
3109#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
3110#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
3111#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
3112#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
3113#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
3114#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
3115#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
3116
3117#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
3118#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
3119
3120#define QIB_7322_RcvHdrTail0_OFFS 0x200000
3121#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
3122
3123#define QIB_7322_RcvHdrHead0_OFFS 0x200008
3124#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
3125#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
3126#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
3127#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
3128#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
3129#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
3130#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
3131
3132#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
3133#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
3134
3135#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
3136#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
3137
3138#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
3139#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
3140#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
3141#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
3142#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
3143#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
3144#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
3145#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
3146#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
3147#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
3148#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
3149#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
3150#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
3151#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
3152#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
3153#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
3154#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
3155#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
3156#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
3157#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
3158#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
3159#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
3160#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
3161#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
3162#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
3163#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 000000000000..b3955ed8f794
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef _QIB_COMMON_H
36#define _QIB_COMMON_H
37
38/*
39 * This file contains defines, structures, etc. that are used
40 * to communicate between kernel and user code.
41 */
42
43/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
44#define QIB_SRC_OUI_1 0x00
45#define QIB_SRC_OUI_2 0x11
46#define QIB_SRC_OUI_3 0x75
47
48/* version of protocol header (known to chip also). In the long run,
49 * we should be able to generate and accept a range of version numbers;
50 * for now we only accept one, and it's compiled in.
51 */
52#define IPS_PROTO_VERSION 2
53
54/*
55 * These are compile time constants that you may want to enable or disable
56 * if you are trying to debug problems with code or performance.
57 * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
58 * fastpath code
59 * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
 60 * traced in fastpath code
61 * _QIB_TRACING define as 0 if you want to remove all tracing in a
62 * compilation unit
63 */
64
65/*
66 * The value in the BTH QP field that QLogic_IB uses to differentiate
67 * an qlogic_ib protocol IB packet vs standard IB transport
68 * This it needs to be even (0x656b78), because the LSB is sometimes
69 * used for the MSB of context. The change may cause a problem
70 * interoperating with older software.
71 */
72#define QIB_KD_QP 0x656b78
73
74/*
75 * These are the status bits readable (in ascii form, 64bit value)
76 * from the "status" sysfs file. For binary compatibility, values
77 * must remain as is; removed states can be reused for different
78 * purposes.
79 */
80#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
81/* Chip has been found and initted */
82#define QIB_STATUS_CHIP_PRESENT 0x20
83/* IB link is at ACTIVE, usable for data traffic */
84#define QIB_STATUS_IB_READY 0x40
85/* link is configured, LID, MTU, etc. have been set */
86#define QIB_STATUS_IB_CONF 0x80
87/* A Fatal hardware error has occurred. */
88#define QIB_STATUS_HWERROR 0x200
89
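/*
 * Illustrative sketch, not part of the original header: user code that
 * has read the 64-bit status value (e.g. via the spi_status address in
 * qib_base_info below) might test the bits above roughly like this.
 * The helper name and the exact readiness policy are assumptions.
 */
static inline int qib_example_link_usable(__u64 status)
{
	/* chip found, basic init done, IB link ACTIVE, and no fatal error */
	const __u64 want = QIB_STATUS_CHIP_PRESENT | QIB_STATUS_INITTED |
			   QIB_STATUS_IB_READY;

	return (status & want) == want && !(status & QIB_STATUS_HWERROR);
}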
90/*
91 * The list of usermode accessible registers. Also see Reg_* later in file.
92 */
93enum qib_ureg {
94 /* (RO) DMA RcvHdr to be used next. */
95 ur_rcvhdrtail = 0,
96 /* (RW) RcvHdr entry to be processed next by host. */
97 ur_rcvhdrhead = 1,
98 /* (RO) Index of next Eager index to use. */
99 ur_rcvegrindextail = 2,
100 /* (RW) Eager TID to be processed next */
101 ur_rcvegrindexhead = 3,
102 /* For internal use only; max register number. */
103 _QIB_UregMax
104};
105
106/* bit values for spi_runtime_flags */
107#define QIB_RUNTIME_PCIE 0x0002
108#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
109#define QIB_RUNTIME_RCVHDR_COPY 0x0008
110#define QIB_RUNTIME_MASTER 0x0010
111#define QIB_RUNTIME_RCHK 0x0020
112#define QIB_RUNTIME_NODMA_RTAIL 0x0080
113#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
114#define QIB_RUNTIME_SDMA 0x0200
115#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
116#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
117#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
118#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
119#define QIB_RUNTIME_HDRSUPP 0x4000
120
121/*
122 * This structure is returned by qib_userinit() immediately after
123 * open to get implementation-specific info, and info specific to this
124 * instance.
125 *
126 * This struct must have explicit pad fields where type sizes
127 * may result in different alignments between 32 and 64 bit
128 * programs, since the 64 bit kernel requires the user code
129 * to have matching offsets.
130 */
131struct qib_base_info {
132 /* version of hardware, for feature checking. */
133 __u32 spi_hw_version;
134 /* version of software, for feature checking. */
135 __u32 spi_sw_version;
136 /* QLogic_IB context assigned, goes into sent packets */
137 __u16 spi_ctxt;
138 __u16 spi_subctxt;
139 /*
140 * IB MTU, packets IB data must be less than this.
141 * The MTU is in bytes, and will be a multiple of 4 bytes.
142 */
143 __u32 spi_mtu;
144 /*
145 * Size of a PIO buffer. Any given packet's total size must be less
146 * than this (in words). Included is the starting control word, so
147 * if 513 is returned, then total pkt size is 512 words or less.
148 */
149 __u32 spi_piosize;
150 /* size of the TID cache in qlogic_ib, in entries */
151 __u32 spi_tidcnt;
152 /* size of the TID Eager list in qlogic_ib, in entries */
153 __u32 spi_tidegrcnt;
154 /* size of a single receive header queue entry in words. */
155 __u32 spi_rcvhdrent_size;
156 /*
157 * Count of receive header queue entries allocated.
158 * This may be less than the spu_rcvhdrcnt passed in!
159 */
160 __u32 spi_rcvhdr_cnt;
161
162 /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
163 __u32 spi_runtime_flags;
164
165 /* address where hardware receive header queue is mapped */
166 __u64 spi_rcvhdr_base;
167
168 /* user program. */
169
170 /* base address of eager TID receive buffers used by hardware. */
171 __u64 spi_rcv_egrbufs;
172
173 /* Allocated by initialization code, not by protocol. */
174
175 /*
176 * Size of each TID buffer in host memory, starting at
177 * spi_rcv_egrbufs. The buffers are virtually contiguous.
178 */
179 __u32 spi_rcv_egrbufsize;
180 /*
181 * The special QP (queue pair) value that identifies an qlogic_ib
182 * protocol packet from standard IB packets. More, probably much
183 * more, to be added.
184 */
185 __u32 spi_qpair;
186
187 /*
188 * User register base for init code, not to be used directly by
189 * protocol or applications. Always points to chip registers,
190 * for normal or shared context.
191 */
192 __u64 spi_uregbase;
193 /*
194 * Maximum buffer size in bytes that can be used in a single TID
195 * entry (assuming the buffer is aligned to this boundary). This is
196 * the minimum of what the hardware and software support. Guaranteed
197 * to be a power of 2.
198 */
199 __u32 spi_tid_maxsize;
200 /*
201 * alignment of each pio send buffer (byte count
202 * to add to spi_piobufbase to get to second buffer)
203 */
204 __u32 spi_pioalign;
205 /*
206 * The index of the first pio buffer available to this process;
207 * needed to do lookup in spi_pioavailaddr; not added to
208 * spi_piobufbase.
209 */
210 __u32 spi_pioindex;
211 /* number of buffers mapped for this process */
212 __u32 spi_piocnt;
213
214 /*
215 * Base address of writeonly pio buffers for this process.
216 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
217 * boundaries. spi_piocnt buffers are mapped from this address
218 */
219 __u64 spi_piobufbase;
220
221 /*
222 * Base address of readonly memory copy of the pioavail registers.
223 * There are 2 bits for each buffer.
224 */
225 __u64 spi_pioavailaddr;
226
227 /*
228 * Address where driver updates a copy of the interface and driver
229 * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
230 * link status qword (formerly combined with driver status), then a
231 * string indicating hardware error, if there was one.
232 */
233 __u64 spi_status;
234
235 /* number of chip ctxts available to user processes */
236 __u32 spi_nctxts;
237 __u16 spi_unit; /* unit number of chip we are using */
238 __u16 spi_port; /* IB port number we are using */
239 /* num bufs in each contiguous set */
240 __u32 spi_rcv_egrperchunk;
241 /* size in bytes of each contiguous set */
242 __u32 spi_rcv_egrchunksize;
243 /* total size of mmap to cover full rcvegrbuffers */
244 __u32 spi_rcv_egrbuftotlen;
245 __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
246 /* address of readonly memory copy of the rcvhdrq tail register. */
247 __u64 spi_rcvhdr_tailaddr;
248
249 /*
250 * shared memory pages for subctxts if ctxt is shared; these cover
251 * all the processes in the group sharing a single context.
252 * All have enough space for the num_subcontexts value on this job.
253 */
254 __u64 spi_subctxt_uregbase;
255 __u64 spi_subctxt_rcvegrbuf;
256 __u64 spi_subctxt_rcvhdr_base;
257
258 /* shared memory page for send buffer disarm status */
259 __u64 spi_sendbuf_status;
260} __attribute__ ((aligned(8)));
261
262/*
263 * This version number is given to the driver by the user code during
264 * initialization in the spu_userversion field of qib_user_info, so
265 * the driver can check for compatibility with user code.
266 *
267 * The major version changes when data structures
268 * change in an incompatible way. The driver must be the same or higher
269 * for initialization to succeed. In some cases, a higher version
270 * driver will not interoperate with older software, and initialization
271 * will return an error.
272 */
273#define QIB_USER_SWMAJOR 1
274
275/*
276 * Minor version differences are always compatible
277 * within a major version; however, if the user software version is
278 * newer than the driver software, some new features and/or structure fields
279 * may not be implemented; the user code must deal with this if it
280 * cares, or it must abort after initialization reports the difference.
281 */
282#define QIB_USER_SWMINOR 10
283
284#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
285
286#ifndef QIB_KERN_TYPE
287#define QIB_KERN_TYPE 0
288#define QIB_IDSTR "QLogic kernel.org driver"
289#endif
290
291/*
292 * Similarly, this is the kernel version going back to the user. It's
293 * slightly different, in that we want to tell if the driver was built as
294 * part of a QLogic release, or from the driver from openfabrics.org,
295 * kernel.org, or a standard distribution, for support reasons.
296 * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
297 *
298 * It's returned by the driver to the user code during initialization in the
299 * spi_sw_version field of qib_base_info, so the user code can in turn
300 * check for compatibility with the kernel.
301*/
302#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
303
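/*
 * Illustrative sketch, not part of the original header: how user code
 * might split the spi_sw_version returned in qib_base_info and compare
 * it against its own compiled-in version.  The helper name and the
 * conservative compatibility policy below (major must match, driver
 * minor >= user minor) are assumptions based on the comments above.
 */
static inline int qib_example_sw_compatible(__u32 spi_sw_version)
{
	/* bit 31 is the QIB_KERN_TYPE flag, so mask it off the major field */
	__u32 kmajor = (spi_sw_version >> 16) & 0x7fff;
	__u32 kminor = spi_sw_version & 0xffff;

	return kmajor == QIB_USER_SWMAJOR && kminor >= QIB_USER_SWMINOR;
}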
304/*
305 * This structure is passed to qib_userinit() to tell the driver where
306 * user code buffers are, sizes, etc. The offsets and sizes of the
307 * fields must remain unchanged, for binary compatibility. It can
308 * be extended, if userversion is changed so user code can tell, if needed
309 */
310struct qib_user_info {
311 /*
312 * version of user software, to detect compatibility issues.
313 * Should be set to QIB_USER_SWVERSION.
314 */
315 __u32 spu_userversion;
316
317 __u32 _spu_unused2;
318
319 /* size of struct base_info to write to */
320 __u32 spu_base_info_size;
321
322 __u32 _spu_unused3;
323
324 /*
325 * If two or more processes wish to share a context, each process
326 * must set the spu_subctxt_cnt and spu_subctxt_id to the same
327 * values. The only restriction on the spu_subctxt_id is that
328 * it be unique for a given node.
329 */
330 __u16 spu_subctxt_cnt;
331 __u16 spu_subctxt_id;
332
333 __u32 spu_port; /* IB port requested by user if > 0 */
334
335 /*
336 * address of struct base_info to write to
337 */
338 __u64 spu_base_info;
339
340} __attribute__ ((aligned(8)));
341
342/* User commands. */
343
344/* 16 available, was: old set up userspace (for old user code) */
345#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
346#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
347#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
348#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
349#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
350/* 22 available, was: return info on slave processes (for old user code) */
351#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
352#define QIB_CMD_USER_INIT 24 /* set up userspace */
353#define QIB_CMD_UNUSED_1 25
354#define QIB_CMD_UNUSED_2 26
355#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
356#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
357#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
358/* 30 is unused */
359#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
360#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
361/* 33 available, was a testing feature */
362#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
363#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
364#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
365 * processes: qib_cpus_list */
366
367/*
368 * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
369 * compatibility with libraries from previous release. The ACK_EVENT
370 * will take appropriate driver action (if any, just DISARM for now),
371 * then clear the bits passed in as part of the mask. These bits are
372 * in the first 64bit word at spi_sendbuf_status, and are passed to
373 * the driver in the event_mask union as well.
374 */
375#define _QIB_EVENT_DISARM_BUFS_BIT 0
376#define _QIB_EVENT_LINKDOWN_BIT 1
377#define _QIB_EVENT_LID_CHANGE_BIT 2
378#define _QIB_EVENT_LMC_CHANGE_BIT 3
379#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
380#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
381
382#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
383#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
384#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
385#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
386#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
387
388
389/*
390 * Poll types
391 */
392#define QIB_POLL_TYPE_ANYRCV 0x0
393#define QIB_POLL_TYPE_URGENT 0x1
394
395struct qib_ctxt_info {
396 __u16 num_active; /* number of active units */
397 __u16 unit; /* unit (chip) assigned to caller */
398 __u16 port; /* IB port assigned to caller (1-based) */
399 __u16 ctxt; /* ctxt on unit assigned to caller */
400 __u16 subctxt; /* subctxt on unit assigned to caller */
401 __u16 num_ctxts; /* number of ctxts available on unit */
402 __u16 num_subctxts; /* number of subctxts opened on ctxt */
403 __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
404};
405
406struct qib_tid_info {
407 __u32 tidcnt;
408 /* make structure same size in 32 and 64 bit */
409 __u32 tid__unused;
410 /* virtual address of first page in transfer */
411 __u64 tidvaddr;
412 /* pointer (same size 32/64 bit) to __u16 tid array */
413 __u64 tidlist;
414
415 /*
416 * pointer (same size 32/64 bit) to bitmap of TIDs used
417 * for this call; checked for being large enough at open
418 */
419 __u64 tidmap;
420};
421
422struct qib_cmd {
423 __u32 type; /* command type */
424 union {
425 struct qib_tid_info tid_info;
426 struct qib_user_info user_info;
427
428 /*
429 * address in userspace where we should put the sdma
430 * inflight counter
431 */
432 __u64 sdma_inflight;
433 /*
434 * address in userspace where we should put the sdma
435 * completion counter
436 */
437 __u64 sdma_complete;
438 /* address in userspace of struct qib_ctxt_info to
439 write result to */
440 __u64 ctxt_info;
441 /* enable/disable receipt of packets */
442 __u32 recv_ctrl;
443 /* enable/disable armlaunch errors (non-zero to enable) */
444 __u32 armlaunch_ctrl;
445 /* partition key to set */
446 __u16 part_key;
447 /* user address of __u32 bitmask of active slaves */
448 __u64 slave_mask_addr;
449 /* type of polling we want */
450 __u16 poll_type;
451 /* back pressure enable bit for one particular context */
452 __u8 ctxt_bp;
453 /* qib_user_event_ack(), IPATH_EVENT_* bits */
454 __u64 event_mask;
455 } cmd;
456};
457
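/*
 * Illustrative sketch, not part of the original header: assuming user
 * commands are delivered by write()ing a struct qib_cmd to the opened
 * device file descriptor (the layout above suggests this, but the
 * delivery mechanism itself is an assumption here, and <unistd.h> is
 * needed for write()), a context-info query and an event ack might
 * look roughly like this.  Helper names are hypothetical.
 */
static inline int qib_example_get_ctxt_info(int fd, struct qib_ctxt_info *info)
{
	struct qib_cmd cmd = { 0 };

	cmd.type = QIB_CMD_CTXT_INFO;
	/* driver writes the result to this user address */
	cmd.cmd.ctxt_info = (__u64) (unsigned long) info;
	return write(fd, &cmd, sizeof(cmd)) == (ssize_t) sizeof(cmd) ? 0 : -1;
}

static inline int qib_example_ack_disarm(int fd)
{
	struct qib_cmd cmd = { 0 };

	cmd.type = QIB_CMD_ACK_EVENT;
	/* also clears the disarm bit in the word at spi_sendbuf_status */
	cmd.cmd.event_mask = QIB_EVENT_DISARM_BUFS_BIT;
	return write(fd, &cmd, sizeof(cmd)) == (ssize_t) sizeof(cmd) ? 0 : -1;
}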
458struct qib_iovec {
459 /* Pointer to data, but same size 32 and 64 bit */
460 __u64 iov_base;
461
462 /*
463 * Length of data; don't need 64 bits, but want
464 * qib_sendpkt to remain same size as before 32 bit changes, so...
465 */
466 __u64 iov_len;
467};
468
469/*
470 * Describes a single packet for send. Each packet can have one or more
471 * buffers, but the total length (exclusive of IB headers) must be less
472 * than the MTU, and if using the PIO method, entire packet length,
473 * including IB headers, must be less than the qib_piosize value (words).
474 * Use of this necessitates including sys/uio.h
475 */
476struct __qib_sendpkt {
477 __u32 sps_flags; /* flags for packet (TBD) */
478 __u32 sps_cnt; /* number of entries to use in sps_iov */
479 /* array of iov's describing packet. TEMPORARY */
480 struct qib_iovec sps_iov[4];
481};
482
483/*
484 * Diagnostics can send a packet by "writing" the following
485 * structs to the diag data special file.
486 * This allows a custom
487 * pbc (+ static rate) qword, so that special modes and deliberate
488 * changes to CRCs can be used. The elements were also re-ordered
489 * for better alignment and to avoid padding issues.
490 */
491#define _DIAG_XPKT_VERS 3
492struct qib_diag_xpkt {
493 __u16 version;
494 __u16 unit;
495 __u16 port;
496 __u16 len;
497 __u64 data;
498 __u64 pbc_wd;
499};
500
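/*
 * Illustrative sketch, not part of the original header: a diagnostic
 * tool might fill the descriptor above and write() it to the diag data
 * special file roughly like this.  The fd, the helper name, and the
 * contents of the pbc word are up to the caller (assumptions here);
 * <unistd.h> is needed for write().
 */
static inline int qib_example_diag_send(int fd, __u16 unit, __u16 port,
					const void *buf, __u16 len,
					__u64 pbc_wd)
{
	struct qib_diag_xpkt pkt = { 0 };

	pkt.version = _DIAG_XPKT_VERS;
	pkt.unit = unit;
	pkt.port = port;
	pkt.len = len;
	pkt.data = (__u64) (unsigned long) buf;
	pkt.pbc_wd = pbc_wd;
	return write(fd, &pkt, sizeof(pkt)) == (ssize_t) sizeof(pkt) ? 0 : -1;
}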
501/*
502 * Data layout in I2C flash (for GUID, etc.)
503 * All fields are little-endian binary unless otherwise stated
504 */
505#define QIB_FLASH_VERSION 2
506struct qib_flash {
507 /* flash layout version (QIB_FLASH_VERSION) */
508 __u8 if_fversion;
509 /* checksum protecting if_length bytes */
510 __u8 if_csum;
511 /*
512 * valid length (in use, protected by if_csum), including
513 * if_fversion and if_csum themselves)
514 */
515 __u8 if_length;
516 /* the GUID, in network order */
517 __u8 if_guid[8];
518 /* number of GUIDs to use, starting from if_guid */
519 __u8 if_numguid;
520 /* the (last 10 characters of) board serial number, in ASCII */
521 char if_serial[12];
522 /* board mfg date (YYYYMMDD ASCII) */
523 char if_mfgdate[8];
524 /* last board rework/test date (YYYYMMDD ASCII) */
525 char if_testdate[8];
526 /* logging of error counts, TBD */
527 __u8 if_errcntp[4];
528 /* powered on hours, updated at driver unload */
529 __u8 if_powerhour[2];
530 /* ASCII free-form comment field */
531 char if_comment[32];
532 /* Backwards compatible prefix for longer QLogic Serial Numbers */
533 char if_sprefix[4];
534 /* 82 bytes used, min flash size is 128 bytes */
535 __u8 if_future[46];
536};
537
538/*
539 * These are the counters implemented in the chip, and are listed in order.
540 * The InterCaps naming is taken straight from the chip spec.
541 */
542struct qlogic_ib_counters {
543 __u64 LBIntCnt;
544 __u64 LBFlowStallCnt;
545 __u64 TxSDmaDescCnt; /* was Reserved1 */
546 __u64 TxUnsupVLErrCnt;
547 __u64 TxDataPktCnt;
548 __u64 TxFlowPktCnt;
549 __u64 TxDwordCnt;
550 __u64 TxLenErrCnt;
551 __u64 TxMaxMinLenErrCnt;
552 __u64 TxUnderrunCnt;
553 __u64 TxFlowStallCnt;
554 __u64 TxDroppedPktCnt;
555 __u64 RxDroppedPktCnt;
556 __u64 RxDataPktCnt;
557 __u64 RxFlowPktCnt;
558 __u64 RxDwordCnt;
559 __u64 RxLenErrCnt;
560 __u64 RxMaxMinLenErrCnt;
561 __u64 RxICRCErrCnt;
562 __u64 RxVCRCErrCnt;
563 __u64 RxFlowCtrlErrCnt;
564 __u64 RxBadFormatCnt;
565 __u64 RxLinkProblemCnt;
566 __u64 RxEBPCnt;
567 __u64 RxLPCRCErrCnt;
568 __u64 RxBufOvflCnt;
569 __u64 RxTIDFullErrCnt;
570 __u64 RxTIDValidErrCnt;
571 __u64 RxPKeyMismatchCnt;
572 __u64 RxP0HdrEgrOvflCnt;
573 __u64 RxP1HdrEgrOvflCnt;
574 __u64 RxP2HdrEgrOvflCnt;
575 __u64 RxP3HdrEgrOvflCnt;
576 __u64 RxP4HdrEgrOvflCnt;
577 __u64 RxP5HdrEgrOvflCnt;
578 __u64 RxP6HdrEgrOvflCnt;
579 __u64 RxP7HdrEgrOvflCnt;
580 __u64 RxP8HdrEgrOvflCnt;
581 __u64 RxP9HdrEgrOvflCnt;
582 __u64 RxP10HdrEgrOvflCnt;
583 __u64 RxP11HdrEgrOvflCnt;
584 __u64 RxP12HdrEgrOvflCnt;
585 __u64 RxP13HdrEgrOvflCnt;
586 __u64 RxP14HdrEgrOvflCnt;
587 __u64 RxP15HdrEgrOvflCnt;
588 __u64 RxP16HdrEgrOvflCnt;
589 __u64 IBStatusChangeCnt;
590 __u64 IBLinkErrRecoveryCnt;
591 __u64 IBLinkDownedCnt;
592 __u64 IBSymbolErrCnt;
593 __u64 RxVL15DroppedPktCnt;
594 __u64 RxOtherLocalPhyErrCnt;
595 __u64 PcieRetryBufDiagQwordCnt;
596 __u64 ExcessBufferOvflCnt;
597 __u64 LocalLinkIntegrityErrCnt;
598 __u64 RxVlErrCnt;
599 __u64 RxDlidFltrCnt;
600};
601
602/*
603 * The next set of defines are for packet headers, and chip register
604 * and memory bits that are visible to and/or used by user-mode software.
605 */
606
607/* RcvHdrFlags bits */
608#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
609#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
610#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
611#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
612#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
613#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
614#define QLOGIC_IB_RHF_SEQ_MASK 0xF
615#define QLOGIC_IB_RHF_SEQ_SHIFT 0
616#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
617#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
618#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
619#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
620#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
621#define QLOGIC_IB_RHF_H_LENERR 0x10000000
622#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
623#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
624#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
625#define QLOGIC_IB_RHF_H_MKERR 0x01000000
626#define QLOGIC_IB_RHF_H_IBERR 0x00800000
627#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
628#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
629#define QLOGIC_IB_RHF_L_SWA 0x00008000
630#define QLOGIC_IB_RHF_L_SWB 0x00004000
631
632/* qlogic_ib header fields */
633#define QLOGIC_IB_I_VERS_MASK 0xF
634#define QLOGIC_IB_I_VERS_SHIFT 28
635#define QLOGIC_IB_I_CTXT_MASK 0xF
636#define QLOGIC_IB_I_CTXT_SHIFT 24
637#define QLOGIC_IB_I_TID_MASK 0x7FF
638#define QLOGIC_IB_I_TID_SHIFT 13
639#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
640#define QLOGIC_IB_I_OFFSET_SHIFT 0
641
642/* K_PktFlags bits */
643#define QLOGIC_IB_KPF_INTR 0x1
644#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
645#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
646
647#define QLOGIC_IB_MAX_SUBCTXT 4
648
649/* SendPIO per-buffer control */
650#define QLOGIC_IB_SP_TEST 0x40
651#define QLOGIC_IB_SP_TESTEBP 0x20
652#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
653
654/* SendPIOAvail bits */
655#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
656#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
657
658/* qlogic_ib header format */
659struct qib_header {
660 /*
661 * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
662 * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
663 * Context 4, TID 11, offset 13.
664 */
665 __le32 ver_ctxt_tid_offset;
666 __le16 chksum;
667 __le16 pkt_flags;
668};
669
670/*
671 * qlogic_ib user message header format.
672 * This structure contains the first 4 fields common to all protocols
673 * that employ qlogic_ib.
674 */
675struct qib_message_header {
676 __be16 lrh[4];
677 __be32 bth[3];
678 /* fields below this point are in host byte order */
679 struct qib_header iph;
680 __u8 sub_opcode;
681};
682
683/* IB - LRH header consts */
684#define QIB_LRH_GRH 0x0003 /* 1st word of IB LRH - next header: GRH */
685#define QIB_LRH_BTH 0x0002 /* 1st word of IB LRH - next header: BTH */
686
687/* misc. */
688#define SIZE_OF_CRC 1
689
690#define QIB_DEFAULT_P_KEY 0xFFFF
691#define QIB_PERMISSIVE_LID 0xFFFF
692#define QIB_AETH_CREDIT_SHIFT 24
693#define QIB_AETH_CREDIT_MASK 0x1F
694#define QIB_AETH_CREDIT_INVAL 0x1F
695#define QIB_PSN_MASK 0xFFFFFF
696#define QIB_MSN_MASK 0xFFFFFF
697#define QIB_QPN_MASK 0xFFFFFF
698#define QIB_MULTICAST_LID_BASE 0xC000
699#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
700#define QIB_MULTICAST_QPN 0xFFFFFF
701
702/* Receive Header Queue: receive type (from qlogic_ib) */
703#define RCVHQ_RCV_TYPE_EXPECTED 0
704#define RCVHQ_RCV_TYPE_EAGER 1
705#define RCVHQ_RCV_TYPE_NON_KD 2
706#define RCVHQ_RCV_TYPE_ERROR 3
707
708#define QIB_HEADER_QUEUE_WORDS 9
709
710/* functions for extracting fields from rcvhdrq entries for the driver.
711 */
712static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
713{
714 return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
715}
716
717static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
718{
719 return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
720 QLOGIC_IB_RHF_RCVTYPE_MASK;
721}
722
723static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
724{
725 return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
726 QLOGIC_IB_RHF_LENGTH_MASK) << 2;
727}
728
729static inline __u32 qib_hdrget_index(const __le32 *rbuf)
730{
731 return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
732 QLOGIC_IB_RHF_EGRINDEX_MASK;
733}
734
735static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
736{
737 return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
738 QLOGIC_IB_RHF_SEQ_MASK;
739}
740
741static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
742{
743 return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
744 QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
745}
746
747static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
748{
749 return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
750}
751
752static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
753{
754 return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
755 QLOGIC_IB_I_VERS_MASK;
756}
757
758#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 000000000000..a86cbf880f98
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "qib_verbs.h"
39
40/**
41 * qib_cq_enter - add a new entry to the completion queue
42 * @cq: completion queue
43 * @entry: work completion entry to add
 44 * @solicited: true if @entry is a solicited entry
45 *
46 * This may be called with qp->s_lock held.
47 */
48void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
49{
50 struct qib_cq_wc *wc;
51 unsigned long flags;
52 u32 head;
53 u32 next;
54
55 spin_lock_irqsave(&cq->lock, flags);
56
57 /*
58 * Note that the head pointer might be writable by user processes.
59 * Take care to verify it is a sane value.
60 */
61 wc = cq->queue;
62 head = wc->head;
63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
65 next = 0;
66 } else
67 next = head + 1;
68 if (unlikely(next == wc->tail)) {
69 spin_unlock_irqrestore(&cq->lock, flags);
70 if (cq->ibcq.event_handler) {
71 struct ib_event ev;
72
73 ev.device = cq->ibcq.device;
74 ev.element.cq = &cq->ibcq;
75 ev.event = IB_EVENT_CQ_ERR;
76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
77 }
78 return;
79 }
80 if (cq->ip) {
81 wc->uqueue[head].wr_id = entry->wr_id;
82 wc->uqueue[head].status = entry->status;
83 wc->uqueue[head].opcode = entry->opcode;
84 wc->uqueue[head].vendor_err = entry->vendor_err;
85 wc->uqueue[head].byte_len = entry->byte_len;
86 wc->uqueue[head].ex.imm_data =
87 (__u32 __force)entry->ex.imm_data;
88 wc->uqueue[head].qp_num = entry->qp->qp_num;
89 wc->uqueue[head].src_qp = entry->src_qp;
90 wc->uqueue[head].wc_flags = entry->wc_flags;
91 wc->uqueue[head].pkey_index = entry->pkey_index;
92 wc->uqueue[head].slid = entry->slid;
93 wc->uqueue[head].sl = entry->sl;
94 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
95 wc->uqueue[head].port_num = entry->port_num;
96 /* Make sure entry is written before the head index. */
97 smp_wmb();
98 } else
99 wc->kqueue[head] = *entry;
100 wc->head = next;
101
102 if (cq->notify == IB_CQ_NEXT_COMP ||
103 (cq->notify == IB_CQ_SOLICITED && solicited)) {
104 cq->notify = IB_CQ_NONE;
105 cq->triggered++;
106 /*
107 * This will cause send_complete() to be called in
108 * another thread.
109 */
110 queue_work(qib_cq_wq, &cq->comptask);
111 }
112
113 spin_unlock_irqrestore(&cq->lock, flags);
114}
115
116/**
117 * qib_poll_cq - poll for work completion entries
118 * @ibcq: the completion queue to poll
119 * @num_entries: the maximum number of entries to return
120 * @entry: pointer to array where work completions are placed
121 *
122 * Returns the number of completion entries polled.
123 *
124 * This may be called from interrupt context. Also called by ib_poll_cq()
125 * in the generic verbs code.
126 */
127int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
128{
129 struct qib_cq *cq = to_icq(ibcq);
130 struct qib_cq_wc *wc;
131 unsigned long flags;
132 int npolled;
133 u32 tail;
134
135 /* The kernel can only poll a kernel completion queue */
136 if (cq->ip) {
137 npolled = -EINVAL;
138 goto bail;
139 }
140
141 spin_lock_irqsave(&cq->lock, flags);
142
143 wc = cq->queue;
144 tail = wc->tail;
145 if (tail > (u32) cq->ibcq.cqe)
146 tail = (u32) cq->ibcq.cqe;
147 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
148 if (tail == wc->head)
149 break;
150 /* The kernel doesn't need a RMB since it has the lock. */
151 *entry = wc->kqueue[tail];
152 if (tail >= cq->ibcq.cqe)
153 tail = 0;
154 else
155 tail++;
156 }
157 wc->tail = tail;
158
159 spin_unlock_irqrestore(&cq->lock, flags);
160
161bail:
162 return npolled;
163}
164
165static void send_complete(struct work_struct *work)
166{
167 struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
168
169 /*
170 * The completion handler will most likely rearm the notification
171 * and poll for all pending entries. If a new completion entry
172 * is added while we are in this routine, queue_work()
173 * won't call us again until we return so we check triggered to
174 * see if we need to call the handler again.
175 */
176 for (;;) {
177 u8 triggered = cq->triggered;
178
179 /*
180 * IPoIB connected mode assumes the callback is from a
181 * soft IRQ. We simulate this by blocking "bottom halves".
182 * See the implementation for ipoib_cm_handle_tx_wc(),
183 * netif_tx_lock_bh() and netif_tx_lock().
184 */
185 local_bh_disable();
186 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
187 local_bh_enable();
188
189 if (cq->triggered == triggered)
190 return;
191 }
192}
193
194/**
195 * qib_create_cq - create a completion queue
196 * @ibdev: the device this completion queue is attached to
197 * @entries: the minimum size of the completion queue
198 * @context: unused by the QLogic_IB driver
199 * @udata: user data for libibverbs.so
200 *
201 * Returns a pointer to the completion queue or negative errno values
202 * for failure.
203 *
204 * Called by ib_create_cq() in the generic verbs code.
205 */
206struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
207 int comp_vector, struct ib_ucontext *context,
208 struct ib_udata *udata)
209{
210 struct qib_ibdev *dev = to_idev(ibdev);
211 struct qib_cq *cq;
212 struct qib_cq_wc *wc;
213 struct ib_cq *ret;
214 u32 sz;
215
216 if (entries < 1 || entries > ib_qib_max_cqes) {
217 ret = ERR_PTR(-EINVAL);
218 goto done;
219 }
220
221 /* Allocate the completion queue structure. */
222 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
223 if (!cq) {
224 ret = ERR_PTR(-ENOMEM);
225 goto done;
226 }
227
228 /*
229 * Allocate the completion queue entries and head/tail pointers.
230 * This is allocated separately so that it can be resized and
231 * also mapped into user space.
232 * We need to use vmalloc() in order to support mmap and large
233 * numbers of entries.
234 */
235 sz = sizeof(*wc);
236 if (udata && udata->outlen >= sizeof(__u64))
237 sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
238 else
239 sz += sizeof(struct ib_wc) * (entries + 1);
240 wc = vmalloc_user(sz);
241 if (!wc) {
242 ret = ERR_PTR(-ENOMEM);
243 goto bail_cq;
244 }
245
246 /*
247 * Return the address of the WC as the offset to mmap.
248 * See qib_mmap() for details.
249 */
250 if (udata && udata->outlen >= sizeof(__u64)) {
251 int err;
252
253 cq->ip = qib_create_mmap_info(dev, sz, context, wc);
254 if (!cq->ip) {
255 ret = ERR_PTR(-ENOMEM);
256 goto bail_wc;
257 }
258
259 err = ib_copy_to_udata(udata, &cq->ip->offset,
260 sizeof(cq->ip->offset));
261 if (err) {
262 ret = ERR_PTR(err);
263 goto bail_ip;
264 }
265 } else
266 cq->ip = NULL;
267
268 spin_lock(&dev->n_cqs_lock);
269 if (dev->n_cqs_allocated == ib_qib_max_cqs) {
270 spin_unlock(&dev->n_cqs_lock);
271 ret = ERR_PTR(-ENOMEM);
272 goto bail_ip;
273 }
274
275 dev->n_cqs_allocated++;
276 spin_unlock(&dev->n_cqs_lock);
277
278 if (cq->ip) {
279 spin_lock_irq(&dev->pending_lock);
280 list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
281 spin_unlock_irq(&dev->pending_lock);
282 }
283
284 /*
285 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
286 * The number of entries must be >= the number requested, otherwise
287 * we return an error.
288 */
289 cq->ibcq.cqe = entries;
290 cq->notify = IB_CQ_NONE;
291 cq->triggered = 0;
292 spin_lock_init(&cq->lock);
293 INIT_WORK(&cq->comptask, send_complete);
294 wc->head = 0;
295 wc->tail = 0;
296 cq->queue = wc;
297
298 ret = &cq->ibcq;
299
300 goto done;
301
302bail_ip:
303 kfree(cq->ip);
304bail_wc:
305 vfree(wc);
306bail_cq:
307 kfree(cq);
308done:
309 return ret;
310}
311
312/**
313 * qib_destroy_cq - destroy a completion queue
314 * @ibcq: the completion queue to destroy.
315 *
316 * Returns 0 for success.
317 *
318 * Called by ib_destroy_cq() in the generic verbs code.
319 */
320int qib_destroy_cq(struct ib_cq *ibcq)
321{
322 struct qib_ibdev *dev = to_idev(ibcq->device);
323 struct qib_cq *cq = to_icq(ibcq);
324
325 flush_work(&cq->comptask);
326 spin_lock(&dev->n_cqs_lock);
327 dev->n_cqs_allocated--;
328 spin_unlock(&dev->n_cqs_lock);
329 if (cq->ip)
330 kref_put(&cq->ip->ref, qib_release_mmap_info);
331 else
332 vfree(cq->queue);
333 kfree(cq);
334
335 return 0;
336}
337
338/**
339 * qib_req_notify_cq - change the notification type for a completion queue
340 * @ibcq: the completion queue
341 * @notify_flags: the type of notification to request
342 *
343 * Returns 0 for success.
344 *
345 * This may be called from interrupt context. Also called by
346 * ib_req_notify_cq() in the generic verbs code.
347 */
348int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
349{
350 struct qib_cq *cq = to_icq(ibcq);
351 unsigned long flags;
352 int ret = 0;
353
354 spin_lock_irqsave(&cq->lock, flags);
355 /*
356 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
357 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
358 */
359 if (cq->notify != IB_CQ_NEXT_COMP)
360 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
361
362 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
363 cq->queue->head != cq->queue->tail)
364 ret = 1;
365
366 spin_unlock_irqrestore(&cq->lock, flags);
367
368 return ret;
369}
370
371/**
372 * qib_resize_cq - change the size of the CQ
373 * @ibcq: the completion queue
374 *
375 * Returns 0 for success.
376 */
377int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
378{
379 struct qib_cq *cq = to_icq(ibcq);
380 struct qib_cq_wc *old_wc;
381 struct qib_cq_wc *wc;
382 u32 head, tail, n;
383 int ret;
384 u32 sz;
385
386 if (cqe < 1 || cqe > ib_qib_max_cqes) {
387 ret = -EINVAL;
388 goto bail;
389 }
390
391 /*
392 * Need to use vmalloc() if we want to support large #s of entries.
393 */
394 sz = sizeof(*wc);
395 if (udata && udata->outlen >= sizeof(__u64))
396 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
397 else
398 sz += sizeof(struct ib_wc) * (cqe + 1);
399 wc = vmalloc_user(sz);
400 if (!wc) {
401 ret = -ENOMEM;
402 goto bail;
403 }
404
405 /* Check that we can write the offset to mmap. */
406 if (udata && udata->outlen >= sizeof(__u64)) {
407 __u64 offset = 0;
408
409 ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
410 if (ret)
411 goto bail_free;
412 }
413
414 spin_lock_irq(&cq->lock);
415 /*
416 * Make sure head and tail are sane since they
417 * might be user writable.
418 */
419 old_wc = cq->queue;
420 head = old_wc->head;
421 if (head > (u32) cq->ibcq.cqe)
422 head = (u32) cq->ibcq.cqe;
423 tail = old_wc->tail;
424 if (tail > (u32) cq->ibcq.cqe)
425 tail = (u32) cq->ibcq.cqe;
426 if (head < tail)
427 n = cq->ibcq.cqe + 1 + head - tail;
428 else
429 n = head - tail;
430 if (unlikely((u32)cqe < n)) {
431 ret = -EINVAL;
432 goto bail_unlock;
433 }
434 for (n = 0; tail != head; n++) {
435 if (cq->ip)
436 wc->uqueue[n] = old_wc->uqueue[tail];
437 else
438 wc->kqueue[n] = old_wc->kqueue[tail];
439 if (tail == (u32) cq->ibcq.cqe)
440 tail = 0;
441 else
442 tail++;
443 }
444 cq->ibcq.cqe = cqe;
445 wc->head = n;
446 wc->tail = 0;
447 cq->queue = wc;
448 spin_unlock_irq(&cq->lock);
449
450 vfree(old_wc);
451
452 if (cq->ip) {
453 struct qib_ibdev *dev = to_idev(ibcq->device);
454 struct qib_mmap_info *ip = cq->ip;
455
456 qib_update_mmap_info(dev, ip, sz, wc);
457
458 /*
459 * Return the offset to mmap.
460 * See qib_mmap() for details.
461 */
462 if (udata && udata->outlen >= sizeof(__u64)) {
463 ret = ib_copy_to_udata(udata, &ip->offset,
464 sizeof(ip->offset));
465 if (ret)
466 goto bail;
467 }
468
469 spin_lock_irq(&dev->pending_lock);
470 if (list_empty(&ip->pending_mmaps))
471 list_add(&ip->pending_mmaps, &dev->pending_mmaps);
472 spin_unlock_irq(&dev->pending_lock);
473 }
474
475 ret = 0;
476 goto bail;
477
478bail_unlock:
479 spin_unlock_irq(&cq->lock);
480bail_free:
481 vfree(wc);
482bail:
483 return ret;
484}
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
new file mode 100644
index 000000000000..ca98dd523752
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -0,0 +1,894 @@
1/*
2 * Copyright (c) 2010 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/*
36 * This file contains support for diagnostic functions. It is accessed by
37 * opening the qib_diag device, normally minor number 129. Diagnostic use
38 * of the QLogic_IB chip may render the chip or board unusable until the
39 * driver is unloaded, or in some cases, until the system is rebooted.
40 *
41 * Accesses to the chip through this interface are not similar to going
42 * through the /sys/bus/pci resource mmap interface.
43 */
44
45#include <linux/io.h>
46#include <linux/pci.h>
47#include <linux/poll.h>
48#include <linux/vmalloc.h>
49#include <linux/fs.h>
50#include <linux/uaccess.h>
51
52#include "qib.h"
53#include "qib_common.h"
54
55/*
56 * Each client that opens the diag device must read then write
57 * offset 0, to prevent lossage from random cat or od. diag_state
58 * sequences this "handshake".
59 */
60enum diag_state { UNUSED = 0, OPENED, INIT, READY };
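A minimal user-space sketch of that handshake, assuming the per-unit node created by qib_diag_add() below appears as /dev/ipath_diag0; the 8-byte read then write at offset 0 walks the client through OPENED -> INIT -> READY (see qib_diag_read()/qib_diag_write()):

	/* Hypothetical diag client handshake; the device node path is an assumption. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	static int diag_handshake(const char *node)	/* e.g. "/dev/ipath_diag0" */
	{
		uint64_t probe;
		int fd = open(node, O_RDWR);

		if (fd < 0)
			return -1;
		/* 8-byte read at offset 0: OPENED -> INIT */
		if (pread(fd, &probe, sizeof(probe), 0) != sizeof(probe) ||
		    /* 8-byte write-back at offset 0: INIT -> READY */
		    pwrite(fd, &probe, sizeof(probe), 0) != sizeof(probe)) {
			close(fd);
			return -1;
		}
		return fd;	/* arbitrary aligned reads/writes now accepted */
	}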
61
62/* State for an individual client. PID so children cannot abuse handshake */
63static struct qib_diag_client {
64 struct qib_diag_client *next;
65 struct qib_devdata *dd;
66 pid_t pid;
67 enum diag_state state;
68} *client_pool;
69
70/*
71 * Get a client struct. Recycled if possible, else kmalloc.
72 * Must be called with qib_mutex held
73 */
74static struct qib_diag_client *get_client(struct qib_devdata *dd)
75{
76 struct qib_diag_client *dc;
77
78 dc = client_pool;
79 if (dc)
 80		/* got one from the pool; remove it and use it */
81 client_pool = dc->next;
82 else
83 /* None in pool, alloc and init */
84 dc = kmalloc(sizeof *dc, GFP_KERNEL);
85
86 if (dc) {
87 dc->next = NULL;
88 dc->dd = dd;
89 dc->pid = current->pid;
90 dc->state = OPENED;
91 }
92 return dc;
93}
94
95/*
96 * Return to pool. Must be called with qib_mutex held
97 */
98static void return_client(struct qib_diag_client *dc)
99{
100 struct qib_devdata *dd = dc->dd;
101 struct qib_diag_client *tdc, *rdc;
102
103 rdc = NULL;
104 if (dc == dd->diag_client) {
105 dd->diag_client = dc->next;
106 rdc = dc;
107 } else {
108 tdc = dc->dd->diag_client;
109 while (tdc) {
110 if (dc == tdc->next) {
111 tdc->next = dc->next;
112 rdc = dc;
113 break;
114 }
115 tdc = tdc->next;
116 }
117 }
118 if (rdc) {
119 rdc->state = UNUSED;
120 rdc->dd = NULL;
121 rdc->pid = 0;
122 rdc->next = client_pool;
123 client_pool = rdc;
124 }
125}
126
127static int qib_diag_open(struct inode *in, struct file *fp);
128static int qib_diag_release(struct inode *in, struct file *fp);
129static ssize_t qib_diag_read(struct file *fp, char __user *data,
130 size_t count, loff_t *off);
131static ssize_t qib_diag_write(struct file *fp, const char __user *data,
132 size_t count, loff_t *off);
133
134static const struct file_operations diag_file_ops = {
135 .owner = THIS_MODULE,
136 .write = qib_diag_write,
137 .read = qib_diag_read,
138 .open = qib_diag_open,
139 .release = qib_diag_release
140};
141
142static atomic_t diagpkt_count = ATOMIC_INIT(0);
143static struct cdev *diagpkt_cdev;
144static struct device *diagpkt_device;
145
146static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
147 size_t count, loff_t *off);
148
149static const struct file_operations diagpkt_file_ops = {
150 .owner = THIS_MODULE,
151 .write = qib_diagpkt_write,
152};
153
154int qib_diag_add(struct qib_devdata *dd)
155{
156 char name[16];
157 int ret = 0;
158
159 if (atomic_inc_return(&diagpkt_count) == 1) {
160 ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
161 &diagpkt_file_ops, &diagpkt_cdev,
162 &diagpkt_device);
163 if (ret)
164 goto done;
165 }
166
167 snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
168 ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
169 &diag_file_ops, &dd->diag_cdev,
170 &dd->diag_device);
171done:
172 return ret;
173}
174
175static void qib_unregister_observers(struct qib_devdata *dd);
176
177void qib_diag_remove(struct qib_devdata *dd)
178{
179 struct qib_diag_client *dc;
180
181 if (atomic_dec_and_test(&diagpkt_count))
182 qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
183
184 qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
185
186 /*
187 * Return all diag_clients of this device. There should be none,
188 * as we are "guaranteed" that no clients are still open
189 */
190 while (dd->diag_client)
191 return_client(dd->diag_client);
192
193 /* Now clean up all unused client structs */
194 while (client_pool) {
195 dc = client_pool;
196 client_pool = dc->next;
197 kfree(dc);
198 }
199 /* Clean up observer list */
200 qib_unregister_observers(dd);
201}
202
203/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
204 *
205 * @dd: the qlogic_ib device
206 * @offs: the offset in chip-space
207 * @cntp: Pointer to max (byte) count for transfer starting at offset
208 * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
209 * mapping. It is needed because with the use of PAT for control of
210 * write-combining, the logically contiguous address-space of the chip
211 * may be split into virtually non-contiguous spaces, with different
212 * attributes, which are then mapped to contiguous physical space
213 * based from the first BAR.
214 *
215 * The code below makes the same assumptions as were made in
216 * init_chip_wc_pat() (qib_init.c), copied here:
217 * Assumes chip address space looks like:
218 * - kregs + sregs + cregs + uregs (in any order)
219 * - piobufs (2K and 4K bufs in either order)
220 * or:
221 * - kregs + sregs + cregs (in any order)
222 * - piobufs (2K and 4K bufs in either order)
223 * - uregs
224 *
225 * If cntp is non-NULL, returns how many bytes from offset can be accessed
226 * Returns 0 if the offset is not mapped.
227 */
228static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
229 u32 *cntp)
230{
231 u32 kreglen;
232 u32 snd_bottom, snd_lim = 0;
233 u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
234 u32 __iomem *map = NULL;
235 u32 cnt = 0;
236
237 /* First, simplest case, offset is within the first map. */
238 kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
239 if (offset < kreglen) {
240 map = krb32 + (offset / sizeof(u32));
241 cnt = kreglen - offset;
242 goto mapped;
243 }
244
245 /*
246 * Next check for user regs, the next most common case,
247 * and a cheap check because if they are not in the first map
248 * they are last in chip.
249 */
250 if (dd->userbase) {
251 /* If user regs mapped, they are after send, so set limit. */
252 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
253 snd_lim = dd->uregbase;
254 krb32 = (u32 __iomem *)dd->userbase;
255 if (offset >= dd->uregbase && offset < ulim) {
256 map = krb32 + (offset - dd->uregbase) / sizeof(u32);
257 cnt = ulim - offset;
258 goto mapped;
259 }
260 }
261
262 /*
263 * Lastly, check for offset within Send Buffers.
264 * This is gnarly because struct devdata is deliberately vague
265 * about things like 7322 VL15 buffers, and we are not in
266 * chip-specific code here, so should not make many assumptions.
267 * The one we _do_ make is that the only chip that has more sndbufs
268 * than we admit is the 7322, and it has userregs above that, so
269 * we know the snd_lim.
270 */
271 /* Assume 2K buffers are first. */
272 snd_bottom = dd->pio2k_bufbase;
273 if (snd_lim == 0) {
274 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
275 snd_lim = snd_bottom + tot2k;
276 }
277 /* If 4k buffers exist, account for them by bumping
278 * appropriate limit.
279 */
280 if (dd->piobcnt4k) {
281 u32 tot4k = dd->piobcnt4k * dd->align4k;
282 u32 offs4k = dd->piobufbase >> 32;
283 if (snd_bottom > offs4k)
284 snd_bottom = offs4k;
285 else {
286			/* 4k above 2k. Bump snd_lim, if needed */
287 if (!dd->userbase)
288 snd_lim = offs4k + tot4k;
289 }
290 }
291 /*
292 * Judgement call: can we ignore the space between SendBuffs and
293 * UserRegs, where we would like to see vl15 buffs, but not more?
294 */
295 if (offset >= snd_bottom && offset < snd_lim) {
296 offset -= snd_bottom;
297 map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
298 cnt = snd_lim - offset;
299 }
300
301mapped:
302 if (cntp)
303 *cntp = cnt;
304 return map;
305}
306
307/*
308 * qib_read_umem64 - read a 64-bit quantity from the chip into user space
309 * @dd: the qlogic_ib device
310 * @uaddr: the location to store the data in user memory
311 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
312 * @count: number of bytes to copy (multiple of 32 bits)
313 *
314 * This function also localizes all chip memory accesses.
315 * The copy should be written such that we read full cacheline packets
316 * from the chip. This is usually used for a single qword
317 *
318 * NOTE: This assumes the chip address is 64-bit aligned.
319 */
320static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
321 u32 regoffs, size_t count)
322{
323 const u64 __iomem *reg_addr;
324 const u64 __iomem *reg_end;
325 u32 limit;
326 int ret;
327
328 reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
329 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
330 ret = -EINVAL;
331 goto bail;
332 }
333 if (count >= limit)
334 count = limit;
335 reg_end = reg_addr + (count / sizeof(u64));
336
337 /* not very efficient, but it works for now */
338 while (reg_addr < reg_end) {
339 u64 data = readq(reg_addr);
340
341 if (copy_to_user(uaddr, &data, sizeof(u64))) {
342 ret = -EFAULT;
343 goto bail;
344 }
345 reg_addr++;
346 uaddr += sizeof(u64);
347 }
348 ret = 0;
349bail:
350 return ret;
351}
352
353/*
354 * qib_write_umem64 - write a 64-bit quantity to the chip from user space
355 * @dd: the qlogic_ib device
356 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
357 * @uaddr: the source of the data in user memory
358 * @count: the number of bytes to copy (multiple of 32 bits)
359 *
360 * This is usually used for a single qword
361 * NOTE: This assumes the chip address is 64-bit aligned.
362 */
363
364static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
365 const void __user *uaddr, size_t count)
366{
367 u64 __iomem *reg_addr;
368 const u64 __iomem *reg_end;
369 u32 limit;
370 int ret;
371
372 reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
373 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
374 ret = -EINVAL;
375 goto bail;
376 }
377 if (count >= limit)
378 count = limit;
379 reg_end = reg_addr + (count / sizeof(u64));
380
381 /* not very efficient, but it works for now */
382 while (reg_addr < reg_end) {
383 u64 data;
384 if (copy_from_user(&data, uaddr, sizeof(data))) {
385 ret = -EFAULT;
386 goto bail;
387 }
388 writeq(data, reg_addr);
389
390 reg_addr++;
391 uaddr += sizeof(u64);
392 }
393 ret = 0;
394bail:
395 return ret;
396}
397
398/*
399 * qib_read_umem32 - read a 32-bit quantity from the chip into user space
400 * @dd: the qlogic_ib device
401 * @uaddr: the location to store the data in user memory
402 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
403 * @count: number of bytes to copy
404 *
405 * read 32 bit values, not 64 bit; for memories that only
406 * support 32 bit reads; usually a single dword.
407 */
408static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
409 u32 regoffs, size_t count)
410{
411 const u32 __iomem *reg_addr;
412 const u32 __iomem *reg_end;
413 u32 limit;
414 int ret;
415
416 reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
417 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
418 ret = -EINVAL;
419 goto bail;
420 }
421 if (count >= limit)
422 count = limit;
423 reg_end = reg_addr + (count / sizeof(u32));
424
425 /* not very efficient, but it works for now */
426 while (reg_addr < reg_end) {
427 u32 data = readl(reg_addr);
428
429 if (copy_to_user(uaddr, &data, sizeof(data))) {
430 ret = -EFAULT;
431 goto bail;
432 }
433
434 reg_addr++;
435 uaddr += sizeof(u32);
436
437 }
438 ret = 0;
439bail:
440 return ret;
441}
442
443/*
444 * qib_write_umem32 - write a 32-bit quantity to the chip from user space
445 * @dd: the qlogic_ib device
446 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
447 * @uaddr: the source of the data in user memory
448 * @count: number of bytes to copy
449 *
450 * write 32 bit values, not 64 bit; for memories that only
451 * support 32 bit writes; usually a single dword.
452 */
453
454static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
455 const void __user *uaddr, size_t count)
456{
457 u32 __iomem *reg_addr;
458 const u32 __iomem *reg_end;
459 u32 limit;
460 int ret;
461
462 reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
463 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
464 ret = -EINVAL;
465 goto bail;
466 }
467 if (count >= limit)
468 count = limit;
469 reg_end = reg_addr + (count / sizeof(u32));
470
471 while (reg_addr < reg_end) {
472 u32 data;
473
474 if (copy_from_user(&data, uaddr, sizeof(data))) {
475 ret = -EFAULT;
476 goto bail;
477 }
478 writel(data, reg_addr);
479
480 reg_addr++;
481 uaddr += sizeof(u32);
482 }
483 ret = 0;
484bail:
485 return ret;
486}
487
488static int qib_diag_open(struct inode *in, struct file *fp)
489{
490 int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
491 struct qib_devdata *dd;
492 struct qib_diag_client *dc;
493 int ret;
494
495 mutex_lock(&qib_mutex);
496
497 dd = qib_lookup(unit);
498
499 if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
500 !dd->kregbase) {
501 ret = -ENODEV;
502 goto bail;
503 }
504
505 dc = get_client(dd);
506 if (!dc) {
507 ret = -ENOMEM;
508 goto bail;
509 }
510 dc->next = dd->diag_client;
511 dd->diag_client = dc;
512 fp->private_data = dc;
513 ret = 0;
514bail:
515 mutex_unlock(&qib_mutex);
516
517 return ret;
518}
519
520/**
521 * qib_diagpkt_write - write an IB packet
522 * @fp: the diag data device file pointer
523 * @data: qib_diag_xpkt structure saying where to get the packet
524 * @count: size of data to write
525 * @off: unused by this code
526 */
527static ssize_t qib_diagpkt_write(struct file *fp,
528 const char __user *data,
529 size_t count, loff_t *off)
530{
531 u32 __iomem *piobuf;
532 u32 plen, clen, pbufn;
533 struct qib_diag_xpkt dp;
534 u32 *tmpbuf = NULL;
535 struct qib_devdata *dd;
536 struct qib_pportdata *ppd;
537 ssize_t ret = 0;
538
539 if (count != sizeof(dp)) {
540 ret = -EINVAL;
541 goto bail;
542 }
543 if (copy_from_user(&dp, data, sizeof(dp))) {
544 ret = -EFAULT;
545 goto bail;
546 }
547
548 dd = qib_lookup(dp.unit);
549 if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
550 ret = -ENODEV;
551 goto bail;
552 }
553 if (!(dd->flags & QIB_INITTED)) {
554 /* no hardware, freeze, etc. */
555 ret = -ENODEV;
556 goto bail;
557 }
558
559 if (dp.version != _DIAG_XPKT_VERS) {
560 qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
561 dp.version);
562 ret = -EINVAL;
563 goto bail;
564 }
565 /* send count must be an exact number of dwords */
566 if (dp.len & 3) {
567 ret = -EINVAL;
568 goto bail;
569 }
570 if (!dp.port || dp.port > dd->num_pports) {
571 ret = -EINVAL;
572 goto bail;
573 }
574 ppd = &dd->pport[dp.port - 1];
575
576 /* need total length before first word written */
577 /* +1 word is for the qword padding */
578 plen = sizeof(u32) + dp.len;
579 clen = dp.len >> 2;
580
581 if ((plen + 4) > ppd->ibmaxlen) {
582 ret = -EINVAL;
583 goto bail; /* before writing pbc */
584 }
585 tmpbuf = vmalloc(plen);
586 if (!tmpbuf) {
587 qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
588 "failing\n");
589 ret = -ENOMEM;
590 goto bail;
591 }
592
593 if (copy_from_user(tmpbuf,
594 (const void __user *) (unsigned long) dp.data,
595 dp.len)) {
596 ret = -EFAULT;
597 goto bail;
598 }
599
600 plen >>= 2; /* in dwords */
601
602 if (dp.pbc_wd == 0)
603 dp.pbc_wd = plen;
604
605 piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
606 if (!piobuf) {
607 ret = -EBUSY;
608 goto bail;
609 }
610 /* disarm it just to be extra sure */
611 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
612
613 /* disable header check on pbufn for this packet */
614 dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
615
616 writeq(dp.pbc_wd, piobuf);
617 /*
618 * Copy all but the trigger word, then flush, so it's written
619 * to chip before trigger word, then write trigger word, then
620 * flush again, so packet is sent.
621 */
622 if (dd->flags & QIB_PIO_FLUSH_WC) {
623 qib_flush_wc();
624 qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
625 qib_flush_wc();
626 __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
627 } else
628 qib_pio_copy(piobuf + 2, tmpbuf, clen);
629
630 if (dd->flags & QIB_USE_SPCL_TRIG) {
631 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
632
633 qib_flush_wc();
634 __raw_writel(0xaebecede, piobuf + spcl_off);
635 }
636
637 /*
638 * Ensure buffer is written to the chip, then re-enable
639 * header checks (if supported by chip). The txchk
640 * code will ensure seen by chip before returning.
641 */
642 qib_flush_wc();
643 qib_sendbuf_done(dd, pbufn);
644 dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
645
646 ret = sizeof(dp);
647
648bail:
649 vfree(tmpbuf);
650 return ret;
651}
652
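A rough user-space sketch of driving this write path, assuming struct qib_diag_xpkt and _DIAG_XPKT_VERS from qib_common.h are available to the caller; the open file descriptor, payload buffer, and length are illustrative names supplied by the surrounding code:

	/* Hypothetical diagnostic packet send; diagpkt_fd, payload and
	 * payload_dwords are assumed to exist in the caller. */
	struct qib_diag_xpkt dp = {
		.version = _DIAG_XPKT_VERS,
		.unit	 = 0,			/* qib unit number */
		.port	 = 1,			/* 1-based IB port */
		.len	 = payload_dwords << 2,	/* whole dwords, per the check above */
		.data	 = (uintptr_t)payload,	/* user buffer holding the raw packet */
		.pbc_wd	 = 0,			/* 0: let the driver derive the PBC word */
	};

	if (write(diagpkt_fd, &dp, sizeof(dp)) != sizeof(dp))
		perror("ipath_diagpkt write");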
653static int qib_diag_release(struct inode *in, struct file *fp)
654{
655 mutex_lock(&qib_mutex);
656 return_client(fp->private_data);
657 fp->private_data = NULL;
658 mutex_unlock(&qib_mutex);
659 return 0;
660}
661
662/*
663 * Chip-specific code calls to register its interest in
664 * a specific range.
665 */
666struct diag_observer_list_elt {
667 struct diag_observer_list_elt *next;
668 const struct diag_observer *op;
669};
670
671int qib_register_observer(struct qib_devdata *dd,
672 const struct diag_observer *op)
673{
674 struct diag_observer_list_elt *olp;
675 int ret = -EINVAL;
676
677 if (!dd || !op)
678 goto bail;
679 ret = -ENOMEM;
680 olp = vmalloc(sizeof *olp);
681 if (!olp) {
682 printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
683 goto bail;
684 }
685 if (olp) {
686 unsigned long flags;
687
688 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
689 olp->op = op;
690 olp->next = dd->diag_observer_list;
691 dd->diag_observer_list = olp;
692 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
693 ret = 0;
694 }
695bail:
696 return ret;
697}
698
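For illustration only, a sketch of how a chip-specific file might register an observer. The real struct diag_observer and hook signature live in qib.h and are only inferred here from the calls in qib_diag_read()/qib_diag_write(); the shadow variable and offset are invented names:

	/* Hypothetical observer covering one 8-byte register window. */
	static u64 my_shadow_value;	/* illustrative backing store */

	static int my_shadow_hook(struct qib_devdata *dd,
				  const struct diag_observer *op,
				  u32 offs, u64 *data, u64 mask, int only_32)
	{
		if (!mask)		/* read request (mask == 0 in qib_diag_read) */
			*data = my_shadow_value;
		else			/* write request (mask == ~0 in qib_diag_write) */
			my_shadow_value = (my_shadow_value & ~mask) | (*data & mask);
		return only_32 ? 4 : 8;	/* bytes handled; compared against count */
	}

	static const struct diag_observer my_shadow_observer = {
		.hook	= my_shadow_hook,
		.bottom	= MY_SHADOW_OFFS,	/* invented chip offset */
		.top	= MY_SHADOW_OFFS + 7,	/* inclusive, per diag_get_observer() */
	};

	/* From chip-specific init: qib_register_observer(dd, &my_shadow_observer); */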
699/* Remove all registered observers when device is closed */
700static void qib_unregister_observers(struct qib_devdata *dd)
701{
702 struct diag_observer_list_elt *olp;
703 unsigned long flags;
704
705 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
706 olp = dd->diag_observer_list;
707 while (olp) {
708 /* Pop one observer, let go of lock */
709 dd->diag_observer_list = olp->next;
710 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
711 vfree(olp);
712 /* try again. */
713 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
714 olp = dd->diag_observer_list;
715 }
716 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
717}
718
719/*
720 * Find the observer, if any, for the specified address. Initial implementation
721 * is a simple stack of observers. This must be called with the diag transaction
722 * lock held.
723 */
724static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
725 u32 addr)
726{
727 struct diag_observer_list_elt *olp;
728 const struct diag_observer *op = NULL;
729
730 olp = dd->diag_observer_list;
731 while (olp) {
732 op = olp->op;
733 if (addr >= op->bottom && addr <= op->top)
734 break;
735 olp = olp->next;
736 }
737 if (!olp)
738 op = NULL;
739
740 return op;
741}
742
743static ssize_t qib_diag_read(struct file *fp, char __user *data,
744 size_t count, loff_t *off)
745{
746 struct qib_diag_client *dc = fp->private_data;
747 struct qib_devdata *dd = dc->dd;
748 void __iomem *kreg_base;
749 ssize_t ret;
750
751 if (dc->pid != current->pid) {
752 ret = -EPERM;
753 goto bail;
754 }
755
756 kreg_base = dd->kregbase;
757
758 if (count == 0)
759 ret = 0;
760 else if ((count % 4) || (*off % 4))
761 /* address or length is not 32-bit aligned, hence invalid */
762 ret = -EINVAL;
763 else if (dc->state < READY && (*off || count != 8))
764 ret = -EINVAL; /* prevent cat /dev/qib_diag* */
765 else {
766 unsigned long flags;
767 u64 data64 = 0;
768 int use_32;
769 const struct diag_observer *op;
770
771 use_32 = (count % 8) || (*off % 8);
772 ret = -1;
773 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
774 /*
775		 * Check for an observer on this address range.
776		 * We only support a single 32- or 64-bit read
777		 * via an observer, currently.
778 */
779 op = diag_get_observer(dd, *off);
780 if (op) {
781 u32 offset = *off;
782 ret = op->hook(dd, op, offset, &data64, 0, use_32);
783 }
784 /*
785 * We need to release lock before any copy_to_user(),
786 * whether implicit in qib_read_umem* or explicit below.
787 */
788 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
789 if (!op) {
790 if (use_32)
791 /*
792 * Address or length is not 64-bit aligned;
793 * do 32-bit rd
794 */
795 ret = qib_read_umem32(dd, data, (u32) *off,
796 count);
797 else
798 ret = qib_read_umem64(dd, data, (u32) *off,
799 count);
800 } else if (ret == count) {
801 /* Below finishes case where observer existed */
802 ret = copy_to_user(data, &data64, use_32 ?
803 sizeof(u32) : sizeof(u64));
804 if (ret)
805 ret = -EFAULT;
806 }
807 }
808
809 if (ret >= 0) {
810 *off += count;
811 ret = count;
812 if (dc->state == OPENED)
813 dc->state = INIT;
814 }
815bail:
816 return ret;
817}
818
819static ssize_t qib_diag_write(struct file *fp, const char __user *data,
820 size_t count, loff_t *off)
821{
822 struct qib_diag_client *dc = fp->private_data;
823 struct qib_devdata *dd = dc->dd;
824 void __iomem *kreg_base;
825 ssize_t ret;
826
827 if (dc->pid != current->pid) {
828 ret = -EPERM;
829 goto bail;
830 }
831
832 kreg_base = dd->kregbase;
833
834 if (count == 0)
835 ret = 0;
836 else if ((count % 4) || (*off % 4))
837 /* address or length is not 32-bit aligned, hence invalid */
838 ret = -EINVAL;
839 else if (dc->state < READY &&
840 ((*off || count != 8) || dc->state != INIT))
841 /* No writes except second-step of init seq */
842 ret = -EINVAL; /* before any other write allowed */
843 else {
844 unsigned long flags;
845 const struct diag_observer *op = NULL;
846 int use_32 = (count % 8) || (*off % 8);
847
848 /*
849 * Check for observer on this address range.
850 * We only support a single 32 or 64-bit write
851 * via observer, currently. This helps, because
852 * we would otherwise have to jump through hoops
853 * to make "diag transaction" meaningful when we
854 * cannot do a copy_from_user while holding the lock.
855 */
856 if (count == 4 || count == 8) {
857 u64 data64;
858 u32 offset = *off;
859 ret = copy_from_user(&data64, data, count);
860 if (ret) {
861 ret = -EFAULT;
862 goto bail;
863 }
864 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
865 op = diag_get_observer(dd, *off);
866 if (op)
867				ret = op->hook(dd, op, offset, &data64, ~0ULL,
868 use_32);
869 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
870 }
871
872 if (!op) {
873 if (use_32)
874 /*
875 * Address or length is not 64-bit aligned;
876 * do 32-bit write
877 */
878 ret = qib_write_umem32(dd, (u32) *off, data,
879 count);
880 else
881 ret = qib_write_umem64(dd, (u32) *off, data,
882 count);
883 }
884 }
885
886 if (ret >= 0) {
887 *off += count;
888 ret = count;
889 if (dc->state == INIT)
890 dc->state = READY; /* all read/write OK now */
891 }
892bail:
893 return ret;
894}
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
new file mode 100644
index 000000000000..2920bb39a65b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include <linux/scatterlist.h>
34
35#include "qib_verbs.h"
36
37#define BAD_DMA_ADDRESS ((u64) 0)
38
39/*
40 * The following functions implement driver specific replacements
41 * for the ib_dma_*() functions.
42 *
43 * These functions return kernel virtual addresses instead of
44 * device bus addresses since the driver uses the CPU to copy
45 * data instead of using hardware DMA.
46 */
47
48static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
49{
50 return dma_addr == BAD_DMA_ADDRESS;
51}
52
53static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
54 size_t size, enum dma_data_direction direction)
55{
56 BUG_ON(!valid_dma_direction(direction));
57 return (u64) cpu_addr;
58}
59
60static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
61 enum dma_data_direction direction)
62{
63 BUG_ON(!valid_dma_direction(direction));
64}
65
66static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
67 unsigned long offset, size_t size,
68 enum dma_data_direction direction)
69{
70 u64 addr;
71
72 BUG_ON(!valid_dma_direction(direction));
73
74 if (offset + size > PAGE_SIZE) {
75 addr = BAD_DMA_ADDRESS;
76 goto done;
77 }
78
79 addr = (u64) page_address(page);
80 if (addr)
81 addr += offset;
82 /* TODO: handle highmem pages */
83
84done:
85 return addr;
86}
87
88static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
89 enum dma_data_direction direction)
90{
91 BUG_ON(!valid_dma_direction(direction));
92}
93
94static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
95 int nents, enum dma_data_direction direction)
96{
97 struct scatterlist *sg;
98 u64 addr;
99 int i;
100 int ret = nents;
101
102 BUG_ON(!valid_dma_direction(direction));
103
104 for_each_sg(sgl, sg, nents, i) {
105 addr = (u64) page_address(sg_page(sg));
106 /* TODO: handle highmem pages */
107 if (!addr) {
108 ret = 0;
109 break;
110 }
111 }
112 return ret;
113}
114
115static void qib_unmap_sg(struct ib_device *dev,
116 struct scatterlist *sg, int nents,
117 enum dma_data_direction direction)
118{
119 BUG_ON(!valid_dma_direction(direction));
120}
121
122static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
123{
124 u64 addr = (u64) page_address(sg_page(sg));
125
126 if (addr)
127 addr += sg->offset;
128 return addr;
129}
130
131static unsigned int qib_sg_dma_len(struct ib_device *dev,
132 struct scatterlist *sg)
133{
134 return sg->length;
135}
136
137static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
138 size_t size, enum dma_data_direction dir)
139{
140}
141
142static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
143 size_t size,
144 enum dma_data_direction dir)
145{
146}
147
148static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
149 u64 *dma_handle, gfp_t flag)
150{
151 struct page *p;
152 void *addr = NULL;
153
154 p = alloc_pages(flag, get_order(size));
155 if (p)
156 addr = page_address(p);
157 if (dma_handle)
158 *dma_handle = (u64) addr;
159 return addr;
160}
161
162static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
163 void *cpu_addr, u64 dma_handle)
164{
165 free_pages((unsigned long) cpu_addr, get_order(size));
166}
167
168struct ib_dma_mapping_ops qib_dma_mapping_ops = {
169 .mapping_error = qib_mapping_error,
170 .map_single = qib_dma_map_single,
171 .unmap_single = qib_dma_unmap_single,
172 .map_page = qib_dma_map_page,
173 .unmap_page = qib_dma_unmap_page,
174 .map_sg = qib_map_sg,
175 .unmap_sg = qib_unmap_sg,
176 .dma_address = qib_sg_dma_address,
177 .dma_len = qib_sg_dma_len,
178 .sync_single_for_cpu = qib_sync_single_for_cpu,
179 .sync_single_for_device = qib_sync_single_for_device,
180 .alloc_coherent = qib_dma_alloc_coherent,
181 .free_coherent = qib_dma_free_coherent
182};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 000000000000..f15ce076ac49
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/spinlock.h>
35#include <linux/pci.h>
36#include <linux/io.h>
37#include <linux/delay.h>
38#include <linux/netdevice.h>
39#include <linux/vmalloc.h>
40
41#include "qib.h"
42
43/*
44 * The size has to be longer than this string, so we can append
45 * board/chip information to it in the init code.
46 */
47const char ib_qib_version[] = QIB_IDSTR "\n";
48
49DEFINE_SPINLOCK(qib_devs_lock);
50LIST_HEAD(qib_dev_list);
51DEFINE_MUTEX(qib_mutex); /* general driver use */
52
53unsigned qib_ibmtu;
54module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
55MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
56
57unsigned qib_compat_ddr_negotiate = 1;
58module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
59 S_IWUSR | S_IRUGO);
60MODULE_PARM_DESC(compat_ddr_negotiate,
61 "Attempt pre-IBTA 1.2 DDR speed negotiation");
62
63MODULE_LICENSE("Dual BSD/GPL");
64MODULE_AUTHOR("QLogic <support@qlogic.com>");
65MODULE_DESCRIPTION("QLogic IB driver");
66
67/*
68 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
69 * PIO send buffers. This is well beyond anything currently
70 * defined in the InfiniBand spec.
71 */
72#define QIB_PIO_MAXIBHDR 128
73
74struct qlogic_ib_stats qib_stats;
75
76const char *qib_get_unit_name(int unit)
77{
78 static char iname[16];
79
80 snprintf(iname, sizeof iname, "infinipath%u", unit);
81 return iname;
82}
83
84/*
85 * Return count of units with at least one port ACTIVE.
86 */
87int qib_count_active_units(void)
88{
89 struct qib_devdata *dd;
90 struct qib_pportdata *ppd;
91 unsigned long flags;
92 int pidx, nunits_active = 0;
93
94 spin_lock_irqsave(&qib_devs_lock, flags);
95 list_for_each_entry(dd, &qib_dev_list, list) {
96 if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
97 continue;
98 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
99 ppd = dd->pport + pidx;
100 if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
101 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
102 nunits_active++;
103 break;
104 }
105 }
106 }
107 spin_unlock_irqrestore(&qib_devs_lock, flags);
108 return nunits_active;
109}
110
111/*
112 * Return count of all units, optionally return in arguments
113 * the number of usable (present) units, and the number of
114 * ports that are up.
115 */
116int qib_count_units(int *npresentp, int *nupp)
117{
118 int nunits = 0, npresent = 0, nup = 0;
119 struct qib_devdata *dd;
120 unsigned long flags;
121 int pidx;
122 struct qib_pportdata *ppd;
123
124 spin_lock_irqsave(&qib_devs_lock, flags);
125
126 list_for_each_entry(dd, &qib_dev_list, list) {
127 nunits++;
128 if ((dd->flags & QIB_PRESENT) && dd->kregbase)
129 npresent++;
130 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
131 ppd = dd->pport + pidx;
132 if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
133 QIBL_LINKARMED | QIBL_LINKACTIVE)))
134 nup++;
135 }
136 }
137
138 spin_unlock_irqrestore(&qib_devs_lock, flags);
139
140 if (npresentp)
141 *npresentp = npresent;
142 if (nupp)
143 *nupp = nup;
144
145 return nunits;
146}
147
148/**
149 * qib_wait_linkstate - wait for an IB link state change to occur
150 * @ppd: the qlogic_ib per-port device data
151 * @state: the state to wait for
152 * @msecs: the number of milliseconds to wait
153 *
154 * Wait up to msecs milliseconds for an IB link state change to occur.
155 * For now, take the easy polling route. Currently used only by
156 * qib_set_linkstate. Returns 0 if the state is reached, otherwise
157 * -ETIMEDOUT. The state can have multiple bits set, to allow for any of
158 * several transitions.
159 */
160int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
161{
162 int ret;
163 unsigned long flags;
164
165 spin_lock_irqsave(&ppd->lflags_lock, flags);
166 if (ppd->state_wanted) {
167 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
168 ret = -EBUSY;
169 goto bail;
170 }
171 ppd->state_wanted = state;
172 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
173 wait_event_interruptible_timeout(ppd->state_wait,
174 (ppd->lflags & state),
175 msecs_to_jiffies(msecs));
176 spin_lock_irqsave(&ppd->lflags_lock, flags);
177 ppd->state_wanted = 0;
178 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
179
180 if (!(ppd->lflags & state))
181 ret = -ETIMEDOUT;
182 else
183 ret = 0;
184bail:
185 return ret;
186}
187
188int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
189{
190 u32 lstate;
191 int ret;
192 struct qib_devdata *dd = ppd->dd;
193 unsigned long flags;
194
195 switch (newstate) {
196 case QIB_IB_LINKDOWN_ONLY:
197 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
198 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
199 /* don't wait */
200 ret = 0;
201 goto bail;
202
203 case QIB_IB_LINKDOWN:
204 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
205 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
206 /* don't wait */
207 ret = 0;
208 goto bail;
209
210 case QIB_IB_LINKDOWN_SLEEP:
211 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
212 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
213 /* don't wait */
214 ret = 0;
215 goto bail;
216
217 case QIB_IB_LINKDOWN_DISABLE:
218 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
219 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
220 /* don't wait */
221 ret = 0;
222 goto bail;
223
224 case QIB_IB_LINKARM:
225 if (ppd->lflags & QIBL_LINKARMED) {
226 ret = 0;
227 goto bail;
228 }
229 if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
230 ret = -EINVAL;
231 goto bail;
232 }
233 /*
234 * Since the port can be ACTIVE when we ask for ARMED,
235 * clear QIBL_LINKV so we can wait for a transition.
236 * If the link isn't ARMED, then something else happened
237 * and there is no point waiting for ARMED.
238 */
239 spin_lock_irqsave(&ppd->lflags_lock, flags);
240 ppd->lflags &= ~QIBL_LINKV;
241 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
242 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
243 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
244 lstate = QIBL_LINKV;
245 break;
246
247 case QIB_IB_LINKACTIVE:
248 if (ppd->lflags & QIBL_LINKACTIVE) {
249 ret = 0;
250 goto bail;
251 }
252 if (!(ppd->lflags & QIBL_LINKARMED)) {
253 ret = -EINVAL;
254 goto bail;
255 }
256 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
257 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
258 lstate = QIBL_LINKACTIVE;
259 break;
260
261 default:
262 ret = -EINVAL;
263 goto bail;
264 }
265 ret = qib_wait_linkstate(ppd, lstate, 10);
266
267bail:
268 return ret;
269}
270
271/*
272 * Get address of eager buffer from its index (allocated in chunks, not
273 * contiguous).
274 */
275static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
276{
277 const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
278 const u32 idx = etail % rcd->rcvegrbufs_perchunk;
279
280 return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
281}
282
283/*
284 * Returns 1 if error was a CRC, else 0.
285 * Needed for some chip's synthesized error counters.
286 */
287static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
288 u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
289 struct qib_message_header *hdr)
290{
291 u32 ret = 0;
292
293 if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
294 ret = 1;
295 return ret;
296}
297
298/*
299 * qib_kreceive - receive a packet
300 * @rcd: the qlogic_ib context
301 * @llic: gets count of good packets needed to clear lli,
302 * (used with chips that need to track CRCs for lli)
303 *
304 * called from interrupt handler for errors or receive interrupt
305 * Returns number of CRC error packets, needed by some chips for
306 * local link integrity tracking. CRCs are adjusted down by following
307 * good packets, if any, and count of good packets is also tracked.
308 */
309u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
310{
311 struct qib_devdata *dd = rcd->dd;
312 struct qib_pportdata *ppd = rcd->ppd;
313 __le32 *rhf_addr;
314 void *ebuf;
315 const u32 rsize = dd->rcvhdrentsize; /* words */
316 const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
317 u32 etail = -1, l, hdrqtail;
318 struct qib_message_header *hdr;
319 u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
320 int last;
321 u64 lval;
322 struct qib_qp *qp, *nqp;
323
324 l = rcd->head;
325 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
326 if (dd->flags & QIB_NODMA_RTAIL) {
327 u32 seq = qib_hdrget_seq(rhf_addr);
328 if (seq != rcd->seq_cnt)
329 goto bail;
330 hdrqtail = 0;
331 } else {
332 hdrqtail = qib_get_rcvhdrtail(rcd);
333 if (l == hdrqtail)
334 goto bail;
335 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
336 }
337
338 for (last = 0, i = 1; !last; i += !last) {
339 hdr = dd->f_get_msgheader(dd, rhf_addr);
340 eflags = qib_hdrget_err_flags(rhf_addr);
341 etype = qib_hdrget_rcv_type(rhf_addr);
342 /* total length */
343 tlen = qib_hdrget_length_in_bytes(rhf_addr);
344 ebuf = NULL;
345 if ((dd->flags & QIB_NODMA_RTAIL) ?
346 qib_hdrget_use_egr_buf(rhf_addr) :
347 (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
348 etail = qib_hdrget_index(rhf_addr);
349 updegr = 1;
350 if (tlen > sizeof(*hdr) ||
351 etype >= RCVHQ_RCV_TYPE_NON_KD)
352 ebuf = qib_get_egrbuf(rcd, etail);
353 }
354 if (!eflags) {
355 u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
356
357 if (lrh_len != tlen) {
358 qib_stats.sps_lenerrs++;
359 goto move_along;
360 }
361 }
362 if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
363 ebuf == NULL &&
364 tlen > (dd->rcvhdrentsize - 2 + 1 -
365 qib_hdrget_offset(rhf_addr)) << 2) {
366 goto move_along;
367 }
368
369 /*
370 * Both tiderr and qibhdrerr are set for all plain IB
371 * packets; only qibhdrerr should be set.
372 */
373 if (unlikely(eflags))
374 crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
375 etail, rhf_addr, hdr);
376 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
377 qib_ib_rcv(rcd, hdr, ebuf, tlen);
378 if (crcs)
379 crcs--;
380 else if (llic && *llic)
381 --*llic;
382 }
383move_along:
384 l += rsize;
385 if (l >= maxcnt)
386 l = 0;
387 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
388 if (dd->flags & QIB_NODMA_RTAIL) {
389 u32 seq = qib_hdrget_seq(rhf_addr);
390
391 if (++rcd->seq_cnt > 13)
392 rcd->seq_cnt = 1;
393 if (seq != rcd->seq_cnt)
394 last = 1;
395 } else if (l == hdrqtail)
396 last = 1;
397 /*
398 * Update head regs etc., every 16 packets, if not last pkt,
399 * to help prevent rcvhdrq overflows, when many packets
400 * are processed and queue is nearly full.
401 * Don't request an interrupt for intermediate updates.
402 */
403 lval = l;
404 if (!last && !(i & 0xf)) {
405 dd->f_update_usrhead(rcd, lval, updegr, etail);
406 updegr = 0;
407 }
408 }
409
410 rcd->head = l;
411 rcd->pkt_count += i;
412
413 /*
414 * Iterate over all QPs waiting to respond.
415 * The list won't change since the IRQ is only run on one CPU.
416 */
417 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
418 list_del_init(&qp->rspwait);
419 if (qp->r_flags & QIB_R_RSP_NAK) {
420 qp->r_flags &= ~QIB_R_RSP_NAK;
421 qib_send_rc_ack(qp);
422 }
423 if (qp->r_flags & QIB_R_RSP_SEND) {
424 unsigned long flags;
425
426 qp->r_flags &= ~QIB_R_RSP_SEND;
427 spin_lock_irqsave(&qp->s_lock, flags);
428 if (ib_qib_state_ops[qp->state] &
429 QIB_PROCESS_OR_FLUSH_SEND)
430 qib_schedule_send(qp);
431 spin_unlock_irqrestore(&qp->s_lock, flags);
432 }
433 if (atomic_dec_and_test(&qp->refcount))
434 wake_up(&qp->wait);
435 }
436
437bail:
438 /* Report number of packets consumed */
439 if (npkts)
440 *npkts = i;
441
442 /*
443 * Always write head at end, and setup rcv interrupt, even
444 * if no packets were processed.
445 */
446 lval = (u64)rcd->head | dd->rhdrhead_intr_off;
447 dd->f_update_usrhead(rcd, lval, updegr, etail);
448 return crcs;
449}
450
451/**
452 * qib_set_mtu - set the MTU
453 * @ppd: the perport data
454 * @arg: the new MTU
455 *
456 * We can handle "any" incoming size; the issue here is whether we
457 * need to restrict our outgoing size. For now, we don't do any
458 * sanity checking on this, and we don't deal with what happens to
459 * programs that are already running when the size changes.
460 * NOTE: changing the MTU will usually cause the IBC to go back to
461 * link INIT state...
462 */
463int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
464{
465 u32 piosize;
466 int ret, chk;
467
468 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
469 arg != 4096) {
470 ret = -EINVAL;
471 goto bail;
472 }
473 chk = ib_mtu_enum_to_int(qib_ibmtu);
474 if (chk > 0 && arg > chk) {
475 ret = -EINVAL;
476 goto bail;
477 }
478
479 piosize = ppd->ibmaxlen;
480 ppd->ibmtu = arg;
481
482 if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
483 /* Only if it's not the initial value (or reset to it) */
484 if (piosize != ppd->init_ibmaxlen) {
485 if (arg > piosize && arg <= ppd->init_ibmaxlen)
486 piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
487 ppd->ibmaxlen = piosize;
488 }
489 } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
490 piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
491 ppd->ibmaxlen = piosize;
492 }
493
494 ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
495
496 ret = 0;
497
498bail:
499 return ret;
500}
501
502int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
503{
504 struct qib_devdata *dd = ppd->dd;
505 ppd->lid = lid;
506 ppd->lmc = lmc;
507
508 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
509 lid | (~((1U << lmc) - 1)) << 16);
510
511 qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
512 dd->unit, ppd->port, lid);
513
514 return 0;
515}
516
517/*
518 * The following functions deal with the "obviously simple" task of overriding
519 * the state of the LEDs, which normally indicate link physical and logical
520 * status. The complications arise in dealing with different hardware mappings
521 * and the board-dependent routine being called from interrupts,
522 * and then there's the requirement to _flash_ them.
523 */
524#define LED_OVER_FREQ_SHIFT 8
525#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
526/* Below is "non-zero" to force override, but both actual LEDs are off */
527#define LED_OVER_BOTH_OFF (8)
528
529static void qib_run_led_override(unsigned long opaque)
530{
531 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
532 struct qib_devdata *dd = ppd->dd;
533 int timeoff;
534 int ph_idx;
535
536 if (!(dd->flags & QIB_INITTED))
537 return;
538
539 ph_idx = ppd->led_override_phase++ & 1;
540 ppd->led_override = ppd->led_override_vals[ph_idx];
541 timeoff = ppd->led_override_timeoff;
542
543 dd->f_setextled(ppd, 1);
544 /*
545 * don't re-fire the timer if user asked for it to be off; we let
546	 * it fire one more time after they turn it off, to simplify the logic
547 */
548 if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
549 mod_timer(&ppd->led_override_timer, jiffies + timeoff);
550}
551
552void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
553{
554 struct qib_devdata *dd = ppd->dd;
555 int timeoff, freq;
556
557 if (!(dd->flags & QIB_INITTED))
558 return;
559
560 /* First check if we are blinking. If not, use 1HZ polling */
561 timeoff = HZ;
562 freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
563
564 if (freq) {
565 /* For blink, set each phase from one nybble of val */
566 ppd->led_override_vals[0] = val & 0xF;
567 ppd->led_override_vals[1] = (val >> 4) & 0xF;
568 timeoff = (HZ << 4)/freq;
569 } else {
570 /* Non-blink set both phases the same. */
571 ppd->led_override_vals[0] = val & 0xF;
572 ppd->led_override_vals[1] = val & 0xF;
573 }
574 ppd->led_override_timeoff = timeoff;
575
576 /*
577 * If the timer has not already been started, do so. Use a "quick"
578 * timeout so the function will be called soon, to look at our request.
579 */
580 if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
581 /* Need to start timer */
582 init_timer(&ppd->led_override_timer);
583 ppd->led_override_timer.function = qib_run_led_override;
584 ppd->led_override_timer.data = (unsigned long) ppd;
585 ppd->led_override_timer.expires = jiffies + 1;
586 add_timer(&ppd->led_override_timer);
587 } else {
588 if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
589 mod_timer(&ppd->led_override_timer, jiffies + 1);
590 atomic_dec(&ppd->led_override_timer_active);
591 }
592}
593
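To make the val encoding above concrete, a small illustrative helper (not part of the driver) that packs the two LED phases and the blink frequency exactly the way qib_set_led_override() unpacks them:

	/* Hypothetical helper; the function name is illustrative. */
	static unsigned int led_override_val(u8 phase0, u8 phase1, u8 freq)
	{
		/*
		 * Bits 3:0  - LED bits for phase 0
		 * Bits 7:4  - LED bits for phase 1 (used only when freq != 0)
		 * Bits 15:8 - blink frequency; each phase lasts (HZ << 4) / freq jiffies
		 */
		return ((unsigned int)freq << LED_OVER_FREQ_SHIFT) |
		       ((phase1 & 0xFu) << 4) | (phase0 & 0xFu);
	}

	/* e.g. blink all LEDs on/off with one-second phases (freq 16): */
	/*	qib_set_led_override(ppd, led_override_val(0xF, 0x0, 16)); */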
594/**
595 * qib_reset_device - reset the chip if possible
596 * @unit: the device to reset
597 *
598 * Whether or not reset is successful, we attempt to re-initialize the chip
599 * (that is, much like a driver unload/reload). We clear the INITTED flag
600 * so that the various entry points will fail until we reinitialize. For
601 * now, we only allow this if no user contexts are open that use chip resources.
602 */
603int qib_reset_device(int unit)
604{
605 int ret, i;
606 struct qib_devdata *dd = qib_lookup(unit);
607 struct qib_pportdata *ppd;
608 unsigned long flags;
609 int pidx;
610
611 if (!dd) {
612 ret = -ENODEV;
613 goto bail;
614 }
615
616 qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
617
618 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
619 qib_devinfo(dd->pcidev, "Invalid unit number %u or "
620 "not initialized or not present\n", unit);
621 ret = -ENXIO;
622 goto bail;
623 }
624
625 spin_lock_irqsave(&dd->uctxt_lock, flags);
626 if (dd->rcd)
627 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
628 if (!dd->rcd[i] || !dd->rcd[i]->cnt)
629 continue;
630 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
631 ret = -EBUSY;
632 goto bail;
633 }
634 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
635
636 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
637 ppd = dd->pport + pidx;
638 if (atomic_read(&ppd->led_override_timer_active)) {
639 /* Need to stop LED timer, _then_ shut off LEDs */
640 del_timer_sync(&ppd->led_override_timer);
641 atomic_set(&ppd->led_override_timer_active, 0);
642 }
643
644 /* Shut off LEDs after we are sure timer is not running */
645 ppd->led_override = LED_OVER_BOTH_OFF;
646 dd->f_setextled(ppd, 0);
647 if (dd->flags & QIB_HAS_SEND_DMA)
648 qib_teardown_sdma(ppd);
649 }
650
651 ret = dd->f_reset(dd);
652 if (ret == 1)
653 ret = qib_init(dd, 1);
654 else
655 ret = -EAGAIN;
656 if (ret)
657 qib_dev_err(dd, "Reinitialize unit %u after "
658 "reset failed with %d\n", unit, ret);
659 else
660 qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
661 "resetting\n", unit);
662
663bail:
664 return ret;
665}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
new file mode 100644
index 000000000000..92d9cfe98a68
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -0,0 +1,451 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39
40/*
41 * Functions specific to the serial EEPROM on cards handled by ib_qib.
42 * The actual serial interface code is in qib_twsi.c. This file is a client of it.
43 */
44
45/**
46 * qib_eeprom_read - receives bytes from the eeprom via I2C
47 * @dd: the qlogic_ib device
48 * @eeprom_offset: address to read from
49 * @buff: where to store the result
50 * @len: number of bytes to receive
51 */
52int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
53 void *buff, int len)
54{
55 int ret;
56
57 ret = mutex_lock_interruptible(&dd->eep_lock);
58 if (!ret) {
59 ret = qib_twsi_reset(dd);
60 if (ret)
61 qib_dev_err(dd, "EEPROM Reset for read failed\n");
62 else
63 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
64 eeprom_offset, buff, len);
65 mutex_unlock(&dd->eep_lock);
66 }
67
68 return ret;
69}
70
71/*
72 * Actually update the eeprom, first doing write enable if
73 * needed, then restoring write enable state.
74 * Must be called with eep_lock held
75 */
76static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
77 const void *buf, int len)
78{
79 int ret, pwen;
80
81 pwen = dd->f_eeprom_wen(dd, 1);
82 ret = qib_twsi_reset(dd);
83 if (ret)
84 qib_dev_err(dd, "EEPROM Reset for write failed\n");
85 else
86 ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
87 offset, buf, len);
88 dd->f_eeprom_wen(dd, pwen);
89 return ret;
90}
91
92/**
93 * qib_eeprom_write - writes data to the eeprom via I2C
94 * @dd: the qlogic_ib device
95 * @eeprom_offset: where to place data
96 * @buff: the data to write
97 * @len: number of bytes to write
98 */
99int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
100 const void *buff, int len)
101{
102 int ret;
103
104 ret = mutex_lock_interruptible(&dd->eep_lock);
105 if (!ret) {
106 ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
107 mutex_unlock(&dd->eep_lock);
108 }
109
110 return ret;
111}
112
113static u8 flash_csum(struct qib_flash *ifp, int adjust)
114{
115 u8 *ip = (u8 *) ifp;
116 u8 csum = 0, len;
117
118 /*
119 * Limit length checksummed to max length of actual data.
120 * Checksum of erased eeprom will still be bad, but we avoid
121 * reading past the end of the buffer we were passed.
122 */
123 len = ifp->if_length;
124 if (len > sizeof(struct qib_flash))
125 len = sizeof(struct qib_flash);
126 while (len--)
127 csum += *ip++;
128 csum -= ifp->if_csum;
129 csum = ~csum;
130 if (adjust)
131 ifp->if_csum = csum;
132
133 return csum;
134}
135
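As a quick worked example of that arithmetic: if the checksummed bytes other than if_csum sum to 0x34 (mod 256), flash_csum() computes ~0x34 = 0xCB, so the image is accepted only when if_csum already holds 0xCB; with adjust set, 0xCB is stamped into if_csum before the image is rewritten.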
136/**
137 * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
138 * @dd: the qlogic_ib device
139 *
140 * We have the capability to use the nguid field, and get
141 * the guid from the first chip's flash, to use for all of them.
142 */
143void qib_get_eeprom_info(struct qib_devdata *dd)
144{
145 void *buf;
146 struct qib_flash *ifp;
147 __be64 guid;
148 int len, eep_stat;
149 u8 csum, *bguid;
150 int t = dd->unit;
151 struct qib_devdata *dd0 = qib_lookup(0);
152
153 if (t && dd0->nguid > 1 && t <= dd0->nguid) {
154 u8 oguid;
155 dd->base_guid = dd0->base_guid;
156 bguid = (u8 *) &dd->base_guid;
157
158 oguid = bguid[7];
159 bguid[7] += t;
160 if (oguid > bguid[7]) {
161 if (bguid[6] == 0xff) {
162 if (bguid[5] == 0xff) {
163 qib_dev_err(dd, "Can't set %s GUID"
164 " from base, wraps to"
165 " OUI!\n",
166 qib_get_unit_name(t));
167 dd->base_guid = 0;
168 goto bail;
169 }
170 bguid[5]++;
171 }
172 bguid[6]++;
173 }
174 dd->nguid = 1;
175 goto bail;
176 }
177
178 /*
179 * Read full flash, not just currently used part, since it may have
180 * been written with a newer definition.
181	 */
182 len = sizeof(struct qib_flash);
183 buf = vmalloc(len);
184 if (!buf) {
185 qib_dev_err(dd, "Couldn't allocate memory to read %u "
186 "bytes from eeprom for GUID\n", len);
187 goto bail;
188 }
189
190 /*
191 * Use "public" eeprom read function, which does locking and
192 * figures out device. This will migrate to chip-specific.
193 */
194 eep_stat = qib_eeprom_read(dd, 0, buf, len);
195
196 if (eep_stat) {
197 qib_dev_err(dd, "Failed reading GUID from eeprom\n");
198 goto done;
199 }
200 ifp = (struct qib_flash *)buf;
201
202 csum = flash_csum(ifp, 0);
203 if (csum != ifp->if_csum) {
204 qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
205 "0x%x, not 0x%x\n", csum, ifp->if_csum);
206 goto done;
207 }
208 if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
209 *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
210 qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
211 *(unsigned long long *) ifp->if_guid);
212 /* don't allow GUID if all 0 or all 1's */
213 goto done;
214 }
215
216 /* complain, but allow it */
217 if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
218 qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
219 "default, probably not correct!\n",
220 *(unsigned long long *) ifp->if_guid);
221
222 bguid = ifp->if_guid;
223 if (!bguid[0] && !bguid[1] && !bguid[2]) {
224 /*
225 * Original incorrect GUID format in flash; fix in
226 * core copy, by shifting up 2 octets; don't need to
227 * change top octet, since both it and shifted are 0.
228 */
229 bguid[1] = bguid[3];
230 bguid[2] = bguid[4];
231 bguid[3] = 0;
232 bguid[4] = 0;
233 guid = *(__be64 *) ifp->if_guid;
234 } else
235 guid = *(__be64 *) ifp->if_guid;
236 dd->base_guid = guid;
237 dd->nguid = ifp->if_numguid;
238 /*
239 * Things are slightly complicated by the desire to transparently
240 * support both the Pathscale 10-digit serial number and the QLogic
241 * 13-character version.
242 */
243 if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
244 ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
245 char *snp = dd->serial;
246
247 /*
248 * This board has a Serial-prefix, which is stored
249 * elsewhere for backward-compatibility.
250 */
251 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
252 snp[sizeof ifp->if_sprefix] = '\0';
253 len = strlen(snp);
254 snp += len;
255 len = (sizeof dd->serial) - len;
256 if (len > sizeof ifp->if_serial)
257 len = sizeof ifp->if_serial;
258 memcpy(snp, ifp->if_serial, len);
259 } else
260 memcpy(dd->serial, ifp->if_serial,
261 sizeof ifp->if_serial);
262 if (!strstr(ifp->if_comment, "Tested successfully"))
263 qib_dev_err(dd, "Board SN %s did not pass functional "
264 "test: %s\n", dd->serial, ifp->if_comment);
265
266 memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
267 /*
268 * Power-on (actually "active") hours are kept as little-endian value
269 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
270 * atomic_t while running.
271 */
272 atomic_set(&dd->active_time, 0);
273 dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
274
275done:
276 vfree(buf);
277
278bail:;
279}
280
281/**
282 * qib_update_eeprom_log - copy active-time and error counters to eeprom
283 * @dd: the qlogic_ib device
284 *
285 * Although the time is kept as seconds in the qib_devdata struct, it is
286 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
287 * First-cut code reads whole (expected) struct qib_flash, modifies,
288 * re-writes. Future direction: read/write only what we need, assuming
289 * that the EEPROM had to have been "good enough" for driver init, and
290 * if not, we aren't making it worse.
291 *
292 */
293int qib_update_eeprom_log(struct qib_devdata *dd)
294{
295 void *buf;
296 struct qib_flash *ifp;
297 int len, hi_water;
298 uint32_t new_time, new_hrs;
299 u8 csum;
300 int ret, idx;
301 unsigned long flags;
302
303 /* first, check if we actually need to do anything. */
304 ret = 0;
305 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
306 if (dd->eep_st_new_errs[idx]) {
307 ret = 1;
308 break;
309 }
310 }
311 new_time = atomic_read(&dd->active_time);
312
313 if (ret == 0 && new_time < 3600)
314 goto bail;
315
316 /*
317 * The quick-check above determined that there is something worthy
318	 * of logging, so get the current contents and examine them in more detail.
319	 * Read the full flash, not just the currently used part, since it may have
320	 * been written with a newer definition.
321 */
322 len = sizeof(struct qib_flash);
323 buf = vmalloc(len);
324 ret = 1;
325 if (!buf) {
326 qib_dev_err(dd, "Couldn't allocate memory to read %u "
327 "bytes from eeprom for logging\n", len);
328 goto bail;
329 }
330
331 /* Grab semaphore and read current EEPROM. If we get an
332 * error, let go, but if not, keep it until we finish write.
333 */
334 ret = mutex_lock_interruptible(&dd->eep_lock);
335 if (ret) {
336 qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
337 goto free_bail;
338 }
339 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
340 if (ret) {
341 mutex_unlock(&dd->eep_lock);
342		qib_dev_err(dd, "Unable to read EEPROM for logging\n");
343 goto free_bail;
344 }
345 ifp = (struct qib_flash *)buf;
346
347 csum = flash_csum(ifp, 0);
348 if (csum != ifp->if_csum) {
349 mutex_unlock(&dd->eep_lock);
350 qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
351 csum, ifp->if_csum);
352 ret = 1;
353 goto free_bail;
354 }
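	/*
	 * hi_water tracks the highest byte offset modified below, so only
	 * the dirty prefix of the flash image needs to be rewritten.
	 */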
355 hi_water = 0;
356 spin_lock_irqsave(&dd->eep_st_lock, flags);
357 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
358 int new_val = dd->eep_st_new_errs[idx];
359 if (new_val) {
360 /*
361 * If we have seen any errors, add to EEPROM values
362 * We need to saturate at 0xFF (255) and we also
363 * would need to adjust the checksum if we were
364 * trying to minimize EEPROM traffic
365 * Note that we add to actual current count in EEPROM,
366 * in case it was altered while we were running.
367 */
368 new_val += ifp->if_errcntp[idx];
369 if (new_val > 0xFF)
370 new_val = 0xFF;
371 if (ifp->if_errcntp[idx] != new_val) {
372 ifp->if_errcntp[idx] = new_val;
373 hi_water = offsetof(struct qib_flash,
374 if_errcntp) + idx;
375 }
376 /*
377 * update our shadow (used to minimize EEPROM
378 * traffic), to match what we are about to write.
379 */
380 dd->eep_st_errs[idx] = new_val;
381 dd->eep_st_new_errs[idx] = 0;
382 }
383 }
384 /*
385 * Now update active-time. We would like to round to the nearest hour
386	 * but unless atomic_t is guaranteed to be a proper signed int we cannot,
387 * because we need to account for what we "transfer" to EEPROM and
388 * if we log an hour at 31 minutes, then we would need to set
389 * active_time to -29 to accurately count the _next_ hour.
390 */
391 if (new_time >= 3600) {
392 new_hrs = new_time / 3600;
393 atomic_sub((new_hrs * 3600), &dd->active_time);
394 new_hrs += dd->eep_hrs;
395 if (new_hrs > 0xFFFF)
396 new_hrs = 0xFFFF;
397 dd->eep_hrs = new_hrs;
398 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
399 ifp->if_powerhour[0] = new_hrs & 0xFF;
400 hi_water = offsetof(struct qib_flash, if_powerhour);
401 }
402 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
403 ifp->if_powerhour[1] = new_hrs >> 8;
404 hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
405 }
406 }
407 /*
408 * There is a tiny possibility that we could somehow fail to write
409 * the EEPROM after updating our shadows, but problems from holding
410 * the spinlock too long are a much bigger issue.
411 */
412 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
413 if (hi_water) {
414		/* we made some change to the data, update cksum and write */
415 csum = flash_csum(ifp, 1);
416 ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
417 }
418 mutex_unlock(&dd->eep_lock);
419 if (ret)
420 qib_dev_err(dd, "Failed updating EEPROM\n");
421
422free_bail:
423 vfree(buf);
424bail:
425 return ret;
426}
427
428/**
429 * qib_inc_eeprom_err - increment one of the four error counters
430 * that are logged to EEPROM.
431 * @dd: the qlogic_ib device
432 * @eidx: 0..3, the counter to increment
433 * @incr: how much to add
434 *
435 * Each counter is 8-bits, and saturates at 255 (0xFF). They
436 * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
437 * is called, but it can only be called in a context that allows sleep.
438 * This function can be called even at interrupt level.
439 */
440void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
441{
442 uint new_val;
443 unsigned long flags;
444
445 spin_lock_irqsave(&dd->eep_st_lock, flags);
446 new_val = dd->eep_st_new_errs[eidx] + incr;
447 if (new_val > 255)
448 new_val = 255;
449 dd->eep_st_new_errs[eidx] = new_val;
450 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
451}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 000000000000..a142a9eb5226
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/poll.h>
37#include <linux/cdev.h>
38#include <linux/swap.h>
39#include <linux/vmalloc.h>
40#include <linux/highmem.h>
41#include <linux/io.h>
42#include <linux/uio.h>
43#include <linux/jiffies.h>
44#include <asm/pgtable.h>
45#include <linux/delay.h>
46
47#include "qib.h"
48#include "qib_common.h"
49#include "qib_user_sdma.h"
50
51static int qib_open(struct inode *, struct file *);
52static int qib_close(struct inode *, struct file *);
53static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
54static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
55 unsigned long, loff_t);
56static unsigned int qib_poll(struct file *, struct poll_table_struct *);
57static int qib_mmapf(struct file *, struct vm_area_struct *);
58
59static const struct file_operations qib_file_ops = {
60 .owner = THIS_MODULE,
61 .write = qib_write,
62 .aio_write = qib_aio_write,
63 .open = qib_open,
64 .release = qib_close,
65 .poll = qib_poll,
66 .mmap = qib_mmapf
67};
68
69/*
70 * Convert kernel virtual addresses to physical addresses so they don't
71 * potentially conflict with the chip addresses used as mmap offsets.
72 * It doesn't really matter what mmap offset we use as long as we can
73 * interpret it correctly.
74 */
75static u64 cvt_kvaddr(void *p)
76{
77 struct page *page;
78 u64 paddr = 0;
79
80 page = vmalloc_to_page(p);
81 if (page)
82 paddr = page_to_pfn(page) << PAGE_SHIFT;
83
84 return paddr;
85}
86
87static int qib_get_base_info(struct file *fp, void __user *ubase,
88 size_t ubase_size)
89{
90 struct qib_ctxtdata *rcd = ctxt_fp(fp);
91 int ret = 0;
92 struct qib_base_info *kinfo = NULL;
93 struct qib_devdata *dd = rcd->dd;
94 struct qib_pportdata *ppd = rcd->ppd;
95 unsigned subctxt_cnt;
96 int shared, master;
97 size_t sz;
98
99 subctxt_cnt = rcd->subctxt_cnt;
100 if (!subctxt_cnt) {
101 shared = 0;
102 master = 0;
103 subctxt_cnt = 1;
104 } else {
105 shared = 1;
106 master = !subctxt_fp(fp);
107 }
108
109 sz = sizeof(*kinfo);
110 /* If context sharing is not requested, allow the old size structure */
111 if (!shared)
112 sz -= 7 * sizeof(u64);
113 if (ubase_size < sz) {
114 ret = -EINVAL;
115 goto bail;
116 }
117
118 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
119 if (kinfo == NULL) {
120 ret = -ENOMEM;
121 goto bail;
122 }
123
124 ret = dd->f_get_base_info(rcd, kinfo);
125 if (ret < 0)
126 goto bail;
127
128 kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
129 kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
130 kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
131 kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
132 /*
133 * have to mmap whole thing
134 */
135 kinfo->spi_rcv_egrbuftotlen =
136 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
137 kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
138 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
139 rcd->rcvegrbuf_chunks;
140 kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
141 if (master)
142 kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
143 /*
144 * for this use, may be cfgctxts summed over all chips that
145 * are are configured and present
146 */
147 kinfo->spi_nctxts = dd->cfgctxts;
148 /* unit (chip/board) our context is on */
149 kinfo->spi_unit = dd->unit;
150 kinfo->spi_port = ppd->port;
151 /* for now, only a single page */
152 kinfo->spi_tid_maxsize = PAGE_SIZE;
153
154 /*
155 * Doing this per context, and based on the skip value, etc. This has
156 * to be the actual buffer size, since the protocol code treats it
157 * as an array.
158 *
159 * These have to be set to user addresses in the user code via mmap.
160 * These values are used on return to user code for the mmap target
161 * addresses only. For 32 bit, same 44 bit address problem, so use
162 * the physical address, not virtual. Before 2.6.11, using the
163 * page_address() macro worked, but in 2.6.11, even that returns the
164 * full 64 bit address (upper bits all 1's). So far, using the
165 * physical addresses (or chip offsets, for chip mapping) works, but
166 * no doubt some future kernel release will change that, and we'll be
167 * on to yet another method of dealing with this.
168 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
169 * since the chips with non-zero rhf_offset don't normally
170 * enable tail register updates to host memory, but for testing,
171 * both can be enabled and used.
172 */
173 kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
174 kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
175 kinfo->spi_rhf_offset = dd->rhf_offset;
176 kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
177 kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
178 /* setup per-unit (not port) status area for user programs */
179 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
180 (char *) ppd->statusp -
181 (char *) dd->pioavailregs_dma;
182 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
183 if (!shared) {
184 kinfo->spi_piocnt = rcd->piocnt;
185 kinfo->spi_piobufbase = (u64) rcd->piobufs;
186 kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
187 } else if (master) {
188 kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
189 (rcd->piocnt % subctxt_cnt);
190 /* Master's PIO buffers are after all the slave's */
191 kinfo->spi_piobufbase = (u64) rcd->piobufs +
192 dd->palign *
193 (rcd->piocnt - kinfo->spi_piocnt);
194 } else {
195 unsigned slave = subctxt_fp(fp) - 1;
196
197 kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
198 kinfo->spi_piobufbase = (u64) rcd->piobufs +
199 dd->palign * kinfo->spi_piocnt * slave;
200 }
201
202 if (shared) {
203 kinfo->spi_sendbuf_status =
204 cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
205 /* only spi_subctxt_* fields should be set in this block! */
206 kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
207
208 kinfo->spi_subctxt_rcvegrbuf =
209 cvt_kvaddr(rcd->subctxt_rcvegrbuf);
210 kinfo->spi_subctxt_rcvhdr_base =
211 cvt_kvaddr(rcd->subctxt_rcvhdr_base);
212 }
213
214 /*
215 * All user buffers are 2KB buffers. If we ever support
216 * giving 4KB buffers to user processes, this will need some
217 * work. Can't use piobufbase directly, because it has
218 * both 2K and 4K buffer base values.
219 */
220 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
221 dd->palign;
222 kinfo->spi_pioalign = dd->palign;
223 kinfo->spi_qpair = QIB_KD_QP;
224 /*
225 * user mode PIO buffers are always 2KB, even when 4KB can
226 * be received, and sent via the kernel; this is ibmaxlen
227 * for 2K MTU.
228 */
229 kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
230 kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
231 kinfo->spi_ctxt = rcd->ctxt;
232 kinfo->spi_subctxt = subctxt_fp(fp);
233 kinfo->spi_sw_version = QIB_KERN_SWVERSION;
234 kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
235 kinfo->spi_hw_version = dd->revision;
236
237 if (master)
238 kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
239
240 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
241 if (copy_to_user(ubase, kinfo, sz))
242 ret = -EFAULT;
243bail:
244 kfree(kinfo);
245 return ret;
246}
247
248/**
249 * qib_tid_update - update a context TID
250 * @rcd: the context
251 * @fp: the qib device file
252 * @ti: the TID information
253 *
254 * The new implementation as of Oct 2004 is that the driver assigns
255 * the tid and returns it to the caller. To reduce search time, we
256 * keep a cursor for each context, walking the shadow tid array to find
257 * one that's not in use.
258 *
259 * For now, if we can't allocate the full list, we fail, although
260 * in the long run, we'll allocate as many as we can, and the
261 * caller will deal with that by trying the remaining pages later.
262 * That means that when we fail, we have to mark the tids as not in
263 * use again, in our shadow copy.
264 *
265 * It's up to the caller to free the tids when they are done.
266 * We'll unlock the pages as they free them.
267 *
268 * Also, right now we are locking one page at a time, but since
269 * the intended use of this routine is for a single group of
270 * virtually contiguous pages, that should change to improve
271 * performance.
272 */
273static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
274 const struct qib_tid_info *ti)
275{
276 int ret = 0, ntids;
277 u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
278 u16 *tidlist;
279 struct qib_devdata *dd = rcd->dd;
280 u64 physaddr;
281 unsigned long vaddr;
282 u64 __iomem *tidbase;
283 unsigned long tidmap[8];
284 struct page **pagep = NULL;
285 unsigned subctxt = subctxt_fp(fp);
286
287 if (!dd->pageshadow) {
288 ret = -ENOMEM;
289 goto done;
290 }
291
292 cnt = ti->tidcnt;
293 if (!cnt) {
294 ret = -EFAULT;
295 goto done;
296 }
297 ctxttid = rcd->ctxt * dd->rcvtidcnt;
298 if (!rcd->subctxt_cnt) {
299 tidcnt = dd->rcvtidcnt;
300 tid = rcd->tidcursor;
301 tidoff = 0;
302 } else if (!subctxt) {
303 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
304 (dd->rcvtidcnt % rcd->subctxt_cnt);
305 tidoff = dd->rcvtidcnt - tidcnt;
306 ctxttid += tidoff;
307 tid = tidcursor_fp(fp);
308 } else {
309 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
310 tidoff = tidcnt * (subctxt - 1);
311 ctxttid += tidoff;
312 tid = tidcursor_fp(fp);
313 }
314 if (cnt > tidcnt) {
315 /* make sure it all fits in tid_pg_list */
316 qib_devinfo(dd->pcidev, "Process tried to allocate %u "
317 "TIDs, only trying max (%u)\n", cnt, tidcnt);
318 cnt = tidcnt;
319 }
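	/* tid_pg_list holds the page-pointer array, immediately followed by the u16 tidlist */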
320 pagep = (struct page **) rcd->tid_pg_list;
321 tidlist = (u16 *) &pagep[dd->rcvtidcnt];
322 pagep += tidoff;
323 tidlist += tidoff;
324
325 memset(tidmap, 0, sizeof(tidmap));
326 /* before decrement; chip actual # */
327 ntids = tidcnt;
328 tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
329 dd->rcvtidbase +
330 ctxttid * sizeof(*tidbase));
331
332 /* virtual address of first page in transfer */
333 vaddr = ti->tidvaddr;
334 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
335 cnt * PAGE_SIZE)) {
336 ret = -EFAULT;
337 goto done;
338 }
339 ret = qib_get_user_pages(vaddr, cnt, pagep);
340 if (ret) {
341 /*
342 * if (ret == -EBUSY)
343 * We can't continue because the pagep array won't be
344 * initialized. This should never happen,
345 * unless perhaps the user has mpin'ed the pages
346 * themselves.
347 */
348 qib_devinfo(dd->pcidev,
349 "Failed to lock addr %p, %u pages: "
350 "errno %d\n", (void *) vaddr, cnt, -ret);
351 goto done;
352 }
353 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
354 for (; ntids--; tid++) {
355 if (tid == tidcnt)
356 tid = 0;
357 if (!dd->pageshadow[ctxttid + tid])
358 break;
359 }
360 if (ntids < 0) {
361 /*
362 * Oops, wrapped all the way through their TIDs,
363 * and didn't have enough free; see comments at
364 * start of routine
365 */
366 i--; /* last tidlist[i] not filled in */
367 ret = -ENOMEM;
368 break;
369 }
370 tidlist[i] = tid + tidoff;
371 /* we "know" system pages and TID pages are same size */
372 dd->pageshadow[ctxttid + tid] = pagep[i];
373 dd->physshadow[ctxttid + tid] =
374 qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
375 PCI_DMA_FROMDEVICE);
376 /*
377		 * don't need atomic, or its overhead
378 */
379 __set_bit(tid, tidmap);
380 physaddr = dd->physshadow[ctxttid + tid];
381 /* PERFORMANCE: below should almost certainly be cached */
382 dd->f_put_tid(dd, &tidbase[tid],
383 RCVHQ_RCV_TYPE_EXPECTED, physaddr);
384 /*
385 * don't check this tid in qib_ctxtshadow, since we
386 * just filled it in; start with the next one.
387 */
388 tid++;
389 }
390
391 if (ret) {
392 u32 limit;
393cleanup:
394 /* jump here if copy out of updated info failed... */
395 /* same code that's in qib_free_tid() */
396 limit = sizeof(tidmap) * BITS_PER_BYTE;
397 if (limit > tidcnt)
398 /* just in case size changes in future */
399 limit = tidcnt;
400 tid = find_first_bit((const unsigned long *)tidmap, limit);
401 for (; tid < limit; tid++) {
402 if (!test_bit(tid, tidmap))
403 continue;
404 if (dd->pageshadow[ctxttid + tid]) {
405 dma_addr_t phys;
406
407 phys = dd->physshadow[ctxttid + tid];
408 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
409 /* PERFORMANCE: below should almost certainly
410 * be cached
411 */
412 dd->f_put_tid(dd, &tidbase[tid],
413 RCVHQ_RCV_TYPE_EXPECTED,
414 dd->tidinvalid);
415 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
416 PCI_DMA_FROMDEVICE);
417 dd->pageshadow[ctxttid + tid] = NULL;
418 }
419 }
420 qib_release_user_pages(pagep, cnt);
421 } else {
422 /*
423 * Copy the updated array, with qib_tid's filled in, back
424 * to user. Since we did the copy in already, this "should
425 * never fail" If it does, we have to clean up...
426 */
427 if (copy_to_user((void __user *)
428 (unsigned long) ti->tidlist,
429 tidlist, cnt * sizeof(*tidlist))) {
430 ret = -EFAULT;
431 goto cleanup;
432 }
433 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
434 tidmap, sizeof tidmap)) {
435 ret = -EFAULT;
436 goto cleanup;
437 }
438 if (tid == tidcnt)
439 tid = 0;
440 if (!rcd->subctxt_cnt)
441 rcd->tidcursor = tid;
442 else
443 tidcursor_fp(fp) = tid;
444 }
445
446done:
447 return ret;
448}
449
450/**
451 * qib_tid_free - free a context TID
452 * @rcd: the context
453 * @subctxt: the subcontext
454 * @ti: the TID info
455 *
456 * right now we are unlocking one page at a time, but since
457 * the intended use of this routine is for a single group of
458 * virtually contiguous pages, that should change to improve
459 * performance. We check that the TID is in range for this context
460 * but otherwise don't check validity; if user has an error and
461 * frees the wrong tid, it's only their own data that can thereby
462 * be corrupted. We do check that the TID was in use, for sanity.
463 * We always use our idea of the saved address, not the address that
464 * they pass in to us.
465 */
466static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
467 const struct qib_tid_info *ti)
468{
469 int ret = 0;
470 u32 tid, ctxttid, cnt, limit, tidcnt;
471 struct qib_devdata *dd = rcd->dd;
472 u64 __iomem *tidbase;
473 unsigned long tidmap[8];
474
475 if (!dd->pageshadow) {
476 ret = -ENOMEM;
477 goto done;
478 }
479
480 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
481 sizeof tidmap)) {
482 ret = -EFAULT;
483 goto done;
484 }
485
486 ctxttid = rcd->ctxt * dd->rcvtidcnt;
487 if (!rcd->subctxt_cnt)
488 tidcnt = dd->rcvtidcnt;
489 else if (!subctxt) {
490 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
491 (dd->rcvtidcnt % rcd->subctxt_cnt);
492 ctxttid += dd->rcvtidcnt - tidcnt;
493 } else {
494 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
495 ctxttid += tidcnt * (subctxt - 1);
496 }
497 tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
498 dd->rcvtidbase +
499 ctxttid * sizeof(*tidbase));
500
501 limit = sizeof(tidmap) * BITS_PER_BYTE;
502 if (limit > tidcnt)
503 /* just in case size changes in future */
504 limit = tidcnt;
505 tid = find_first_bit(tidmap, limit);
506 for (cnt = 0; tid < limit; tid++) {
507 /*
508 * small optimization; if we detect a run of 3 or so without
509 * any set, use find_first_bit again. That's mainly to
510 * accelerate the case where we wrapped, so we have some at
511 * the beginning, and some at the end, and a big gap
512 * in the middle.
513 */
514 if (!test_bit(tid, tidmap))
515 continue;
516 cnt++;
517 if (dd->pageshadow[ctxttid + tid]) {
518 struct page *p;
519 dma_addr_t phys;
520
521 p = dd->pageshadow[ctxttid + tid];
522 dd->pageshadow[ctxttid + tid] = NULL;
523 phys = dd->physshadow[ctxttid + tid];
524 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
525 /* PERFORMANCE: below should almost certainly be
526 * cached
527 */
528 dd->f_put_tid(dd, &tidbase[tid],
529 RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
530 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
531 PCI_DMA_FROMDEVICE);
532 qib_release_user_pages(&p, 1);
533 }
534 }
535done:
536 return ret;
537}
538
539/**
540 * qib_set_part_key - set a partition key
541 * @rcd: the context
542 * @key: the key
543 *
544 * We can have up to 4 active at a time (other than the default, which is
545 * always allowed). This is somewhat tricky, since multiple contexts may set
546 * the same key, so we reference count them, and clean up at exit. All 4
547 * partition keys are packed into a single qlogic_ib register. It's an
548 * error for a process to set the same pkey multiple times. We provide no
549 * mechanism to de-allocate a pkey at this time, we may eventually need to
550 * do that. I've used the atomic operations, and no locking, and only make
551 * a single pass through what's available. This should be more than
552 * adequate for some time. I'll think about spinlocks or the like if and as
553 * it's necessary.
554 */
555static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
556{
557 struct qib_pportdata *ppd = rcd->ppd;
558 int i, any = 0, pidx = -1;
559 u16 lkey = key & 0x7FFF;
560 int ret;
561
562 if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
563 /* nothing to do; this key always valid */
564 ret = 0;
565 goto bail;
566 }
567
568 if (!lkey) {
569 ret = -EINVAL;
570 goto bail;
571 }
572
573 /*
574 * Set the full membership bit, because it has to be
575 * set in the register or the packet, and it seems
576 * cleaner to set in the register than to force all
577 * callers to set it.
578 */
579 key |= 0x8000;
580
581 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
582 if (!rcd->pkeys[i] && pidx == -1)
583 pidx = i;
584 if (rcd->pkeys[i] == key) {
585 ret = -EEXIST;
586 goto bail;
587 }
588 }
589 if (pidx == -1) {
590 ret = -EBUSY;
591 goto bail;
592 }
593 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
594 if (!ppd->pkeys[i]) {
595 any++;
596 continue;
597 }
598 if (ppd->pkeys[i] == key) {
599 atomic_t *pkrefs = &ppd->pkeyrefs[i];
600
601 if (atomic_inc_return(pkrefs) > 1) {
602 rcd->pkeys[pidx] = key;
603 ret = 0;
604 goto bail;
605 } else {
606 /*
607 * lost race, decrement count, catch below
608 */
609 atomic_dec(pkrefs);
610 any++;
611 }
612 }
613 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
614 /*
615 * It makes no sense to have both the limited and
616 * full membership PKEY set at the same time since
617 * the unlimited one will disable the limited one.
618 */
619 ret = -EEXIST;
620 goto bail;
621 }
622 }
623 if (!any) {
624 ret = -EBUSY;
625 goto bail;
626 }
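	/* claim the first unused hardware pkey slot; the atomic inc guards against a racing context */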
627 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
628 if (!ppd->pkeys[i] &&
629 atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
630 rcd->pkeys[pidx] = key;
631 ppd->pkeys[i] = key;
632 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
633 ret = 0;
634 goto bail;
635 }
636 }
637 ret = -EBUSY;
638
639bail:
640 return ret;
641}
642
643/**
644 * qib_manage_rcvq - manage a context's receive queue
645 * @rcd: the context
646 * @subctxt: the subcontext
647 * @start_stop: action to carry out
648 *
649 * start_stop == 0 disables receive on the context, for use in queue
650 * overflow conditions. start_stop==1 re-enables, to be used to
651 * re-init the software copy of the head register
652 */
653static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
654 int start_stop)
655{
656 struct qib_devdata *dd = rcd->dd;
657 unsigned int rcvctrl_op;
658
659 if (subctxt)
660 goto bail;
661 /* atomically clear receive enable ctxt. */
662 if (start_stop) {
663 /*
664 * On enable, force in-memory copy of the tail register to
665 * 0, so that protocol code doesn't have to worry about
666 * whether or not the chip has yet updated the in-memory
667 * copy or not on return from the system call. The chip
668		 * always resets its tail register back to 0 on a
669 * transition from disabled to enabled.
670 */
671 if (rcd->rcvhdrtail_kvaddr)
672 qib_clear_rcvhdrtail(rcd);
673 rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
674 } else
675 rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
676 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
677 /* always; new head should be equal to new tail; see above */
678bail:
679 return 0;
680}
681
682static void qib_clean_part_key(struct qib_ctxtdata *rcd,
683 struct qib_devdata *dd)
684{
685 int i, j, pchanged = 0;
686 u64 oldpkey;
687 struct qib_pportdata *ppd = rcd->ppd;
688
689 /* for debugging only */
690 oldpkey = (u64) ppd->pkeys[0] |
691 ((u64) ppd->pkeys[1] << 16) |
692 ((u64) ppd->pkeys[2] << 32) |
693 ((u64) ppd->pkeys[3] << 48);
694
695 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
696 if (!rcd->pkeys[i])
697 continue;
698 for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
699 /* check for match independent of the global bit */
700 if ((ppd->pkeys[j] & 0x7fff) !=
701 (rcd->pkeys[i] & 0x7fff))
702 continue;
703 if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
704 ppd->pkeys[j] = 0;
705 pchanged++;
706 }
707 break;
708 }
709 rcd->pkeys[i] = 0;
710 }
711 if (pchanged)
712 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
713}
714
715/* common code for the mappings on dma_alloc_coherent mem */
716static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
717 unsigned len, void *kvaddr, u32 write_ok, char *what)
718{
719 struct qib_devdata *dd = rcd->dd;
720 unsigned long pfn;
721 int ret;
722
723 if ((vma->vm_end - vma->vm_start) > len) {
724 qib_devinfo(dd->pcidev,
725 "FAIL on %s: len %lx > %x\n", what,
726 vma->vm_end - vma->vm_start, len);
727 ret = -EFAULT;
728 goto bail;
729 }
730
731 /*
732	 * Shared context user code requires the rcvhdrq mapped r/w; others
733	 * are allowed only a readonly mapping.
734 */
735 if (!write_ok) {
736 if (vma->vm_flags & VM_WRITE) {
737 qib_devinfo(dd->pcidev,
738 "%s must be mapped readonly\n", what);
739 ret = -EPERM;
740 goto bail;
741 }
742
743 /* don't allow them to later change with mprotect */
744 vma->vm_flags &= ~VM_MAYWRITE;
745 }
746
747 pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
748 ret = remap_pfn_range(vma, vma->vm_start, pfn,
749 len, vma->vm_page_prot);
750 if (ret)
751 qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
752 "bytes failed: %d\n", what, rcd->ctxt,
753 pfn, len, ret);
754bail:
755 return ret;
756}
757
758static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
759 u64 ureg)
760{
761 unsigned long phys;
762 unsigned long sz;
763 int ret;
764
765 /*
766 * This is real hardware, so use io_remap. This is the mechanism
767 * for the user process to update the head registers for their ctxt
768 * in the chip.
769 */
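	/* chips with header suppression use a second page of user registers, hence the larger allowed size */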
770 sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
771 if ((vma->vm_end - vma->vm_start) > sz) {
772 qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
773 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
774 ret = -EFAULT;
775 } else {
776 phys = dd->physaddr + ureg;
777 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
778
779 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
780 ret = io_remap_pfn_range(vma, vma->vm_start,
781 phys >> PAGE_SHIFT,
782 vma->vm_end - vma->vm_start,
783 vma->vm_page_prot);
784 }
785 return ret;
786}
787
788static int mmap_piobufs(struct vm_area_struct *vma,
789 struct qib_devdata *dd,
790 struct qib_ctxtdata *rcd,
791 unsigned piobufs, unsigned piocnt)
792{
793 unsigned long phys;
794 int ret;
795
796 /*
797 * When we map the PIO buffers in the chip, we want to map them as
798 * writeonly, no read possible; unfortunately, x86 doesn't allow
799 * for this in hardware, but we still prevent users from asking
800 * for it.
801 */
802 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
803 qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
804 "reqlen %lx > PAGE\n",
805 vma->vm_end - vma->vm_start);
806 ret = -EINVAL;
807 goto bail;
808 }
809
810 phys = dd->physaddr + piobufs;
811
812#if defined(__powerpc__)
813 /* There isn't a generic way to specify writethrough mappings */
814 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
815 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
816 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
817#endif
818
819 /*
820 * don't allow them to later change to readable with mprotect (for when
821 * not initially mapped readable, as is normally the case)
822 */
823 vma->vm_flags &= ~VM_MAYREAD;
824 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
825
826 if (qib_wc_pat)
827 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
828
829 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
830 vma->vm_end - vma->vm_start,
831 vma->vm_page_prot);
832bail:
833 return ret;
834}
835
836static int mmap_rcvegrbufs(struct vm_area_struct *vma,
837 struct qib_ctxtdata *rcd)
838{
839 struct qib_devdata *dd = rcd->dd;
840 unsigned long start, size;
841 size_t total_size, i;
842 unsigned long pfn;
843 int ret;
844
845 size = rcd->rcvegrbuf_size;
846 total_size = rcd->rcvegrbuf_chunks * size;
847 if ((vma->vm_end - vma->vm_start) > total_size) {
848 qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
849 "reqlen %lx > actual %lx\n",
850 vma->vm_end - vma->vm_start,
851 (unsigned long) total_size);
852 ret = -EINVAL;
853 goto bail;
854 }
855
856 if (vma->vm_flags & VM_WRITE) {
857 qib_devinfo(dd->pcidev, "Can't map eager buffers as "
858 "writable (flags=%lx)\n", vma->vm_flags);
859 ret = -EPERM;
860 goto bail;
861 }
862 /* don't allow them to later change to writeable with mprotect */
863 vma->vm_flags &= ~VM_MAYWRITE;
864
865 start = vma->vm_start;
866
867 for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
868 pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
869 ret = remap_pfn_range(vma, start, pfn, size,
870 vma->vm_page_prot);
871 if (ret < 0)
872 goto bail;
873 }
874 ret = 0;
875
876bail:
877 return ret;
878}
879
880/*
881 * qib_file_vma_fault - handle a VMA page fault.
882 */
883static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
884{
885 struct page *page;
886
887 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
888 if (!page)
889 return VM_FAULT_SIGBUS;
890
891 get_page(page);
892 vmf->page = page;
893
894 return 0;
895}
896
897static struct vm_operations_struct qib_file_vm_ops = {
898 .fault = qib_file_vma_fault,
899};
900
901static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
902 struct qib_ctxtdata *rcd, unsigned subctxt)
903{
904 struct qib_devdata *dd = rcd->dd;
905 unsigned subctxt_cnt;
906 unsigned long len;
907 void *addr;
908 size_t size;
909 int ret = 0;
910
911 subctxt_cnt = rcd->subctxt_cnt;
912 size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
913
914 /*
915 * Each process has all the subctxt uregbase, rcvhdrq, and
916 * rcvegrbufs mmapped - as an array for all the processes,
917 * and also separately for this process.
918 */
919 if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
920 addr = rcd->subctxt_uregbase;
921 size = PAGE_SIZE * subctxt_cnt;
922 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
923 addr = rcd->subctxt_rcvhdr_base;
924 size = rcd->rcvhdrq_size * subctxt_cnt;
925 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
926 addr = rcd->subctxt_rcvegrbuf;
927 size *= subctxt_cnt;
928 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
929 PAGE_SIZE * subctxt)) {
930 addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
931 size = PAGE_SIZE;
932 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
933 rcd->rcvhdrq_size * subctxt)) {
934 addr = rcd->subctxt_rcvhdr_base +
935 rcd->rcvhdrq_size * subctxt;
936 size = rcd->rcvhdrq_size;
937 } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
938 addr = rcd->user_event_mask;
939 size = PAGE_SIZE;
940 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
941 size * subctxt)) {
942 addr = rcd->subctxt_rcvegrbuf + size * subctxt;
943 /* rcvegrbufs are read-only on the slave */
944 if (vma->vm_flags & VM_WRITE) {
945 qib_devinfo(dd->pcidev,
946 "Can't map eager buffers as "
947 "writable (flags=%lx)\n", vma->vm_flags);
948 ret = -EPERM;
949 goto bail;
950 }
951 /*
952 * Don't allow permission to later change to writeable
953 * with mprotect.
954 */
955 vma->vm_flags &= ~VM_MAYWRITE;
956 } else
957 goto bail;
958 len = vma->vm_end - vma->vm_start;
959 if (len > size) {
960 ret = -EINVAL;
961 goto bail;
962 }
963
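	/*
	 * Stash the kernel virtual address in vm_pgoff so qib_file_vma_fault()
	 * can translate faulting offsets back to pages via vmalloc_to_page().
	 */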
964 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
965 vma->vm_ops = &qib_file_vm_ops;
966 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
967 ret = 1;
968
969bail:
970 return ret;
971}
972
973/**
974 * qib_mmapf - mmap various structures into user space
975 * @fp: the file pointer
976 * @vma: the VM area
977 *
978 * We use this to have a shared buffer between the kernel and the user code
979 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
980 * buffers in the chip. We have the open and close entries so we can bump
981 * the ref count and keep the driver from being unloaded while still mapped.
982 */
983static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
984{
985 struct qib_ctxtdata *rcd;
986 struct qib_devdata *dd;
987 u64 pgaddr, ureg;
988 unsigned piobufs, piocnt;
989 int ret, match = 1;
990
991 rcd = ctxt_fp(fp);
992 if (!rcd || !(vma->vm_flags & VM_SHARED)) {
993 ret = -EINVAL;
994 goto bail;
995 }
996 dd = rcd->dd;
997
998 /*
999 * This is the qib_do_user_init() code, mapping the shared buffers
1000 * and per-context user registers into the user process. The address
1001 * referred to by vm_pgoff is the file offset passed via mmap().
1002 * For shared contexts, this is the kernel vmalloc() address of the
1003 * pages to share with the master.
1004 * For non-shared or master ctxts, this is a physical address.
1005 * We only do one mmap for each space mapped.
1006 */
1007 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1008
1009 /*
1010 * Check for 0 in case one of the allocations failed, but user
1011 * called mmap anyway.
1012 */
1013 if (!pgaddr) {
1014 ret = -EINVAL;
1015 goto bail;
1016 }
1017
1018 /*
1019 * Physical addresses must fit in 40 bits for our hardware.
1020 * Check for kernel virtual addresses first, anything else must
1021 * match a HW or memory address.
1022 */
1023 ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1024 if (ret) {
1025 if (ret > 0)
1026 ret = 0;
1027 goto bail;
1028 }
1029
1030 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1031 if (!rcd->subctxt_cnt) {
1032 /* ctxt is not shared */
1033 piocnt = rcd->piocnt;
1034 piobufs = rcd->piobufs;
1035 } else if (!subctxt_fp(fp)) {
1036 /* caller is the master */
1037 piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1038 (rcd->piocnt % rcd->subctxt_cnt);
1039 piobufs = rcd->piobufs +
1040 dd->palign * (rcd->piocnt - piocnt);
1041 } else {
1042 unsigned slave = subctxt_fp(fp) - 1;
1043
1044 /* caller is a slave */
1045 piocnt = rcd->piocnt / rcd->subctxt_cnt;
1046 piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1047 }
1048
1049 if (pgaddr == ureg)
1050 ret = mmap_ureg(vma, dd, ureg);
1051 else if (pgaddr == piobufs)
1052 ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1053 else if (pgaddr == dd->pioavailregs_phys)
1054 /* in-memory copy of pioavail registers */
1055 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1056 (void *) dd->pioavailregs_dma, 0,
1057 "pioavail registers");
1058 else if (pgaddr == rcd->rcvegr_phys)
1059 ret = mmap_rcvegrbufs(vma, rcd);
1060 else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1061 /*
1062 * The rcvhdrq itself; multiple pages, contiguous
1063 * from an i/o perspective. Shared contexts need
1064 * to map r/w, so we allow writing.
1065 */
1066 ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1067 rcd->rcvhdrq, 1, "rcvhdrq");
1068 else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1069 /* in-memory copy of rcvhdrq tail register */
1070 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1071 rcd->rcvhdrtail_kvaddr, 0,
1072 "rcvhdrq tail");
1073 else
1074 match = 0;
1075 if (!match)
1076 ret = -EINVAL;
1077
1078 vma->vm_private_data = NULL;
1079
1080 if (ret < 0)
1081 qib_devinfo(dd->pcidev,
1082 "mmap Failure %d: off %llx len %lx\n",
1083 -ret, (unsigned long long)pgaddr,
1084 vma->vm_end - vma->vm_start);
1085bail:
1086 return ret;
1087}
1088
1089static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1090 struct file *fp,
1091 struct poll_table_struct *pt)
1092{
1093 struct qib_devdata *dd = rcd->dd;
1094 unsigned pollflag;
1095
1096 poll_wait(fp, &rcd->wait, pt);
1097
1098 spin_lock_irq(&dd->uctxt_lock);
1099 if (rcd->urgent != rcd->urgent_poll) {
1100 pollflag = POLLIN | POLLRDNORM;
1101 rcd->urgent_poll = rcd->urgent;
1102 } else {
1103 pollflag = 0;
1104 set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1105 }
1106 spin_unlock_irq(&dd->uctxt_lock);
1107
1108 return pollflag;
1109}
1110
1111static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1112 struct file *fp,
1113 struct poll_table_struct *pt)
1114{
1115 struct qib_devdata *dd = rcd->dd;
1116 unsigned pollflag;
1117
1118 poll_wait(fp, &rcd->wait, pt);
1119
1120 spin_lock_irq(&dd->uctxt_lock);
1121 if (dd->f_hdrqempty(rcd)) {
1122 set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1123 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1124 pollflag = 0;
1125 } else
1126 pollflag = POLLIN | POLLRDNORM;
1127 spin_unlock_irq(&dd->uctxt_lock);
1128
1129 return pollflag;
1130}
1131
1132static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1133{
1134 struct qib_ctxtdata *rcd;
1135 unsigned pollflag;
1136
1137 rcd = ctxt_fp(fp);
1138 if (!rcd)
1139 pollflag = POLLERR;
1140 else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1141 pollflag = qib_poll_urgent(rcd, fp, pt);
1142 else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1143 pollflag = qib_poll_next(rcd, fp, pt);
1144 else /* invalid */
1145 pollflag = POLLERR;
1146
1147 return pollflag;
1148}
1149
1150/*
1151 * Check that userland and driver are compatible for subcontexts.
1152 */
1153static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1154{
1155 /* this code is written long-hand for clarity */
1156 if (QIB_USER_SWMAJOR != user_swmajor) {
1157 /* no promise of compatibility if major mismatch */
1158 return 0;
1159 }
1160 if (QIB_USER_SWMAJOR == 1) {
1161 switch (QIB_USER_SWMINOR) {
1162 case 0:
1163 case 1:
1164 case 2:
1165 /* no subctxt implementation so cannot be compatible */
1166 return 0;
1167 case 3:
1168 /* 3 is only compatible with itself */
1169 return user_swminor == 3;
1170 default:
1171 /* >= 4 are compatible (or are expected to be) */
1172 return user_swminor >= 4;
1173 }
1174 }
1175 /* make no promises yet for future major versions */
1176 return 0;
1177}
1178
1179static int init_subctxts(struct qib_devdata *dd,
1180 struct qib_ctxtdata *rcd,
1181 const struct qib_user_info *uinfo)
1182{
1183 int ret = 0;
1184 unsigned num_subctxts;
1185 size_t size;
1186
1187 /*
1188 * If the user is requesting zero subctxts,
1189 * skip the subctxt allocation.
1190 */
1191 if (uinfo->spu_subctxt_cnt <= 0)
1192 goto bail;
1193 num_subctxts = uinfo->spu_subctxt_cnt;
1194
1195 /* Check for subctxt compatibility */
1196 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1197 uinfo->spu_userversion & 0xffff)) {
1198 qib_devinfo(dd->pcidev,
1199 "Mismatched user version (%d.%d) and driver "
1200 "version (%d.%d) while context sharing. Ensure "
1201 "that driver and library are from the same "
1202 "release.\n",
1203 (int) (uinfo->spu_userversion >> 16),
1204 (int) (uinfo->spu_userversion & 0xffff),
1205 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1206 goto bail;
1207 }
1208 if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1209 ret = -EINVAL;
1210 goto bail;
1211 }
1212
1213 rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1214 if (!rcd->subctxt_uregbase) {
1215 ret = -ENOMEM;
1216 goto bail;
1217 }
1218 /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1219 size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1220 sizeof(u32), PAGE_SIZE) * num_subctxts;
1221 rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1222 if (!rcd->subctxt_rcvhdr_base) {
1223 ret = -ENOMEM;
1224 goto bail_ureg;
1225 }
1226
1227 rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1228 rcd->rcvegrbuf_size *
1229 num_subctxts);
1230 if (!rcd->subctxt_rcvegrbuf) {
1231 ret = -ENOMEM;
1232 goto bail_rhdr;
1233 }
1234
1235 rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1236 rcd->subctxt_id = uinfo->spu_subctxt_id;
1237 rcd->active_slaves = 1;
1238 rcd->redirect_seq_cnt = 1;
1239 set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1240 goto bail;
1241
1242bail_rhdr:
1243 vfree(rcd->subctxt_rcvhdr_base);
1244bail_ureg:
1245 vfree(rcd->subctxt_uregbase);
1246 rcd->subctxt_uregbase = NULL;
1247bail:
1248 return ret;
1249}
1250
1251static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1252 struct file *fp, const struct qib_user_info *uinfo)
1253{
1254 struct qib_devdata *dd = ppd->dd;
1255 struct qib_ctxtdata *rcd;
1256 void *ptmp = NULL;
1257 int ret;
1258
1259 rcd = qib_create_ctxtdata(ppd, ctxt);
1260
1261 /*
1262 * Allocate memory for use in qib_tid_update() at open to
1263 * reduce cost of expected send setup per message segment
1264 */
1265 if (rcd)
1266 ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1267 dd->rcvtidcnt * sizeof(struct page **),
1268 GFP_KERNEL);
1269
1270 if (!rcd || !ptmp) {
1271 qib_dev_err(dd, "Unable to allocate ctxtdata "
1272 "memory, failing open\n");
1273 ret = -ENOMEM;
1274 goto bailerr;
1275 }
1276 rcd->userversion = uinfo->spu_userversion;
1277 ret = init_subctxts(dd, rcd, uinfo);
1278 if (ret)
1279 goto bailerr;
1280 rcd->tid_pg_list = ptmp;
1281 rcd->pid = current->pid;
1282 init_waitqueue_head(&dd->rcd[ctxt]->wait);
1283 strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1284 ctxt_fp(fp) = rcd;
1285 qib_stats.sps_ctxts++;
1286 ret = 0;
1287 goto bail;
1288
1289bailerr:
1290 dd->rcd[ctxt] = NULL;
1291 kfree(rcd);
1292 kfree(ptmp);
1293bail:
1294 return ret;
1295}
1296
1297static inline int usable(struct qib_pportdata *ppd, int active_only)
1298{
1299 struct qib_devdata *dd = ppd->dd;
1300 u32 linkok = active_only ? QIBL_LINKACTIVE :
1301 (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
1302
1303 return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1304 (ppd->lflags & linkok);
1305}
1306
1307static int find_free_ctxt(int unit, struct file *fp,
1308 const struct qib_user_info *uinfo)
1309{
1310 struct qib_devdata *dd = qib_lookup(unit);
1311 struct qib_pportdata *ppd = NULL;
1312 int ret;
1313 u32 ctxt;
1314
1315 if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
1316 ret = -ENODEV;
1317 goto bail;
1318 }
1319
1320 /*
1321	 * If the user requests a specific port, only try that one port; else
1322 * select "best" port below, based on context.
1323 */
1324 if (uinfo->spu_port) {
1325 ppd = dd->pport + uinfo->spu_port - 1;
1326 if (!usable(ppd, 0)) {
1327 ret = -ENETDOWN;
1328 goto bail;
1329 }
1330 }
1331
1332 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
1333 if (dd->rcd[ctxt])
1334 continue;
1335 /*
1336		 * The setting and clearing of user context rcd[x] is protected
1337 * by the qib_mutex
1338 */
1339 if (!ppd) {
1340 /* choose port based on ctxt, if up, else 1st up */
1341 ppd = dd->pport + (ctxt % dd->num_pports);
1342 if (!usable(ppd, 0)) {
1343 int i;
1344 for (i = 0; i < dd->num_pports; i++) {
1345 ppd = dd->pport + i;
1346 if (usable(ppd, 0))
1347 break;
1348 }
1349 if (i == dd->num_pports) {
1350 ret = -ENETDOWN;
1351 goto bail;
1352 }
1353 }
1354 }
1355 ret = setup_ctxt(ppd, ctxt, fp, uinfo);
1356 goto bail;
1357 }
1358 ret = -EBUSY;
1359
1360bail:
1361 return ret;
1362}
1363
1364static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1365{
1366 struct qib_pportdata *ppd;
1367 int ret = 0, devmax;
1368 int npresent, nup;
1369 int ndev;
1370 u32 port = uinfo->spu_port, ctxt;
1371
1372 devmax = qib_count_units(&npresent, &nup);
1373
1374 for (ndev = 0; ndev < devmax; ndev++) {
1375 struct qib_devdata *dd = qib_lookup(ndev);
1376
1377 /* device portion of usable() */
1378 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1379 continue;
1380 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
1381 if (dd->rcd[ctxt])
1382 continue;
1383 if (port) {
1384 if (port > dd->num_pports)
1385 continue;
1386 ppd = dd->pport + port - 1;
1387 if (!usable(ppd, 0))
1388 continue;
1389 } else {
1390 /*
1391 * choose port based on ctxt, if up, else
1392 * first port that's up for multi-port HCA
1393 */
1394 ppd = dd->pport + (ctxt % dd->num_pports);
1395 if (!usable(ppd, 0)) {
1396 int j;
1397
1398 ppd = NULL;
1399 for (j = 0; j < dd->num_pports &&
1400 !ppd; j++)
1401 if (usable(dd->pport + j, 0))
1402 ppd = dd->pport + j;
1403 if (!ppd)
1404 continue; /* to next unit */
1405 }
1406 }
1407 ret = setup_ctxt(ppd, ctxt, fp, uinfo);
1408 goto done;
1409 }
1410 }
1411
1412 if (npresent) {
1413 if (nup == 0)
1414 ret = -ENETDOWN;
1415 else
1416 ret = -EBUSY;
1417 } else
1418 ret = -ENXIO;
1419
1420done:
1421 return ret;
1422}
1423
1424static int find_shared_ctxt(struct file *fp,
1425 const struct qib_user_info *uinfo)
1426{
1427 int devmax, ndev, i;
1428 int ret = 0;
1429
1430 devmax = qib_count_units(NULL, NULL);
1431
1432 for (ndev = 0; ndev < devmax; ndev++) {
1433 struct qib_devdata *dd = qib_lookup(ndev);
1434
1435 /* device portion of usable() */
1436 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1437 continue;
1438 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1439 struct qib_ctxtdata *rcd = dd->rcd[i];
1440
1441 /* Skip ctxts which are not yet open */
1442 if (!rcd || !rcd->cnt)
1443 continue;
1444 /* Skip ctxt if it doesn't match the requested one */
1445 if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1446 continue;
1447 /* Verify the sharing process matches the master */
1448 if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1449 rcd->userversion != uinfo->spu_userversion ||
1450 rcd->cnt >= rcd->subctxt_cnt) {
1451 ret = -EINVAL;
1452 goto done;
1453 }
1454 ctxt_fp(fp) = rcd;
1455 subctxt_fp(fp) = rcd->cnt++;
1456 rcd->subpid[subctxt_fp(fp)] = current->pid;
1457 tidcursor_fp(fp) = 0;
1458 rcd->active_slaves |= 1 << subctxt_fp(fp);
1459 ret = 1;
1460 goto done;
1461 }
1462 }
1463
1464done:
1465 return ret;
1466}
1467
1468static int qib_open(struct inode *in, struct file *fp)
1469{
1470 /* The real work is performed later in qib_assign_ctxt() */
1471 fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1472 if (fp->private_data) /* no cpu affinity by default */
1473 ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1474 return fp->private_data ? 0 : -ENOMEM;
1475}
1476
1477/*
1478 * Get ctxt early, so can set affinity prior to memory allocation.
1479 */
1480static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1481{
1482 int ret;
1483 int i_minor;
1484 unsigned swmajor, swminor;
1485
1486 /* Check to be sure we haven't already initialized this file */
1487 if (ctxt_fp(fp)) {
1488 ret = -EINVAL;
1489 goto done;
1490 }
1491
1492 /* for now, if major version is different, bail */
1493 swmajor = uinfo->spu_userversion >> 16;
1494 if (swmajor != QIB_USER_SWMAJOR) {
1495 ret = -ENODEV;
1496 goto done;
1497 }
1498
1499 swminor = uinfo->spu_userversion & 0xffff;
1500
1501 mutex_lock(&qib_mutex);
1502
1503 if (qib_compatible_subctxts(swmajor, swminor) &&
1504 uinfo->spu_subctxt_cnt) {
1505 ret = find_shared_ctxt(fp, uinfo);
1506 if (ret) {
1507 if (ret > 0)
1508 ret = 0;
1509 goto done_chk_sdma;
1510 }
1511 }
1512
1513 i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
1514 if (i_minor)
1515 ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1516 else
1517 ret = get_a_ctxt(fp, uinfo);
1518
1519done_chk_sdma:
1520 if (!ret) {
1521 struct qib_filedata *fd = fp->private_data;
1522 const struct qib_ctxtdata *rcd = fd->rcd;
1523 const struct qib_devdata *dd = rcd->dd;
1524
1525 if (dd->flags & QIB_HAS_SEND_DMA) {
1526 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1527 dd->unit,
1528 rcd->ctxt,
1529 fd->subctxt);
1530 if (!fd->pq)
1531 ret = -ENOMEM;
1532 }
1533
1534 /*
1535		 * If the process has NOT already set its affinity, select and
1536		 * reserve a processor for it, as a rendezvous for all
1537		 * users of the driver. If they don't actually later
1538		 * set affinity to this cpu, or set it to some other cpu,
1539		 * it just means that sooner or later we don't recommend
1540		 * a cpu, and let the scheduler do its best.
1541 */
1542 if (!ret && cpus_weight(current->cpus_allowed) >=
1543 qib_cpulist_count) {
1544 int cpu;
1545 cpu = find_first_zero_bit(qib_cpulist,
1546 qib_cpulist_count);
1547 if (cpu != qib_cpulist_count) {
1548 __set_bit(cpu, qib_cpulist);
1549 fd->rec_cpu_num = cpu;
1550 }
1551 } else if (cpus_weight(current->cpus_allowed) == 1 &&
1552 test_bit(first_cpu(current->cpus_allowed),
1553 qib_cpulist))
1554 qib_devinfo(dd->pcidev, "%s PID %u affinity "
1555 "set to cpu %d; already allocated\n",
1556 current->comm, current->pid,
1557 first_cpu(current->cpus_allowed));
1558 }
1559
1560 mutex_unlock(&qib_mutex);
1561
1562done:
1563 return ret;
1564}
1565
1566
1567static int qib_do_user_init(struct file *fp,
1568 const struct qib_user_info *uinfo)
1569{
1570 int ret;
1571 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1572 struct qib_devdata *dd;
1573 unsigned uctxt;
1574
1575 /* Subctxts don't need to initialize anything since master did it. */
1576 if (subctxt_fp(fp)) {
1577 ret = wait_event_interruptible(rcd->wait,
1578 !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1579 goto bail;
1580 }
1581
1582 dd = rcd->dd;
1583
1584 /* some ctxts may get extra buffers, calculate that here */
1585 uctxt = rcd->ctxt - dd->first_user_ctxt;
1586 if (uctxt < dd->ctxts_extrabuf) {
1587 rcd->piocnt = dd->pbufsctxt + 1;
1588 rcd->pio_base = rcd->piocnt * uctxt;
1589 } else {
1590 rcd->piocnt = dd->pbufsctxt;
1591 rcd->pio_base = rcd->piocnt * uctxt +
1592 dd->ctxts_extrabuf;
1593 }
1594
1595 /*
1596 * All user buffers are 2KB buffers. If we ever support
1597 * giving 4KB buffers to user processes, this will need some
1598 * work. Can't use piobufbase directly, because it has
1599 * both 2K and 4K buffer base values. So check and handle.
1600 */
1601 if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1602 if (rcd->pio_base >= dd->piobcnt2k) {
1603 qib_dev_err(dd,
1604 "%u:ctxt%u: no 2KB buffers available\n",
1605 dd->unit, rcd->ctxt);
1606 ret = -ENOBUFS;
1607 goto bail;
1608 }
1609 rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1610 qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1611 rcd->ctxt, rcd->piocnt);
1612 }
1613
1614 rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1615 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1616 TXCHK_CHG_TYPE_USER, rcd);
1617 /*
1618 * try to ensure that processes start up with consistent avail update
1619	 * for their own range, at least. If the system is very quiet, it might
1620 * have the in-memory copy out of date at startup for this range of
1621 * buffers, when a context gets re-used. Do after the chg_pioavail
1622 * and before the rest of setup, so it's "almost certain" the dma
1623 * will have occurred (can't 100% guarantee, but should be many
1624 * decimals of 9s, with this ordering), given how much else happens
1625 * after this.
1626 */
1627 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1628
1629 /*
1630 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1631	 * array for the time being. If rcd->ctxt > chip-supported,
1632	 * we need to do extra work here, handling the overflow
1633	 * through ctxt 0, someday.
1634 */
1635 ret = qib_create_rcvhdrq(dd, rcd);
1636 if (!ret)
1637 ret = qib_setup_eagerbufs(rcd);
1638 if (ret)
1639 goto bail_pio;
1640
1641 rcd->tidcursor = 0; /* start at beginning after open */
1642
1643 /* initialize poll variables... */
1644 rcd->urgent = 0;
1645 rcd->urgent_poll = 0;
1646
1647 /*
1648 * Now enable the ctxt for receive.
1649 * For chips that are set to DMA the tail register to memory
1650 * when they change (and when the update bit transitions from
1651	 * 0 to 1). So for those chips, we turn it off and then back on.
1652 * This will (very briefly) affect any other open ctxts, but the
1653 * duration is very short, and therefore isn't an issue. We
1654	 * explicitly set the in-memory tail copy to 0 beforehand, so we
1655 * don't have to wait to be sure the DMA update has happened
1656 * (chip resets head/tail to 0 on transition to enable).
1657 */
1658 if (rcd->rcvhdrtail_kvaddr)
1659 qib_clear_rcvhdrtail(rcd);
1660
1661 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1662 rcd->ctxt);
1663
1664 /* Notify any waiting slaves */
1665 if (rcd->subctxt_cnt) {
1666 clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1667 wake_up(&rcd->wait);
1668 }
1669 return 0;
1670
1671bail_pio:
1672 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1673 TXCHK_CHG_TYPE_KERN, rcd);
1674bail:
1675 return ret;
1676}
1677
1678/**
1679 * unlock_expected_tids - unlock any expected TID entries context still had in use
1680 * @rcd: ctxt
1681 *
1682 * We don't actually update the chip here, because we do a bulk update
1683 * below, using f_clear_tids.
1684 */
1685static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1686{
1687 struct qib_devdata *dd = rcd->dd;
1688 int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1689 int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1690
1691 for (i = ctxt_tidbase; i < maxtid; i++) {
1692 struct page *p = dd->pageshadow[i];
1693 dma_addr_t phys;
1694
1695 if (!p)
1696 continue;
1697
1698 phys = dd->physshadow[i];
1699 dd->physshadow[i] = dd->tidinvalid;
1700 dd->pageshadow[i] = NULL;
1701 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1702 PCI_DMA_FROMDEVICE);
1703 qib_release_user_pages(&p, 1);
1704 cnt++;
1705 }
1706}
1707
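/*
 * Release everything associated with this open of the device: drain and
 * destroy any user sdma queue, release the pinned CPU (if any), and if
 * this was the last reference to the context, disable its receives,
 * clean up partition keys, PIO buffers and expected TIDs, then free the
 * context data after dropping qib_mutex.
 */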
1708static int qib_close(struct inode *in, struct file *fp)
1709{
1710 int ret = 0;
1711 struct qib_filedata *fd;
1712 struct qib_ctxtdata *rcd;
1713 struct qib_devdata *dd;
1714 unsigned long flags;
1715 unsigned ctxt;
1716 pid_t pid;
1717
1718 mutex_lock(&qib_mutex);
1719
1720 fd = (struct qib_filedata *) fp->private_data;
1721 fp->private_data = NULL;
1722 rcd = fd->rcd;
1723 if (!rcd) {
1724 mutex_unlock(&qib_mutex);
1725 goto bail;
1726 }
1727
1728 dd = rcd->dd;
1729
1730 /* ensure all pio buffer writes in progress are flushed */
1731 qib_flush_wc();
1732
1733 /* drain user sdma queue */
1734 if (fd->pq) {
1735 qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1736 qib_user_sdma_queue_destroy(fd->pq);
1737 }
1738
1739 if (fd->rec_cpu_num != -1)
1740 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1741
1742 if (--rcd->cnt) {
1743 /*
1744 * XXX If the master closes the context before the slave(s),
1745 * revoke the mmap for the eager receive queue so
1746 * the slave(s) don't wait for receive data forever.
1747 */
1748 rcd->active_slaves &= ~(1 << fd->subctxt);
1749 rcd->subpid[fd->subctxt] = 0;
1750 mutex_unlock(&qib_mutex);
1751 goto bail;
1752 }
1753
1754 /* early; no interrupt users after this */
1755 spin_lock_irqsave(&dd->uctxt_lock, flags);
1756 ctxt = rcd->ctxt;
1757 dd->rcd[ctxt] = NULL;
1758 pid = rcd->pid;
1759 rcd->pid = 0;
1760 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1761
1762 if (rcd->rcvwait_to || rcd->piowait_to ||
1763 rcd->rcvnowait || rcd->pionowait) {
1764 rcd->rcvwait_to = 0;
1765 rcd->piowait_to = 0;
1766 rcd->rcvnowait = 0;
1767 rcd->pionowait = 0;
1768 }
1769 if (rcd->flag)
1770 rcd->flag = 0;
1771
1772 if (dd->kregbase) {
1773 /* atomically clear receive enable ctxt and intr avail. */
1774 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1775 QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1776
1777 /* clean up the pkeys for this ctxt user */
1778 qib_clean_part_key(rcd, dd);
1779 qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1780 qib_chg_pioavailkernel(dd, rcd->pio_base,
1781 rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1782
1783 dd->f_clear_tids(dd, rcd);
1784
1785 if (dd->pageshadow)
1786 unlock_expected_tids(rcd);
1787 qib_stats.sps_ctxts--;
1788 }
1789
1790 mutex_unlock(&qib_mutex);
1791 qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1792
1793bail:
1794 kfree(fd);
1795 return ret;
1796}
1797
1798static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1799{
1800 struct qib_ctxt_info info;
1801 int ret;
1802 size_t sz;
1803 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1804 struct qib_filedata *fd;
1805
1806 fd = (struct qib_filedata *) fp->private_data;
1807
1808 info.num_active = qib_count_active_units();
1809 info.unit = rcd->dd->unit;
1810 info.port = rcd->ppd->port;
1811 info.ctxt = rcd->ctxt;
1812 info.subctxt = subctxt_fp(fp);
1813 /* Number of user ctxts available for this device. */
1814 info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1815 info.num_subctxts = rcd->subctxt_cnt;
1816 info.rec_cpu = fd->rec_cpu_num;
1817 sz = sizeof(info);
1818
1819 if (copy_to_user(uinfo, &info, sz)) {
1820 ret = -EFAULT;
1821 goto bail;
1822 }
1823 ret = 0;
1824
1825bail:
1826 return ret;
1827}
1828
1829static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1830 u32 __user *inflightp)
1831{
1832 const u32 val = qib_user_sdma_inflight_counter(pq);
1833
1834 if (put_user(val, inflightp))
1835 return -EFAULT;
1836
1837 return 0;
1838}
1839
1840static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1841 struct qib_user_sdma_queue *pq,
1842 u32 __user *completep)
1843{
1844 u32 val;
1845 int err;
1846
1847 if (!pq)
1848 return -EINVAL;
1849
1850 err = qib_user_sdma_make_progress(ppd, pq);
1851 if (err < 0)
1852 return err;
1853
1854 val = qib_user_sdma_complete_counter(pq);
1855 if (put_user(val, completep))
1856 return -EFAULT;
1857
1858 return 0;
1859}
1860
1861static int disarm_req_delay(struct qib_ctxtdata *rcd)
1862{
1863 int ret = 0;
1864
1865 if (!usable(rcd->ppd, 1)) {
1866 int i;
1867 /*
1868 * if link is down, or otherwise not usable, delay
1869 * the caller up to 30 seconds, so we don't thrash
1870 * in trying to get the chip back to ACTIVE, and
1871 * set flag so they make the call again.
1872 */
1873 if (rcd->user_event_mask) {
1874 /*
1875 * subctxt_cnt is 0 if not shared, so do base
1876 * separately, first, then remaining subctxt, if any
1877 */
1878 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1879 &rcd->user_event_mask[0]);
1880 for (i = 1; i < rcd->subctxt_cnt; i++)
1881 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1882 &rcd->user_event_mask[i]);
1883 }
1884 for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
1885 msleep(100);
1886 ret = -ENETDOWN;
1887 }
1888 return ret;
1889}
1890
1891/*
1892 * Find all user contexts in use, and set the specified bit in their
1893 * event mask.
1894 * See also find_ctxt() for a similar use, that is specific to send buffers.
1895 */
1896int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1897{
1898 struct qib_ctxtdata *rcd;
1899 unsigned ctxt;
1900 int ret = 0;
1901
1902 spin_lock(&ppd->dd->uctxt_lock);
1903 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
1904 ctxt++) {
1905 rcd = ppd->dd->rcd[ctxt];
1906 if (!rcd)
1907 continue;
1908 if (rcd->user_event_mask) {
1909 int i;
1910 /*
1911 * subctxt_cnt is 0 if not shared, so do base
1912 * separately, first, then remaining subctxt, if any
1913 */
1914 set_bit(evtbit, &rcd->user_event_mask[0]);
1915 for (i = 1; i < rcd->subctxt_cnt; i++)
1916 set_bit(evtbit, &rcd->user_event_mask[i]);
1917 }
1918 ret = 1;
1919 break;
1920 }
1921 spin_unlock(&ppd->dd->uctxt_lock);
1922
1923 return ret;
1924}
1925
1926/*
1927 * clear the event notifier events for this context.
1928 * For the DISARM_BUFS case, we also take action (this obsoletes
1929 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
1930 * compatibility).
1931 * Other bits don't currently require actions, just atomically clear.
1932 * User process then performs actions appropriate to bit having been
1933 * set, if desired, and checks again in future.
1934 */
1935static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
1936 unsigned long events)
1937{
1938 int ret = 0, i;
1939
1940 for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
1941 if (!test_bit(i, &events))
1942 continue;
1943 if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
1944 (void)qib_disarm_piobufs_ifneeded(rcd);
1945 ret = disarm_req_delay(rcd);
1946 } else
1947 clear_bit(i, &rcd->user_event_mask[subctxt]);
1948 }
1949 return ret;
1950}
1951
1952static ssize_t qib_write(struct file *fp, const char __user *data,
1953 size_t count, loff_t *off)
1954{
1955 const struct qib_cmd __user *ucmd;
1956 struct qib_ctxtdata *rcd;
1957 const void __user *src;
1958 size_t consumed, copy = 0;
1959 struct qib_cmd cmd;
1960 ssize_t ret = 0;
1961 void *dest;
1962
1963 if (count < sizeof(cmd.type)) {
1964 ret = -EINVAL;
1965 goto bail;
1966 }
1967
1968 ucmd = (const struct qib_cmd __user *) data;
1969
1970 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
1971 ret = -EFAULT;
1972 goto bail;
1973 }
1974
1975 consumed = sizeof(cmd.type);
1976
1977 switch (cmd.type) {
1978 case QIB_CMD_ASSIGN_CTXT:
1979 case QIB_CMD_USER_INIT:
1980 copy = sizeof(cmd.cmd.user_info);
1981 dest = &cmd.cmd.user_info;
1982 src = &ucmd->cmd.user_info;
1983 break;
1984
1985 case QIB_CMD_RECV_CTRL:
1986 copy = sizeof(cmd.cmd.recv_ctrl);
1987 dest = &cmd.cmd.recv_ctrl;
1988 src = &ucmd->cmd.recv_ctrl;
1989 break;
1990
1991 case QIB_CMD_CTXT_INFO:
1992 copy = sizeof(cmd.cmd.ctxt_info);
1993 dest = &cmd.cmd.ctxt_info;
1994 src = &ucmd->cmd.ctxt_info;
1995 break;
1996
1997 case QIB_CMD_TID_UPDATE:
1998 case QIB_CMD_TID_FREE:
1999 copy = sizeof(cmd.cmd.tid_info);
2000 dest = &cmd.cmd.tid_info;
2001 src = &ucmd->cmd.tid_info;
2002 break;
2003
2004 case QIB_CMD_SET_PART_KEY:
2005 copy = sizeof(cmd.cmd.part_key);
2006 dest = &cmd.cmd.part_key;
2007 src = &ucmd->cmd.part_key;
2008 break;
2009
2010 case QIB_CMD_DISARM_BUFS:
2011 case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2012 copy = 0;
2013 src = NULL;
2014 dest = NULL;
2015 break;
2016
2017 case QIB_CMD_POLL_TYPE:
2018 copy = sizeof(cmd.cmd.poll_type);
2019 dest = &cmd.cmd.poll_type;
2020 src = &ucmd->cmd.poll_type;
2021 break;
2022
2023 case QIB_CMD_ARMLAUNCH_CTRL:
2024 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2025 dest = &cmd.cmd.armlaunch_ctrl;
2026 src = &ucmd->cmd.armlaunch_ctrl;
2027 break;
2028
2029 case QIB_CMD_SDMA_INFLIGHT:
2030 copy = sizeof(cmd.cmd.sdma_inflight);
2031 dest = &cmd.cmd.sdma_inflight;
2032 src = &ucmd->cmd.sdma_inflight;
2033 break;
2034
2035 case QIB_CMD_SDMA_COMPLETE:
2036 copy = sizeof(cmd.cmd.sdma_complete);
2037 dest = &cmd.cmd.sdma_complete;
2038 src = &ucmd->cmd.sdma_complete;
2039 break;
2040
2041 case QIB_CMD_ACK_EVENT:
2042 copy = sizeof(cmd.cmd.event_mask);
2043 dest = &cmd.cmd.event_mask;
2044 src = &ucmd->cmd.event_mask;
2045 break;
2046
2047 default:
2048 ret = -EINVAL;
2049 goto bail;
2050 }
2051
2052 if (copy) {
2053 if ((count - consumed) < copy) {
2054 ret = -EINVAL;
2055 goto bail;
2056 }
2057 if (copy_from_user(dest, src, copy)) {
2058 ret = -EFAULT;
2059 goto bail;
2060 }
2061 consumed += copy;
2062 }
2063
2064 rcd = ctxt_fp(fp);
2065 if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2066 ret = -EINVAL;
2067 goto bail;
2068 }
2069
2070 switch (cmd.type) {
2071 case QIB_CMD_ASSIGN_CTXT:
2072 ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2073 if (ret)
2074 goto bail;
2075 break;
2076
2077 case QIB_CMD_USER_INIT:
2078 ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2079 if (ret)
2080 goto bail;
2081 ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2082 cmd.cmd.user_info.spu_base_info,
2083 cmd.cmd.user_info.spu_base_info_size);
2084 break;
2085
2086 case QIB_CMD_RECV_CTRL:
2087 ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2088 break;
2089
2090 case QIB_CMD_CTXT_INFO:
2091 ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2092 (unsigned long) cmd.cmd.ctxt_info);
2093 break;
2094
2095 case QIB_CMD_TID_UPDATE:
2096 ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2097 break;
2098
2099 case QIB_CMD_TID_FREE:
2100 ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2101 break;
2102
2103 case QIB_CMD_SET_PART_KEY:
2104 ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2105 break;
2106
2107 case QIB_CMD_DISARM_BUFS:
2108 (void)qib_disarm_piobufs_ifneeded(rcd);
2109 ret = disarm_req_delay(rcd);
2110 break;
2111
2112 case QIB_CMD_PIOAVAILUPD:
2113 qib_force_pio_avail_update(rcd->dd);
2114 break;
2115
2116 case QIB_CMD_POLL_TYPE:
2117 rcd->poll_type = cmd.cmd.poll_type;
2118 break;
2119
2120 case QIB_CMD_ARMLAUNCH_CTRL:
2121 rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2122 break;
2123
2124 case QIB_CMD_SDMA_INFLIGHT:
2125 ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2126 (u32 __user *) (unsigned long)
2127 cmd.cmd.sdma_inflight);
2128 break;
2129
2130 case QIB_CMD_SDMA_COMPLETE:
2131 ret = qib_sdma_get_complete(rcd->ppd,
2132 user_sdma_queue_fp(fp),
2133 (u32 __user *) (unsigned long)
2134 cmd.cmd.sdma_complete);
2135 break;
2136
2137 case QIB_CMD_ACK_EVENT:
2138 ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2139 cmd.cmd.event_mask);
2140 break;
2141 }
2142
2143 if (ret >= 0)
2144 ret = consumed;
2145
2146bail:
2147 return ret;
2148}
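/*
 * Illustrative userspace usage of the write() command interface above
 * (hypothetical, not part of this driver):
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_PIOAVAILUPD };
 *	ssize_t n = write(fd, &cmd, sizeof(cmd.type));
 *
 * Commands that carry a payload (e.g. QIB_CMD_USER_INIT) must also
 * supply the corresponding union member, or -EINVAL is returned; on
 * success the number of bytes consumed is returned.
 */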
2149
2150static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
2151 unsigned long dim, loff_t off)
2152{
2153 struct qib_filedata *fp = iocb->ki_filp->private_data;
2154 struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2155 struct qib_user_sdma_queue *pq = fp->pq;
2156
2157 if (!dim || !pq)
2158 return -EINVAL;
2159
2160 return qib_user_sdma_writev(rcd, pq, iov, dim);
2161}
2162
2163static struct class *qib_class;
2164static dev_t qib_dev;
2165
2166int qib_cdev_init(int minor, const char *name,
2167 const struct file_operations *fops,
2168 struct cdev **cdevp, struct device **devp)
2169{
2170 const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2171 struct cdev *cdev;
2172 struct device *device = NULL;
2173 int ret;
2174
2175 cdev = cdev_alloc();
2176 if (!cdev) {
2177 printk(KERN_ERR QIB_DRV_NAME
2178 ": Could not allocate cdev for minor %d, %s\n",
2179 minor, name);
2180 ret = -ENOMEM;
2181 goto done;
2182 }
2183
2184 cdev->owner = THIS_MODULE;
2185 cdev->ops = fops;
2186 kobject_set_name(&cdev->kobj, name);
2187
2188 ret = cdev_add(cdev, dev, 1);
2189 if (ret < 0) {
2190 printk(KERN_ERR QIB_DRV_NAME
2191 ": Could not add cdev for minor %d, %s (err %d)\n",
2192 minor, name, -ret);
2193 goto err_cdev;
2194 }
2195
2196 device = device_create(qib_class, NULL, dev, NULL, name);
2197 if (!IS_ERR(device))
2198 goto done;
2199 ret = PTR_ERR(device);
2200 device = NULL;
2201 printk(KERN_ERR QIB_DRV_NAME ": Could not create "
2202 "device for minor %d, %s (err %d)\n",
2203 minor, name, -ret);
2204err_cdev:
2205 cdev_del(cdev);
2206 cdev = NULL;
2207done:
2208 *cdevp = cdev;
2209 *devp = device;
2210 return ret;
2211}
2212
2213void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2214{
2215 struct device *device = *devp;
2216
2217 if (device) {
2218 device_unregister(device);
2219 *devp = NULL;
2220 }
2221
2222 if (*cdevp) {
2223 cdev_del(*cdevp);
2224 *cdevp = NULL;
2225 }
2226}
2227
2228static struct cdev *wildcard_cdev;
2229static struct device *wildcard_device;
2230
2231int __init qib_dev_init(void)
2232{
2233 int ret;
2234
2235 ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2236 if (ret < 0) {
2237 printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
2238 "chrdev region (err %d)\n", -ret);
2239 goto done;
2240 }
2241
2242 qib_class = class_create(THIS_MODULE, "ipath");
2243 if (IS_ERR(qib_class)) {
2244 ret = PTR_ERR(qib_class);
2245 printk(KERN_ERR QIB_DRV_NAME ": Could not create "
2246 "device class (err %d)\n", -ret);
2247 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2248 }
2249
2250done:
2251 return ret;
2252}
2253
2254void qib_dev_cleanup(void)
2255{
2256 if (qib_class) {
2257 class_destroy(qib_class);
2258 qib_class = NULL;
2259 }
2260
2261 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2262}
2263
2264static atomic_t user_count = ATOMIC_INIT(0);
2265
2266static void qib_user_remove(struct qib_devdata *dd)
2267{
2268 if (atomic_dec_return(&user_count) == 0)
2269 qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2270
2271 qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2272}
2273
2274static int qib_user_add(struct qib_devdata *dd)
2275{
2276 char name[10];
2277 int ret;
2278
2279 if (atomic_inc_return(&user_count) == 1) {
2280 ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2281 &wildcard_cdev, &wildcard_device);
2282 if (ret)
2283 goto done;
2284 }
2285
2286 snprintf(name, sizeof(name), "ipath%d", dd->unit);
2287 ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2288 &dd->user_cdev, &dd->user_device);
2289 if (ret)
2290 qib_user_remove(dd);
2291done:
2292 return ret;
2293}
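/*
 * Note: the wildcard node "ipath" is created once (minor 0) when the
 * first unit is added; each unit also gets its own "ipath<unit>" node
 * at minor unit + 1, as set up in qib_user_add() above.
 */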
2294
2295/*
2296 * Create per-unit files in /dev
2297 */
2298int qib_device_create(struct qib_devdata *dd)
2299{
2300 int r, ret;
2301
2302 r = qib_user_add(dd);
2303 ret = qib_diag_add(dd);
2304 if (r && !ret)
2305 ret = r;
2306 return ret;
2307}
2308
2309/*
2310 * Remove per-unit files in /dev
2311 * void, core kernel returns no errors for this stuff
2312 */
2313void qib_device_remove(struct qib_devdata *dd)
2314{
2315 qib_user_remove(dd);
2316 qib_diag_remove(dd);
2317}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
new file mode 100644
index 000000000000..844954bf417b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -0,0 +1,616 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/fs.h>
36#include <linux/mount.h>
37#include <linux/pagemap.h>
38#include <linux/init.h>
39#include <linux/namei.h>
40
41#include "qib.h"
42
43#define QIBFS_MAGIC 0x726a77
44
45static struct super_block *qib_super;
46
47#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
48
49static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
50 int mode, const struct file_operations *fops,
51 void *data)
52{
53 int error;
54 struct inode *inode = new_inode(dir->i_sb);
55
56 if (!inode) {
57 error = -EPERM;
58 goto bail;
59 }
60
61 inode->i_mode = mode;
62 inode->i_uid = 0;
63 inode->i_gid = 0;
64 inode->i_blocks = 0;
65 inode->i_atime = CURRENT_TIME;
66 inode->i_mtime = inode->i_atime;
67 inode->i_ctime = inode->i_atime;
68 inode->i_private = data;
69 if ((mode & S_IFMT) == S_IFDIR) {
70 inode->i_op = &simple_dir_inode_operations;
71 inc_nlink(inode);
72 inc_nlink(dir);
73 }
74
75 inode->i_fop = fops;
76
77 d_instantiate(dentry, inode);
78 error = 0;
79
80bail:
81 return error;
82}
83
84static int create_file(const char *name, mode_t mode,
85 struct dentry *parent, struct dentry **dentry,
86 const struct file_operations *fops, void *data)
87{
88 int error;
89
90 *dentry = NULL;
91 mutex_lock(&parent->d_inode->i_mutex);
92 *dentry = lookup_one_len(name, parent, strlen(name));
93 if (!IS_ERR(*dentry))
94 error = qibfs_mknod(parent->d_inode, *dentry,
95 mode, fops, data);
96 else
97 error = PTR_ERR(*dentry);
98 mutex_unlock(&parent->d_inode->i_mutex);
99
100 return error;
101}
102
103static ssize_t driver_stats_read(struct file *file, char __user *buf,
104 size_t count, loff_t *ppos)
105{
106 return simple_read_from_buffer(buf, count, ppos, &qib_stats,
107 sizeof qib_stats);
108}
109
110/*
111 * driver stats field names, one line per stat, single string. Used by
112 * programs like ipathstats to print the stats in a way which works for
113 * different versions of drivers, without changing program source.
114 * If qlogic_ib_stats changes, this needs to change. Names need to be
115 * 12 chars or less (w/o newline), for proper display by ipathstats utility.
116 */
117static const char qib_statnames[] =
118 "KernIntr\n"
119 "ErrorIntr\n"
120 "Tx_Errs\n"
121 "Rcv_Errs\n"
122 "H/W_Errs\n"
123 "NoPIOBufs\n"
124 "CtxtsOpen\n"
125 "RcvLen_Errs\n"
126 "EgrBufFull\n"
127 "EgrHdrFull\n"
128 ;
129
130static ssize_t driver_names_read(struct file *file, char __user *buf,
131 size_t count, loff_t *ppos)
132{
133 return simple_read_from_buffer(buf, count, ppos, qib_statnames,
134 sizeof qib_statnames - 1); /* no null */
135}
136
137static const struct file_operations driver_ops[] = {
138 { .read = driver_stats_read, },
139 { .read = driver_names_read, },
140};
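/*
 * driver_ops[0] backs the "driver_stats" file and driver_ops[1] the
 * "driver_stats_names" file, both created in qibfs_fill_super() below.
 */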
141
142/* read the per-device counters */
143static ssize_t dev_counters_read(struct file *file, char __user *buf,
144 size_t count, loff_t *ppos)
145{
146 u64 *counters;
147 size_t avail;
148 struct qib_devdata *dd = private2dd(file);
149
150 avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
151 return simple_read_from_buffer(buf, count, ppos, counters, avail);
152}
153
154/* read the per-device counters */
155static ssize_t dev_names_read(struct file *file, char __user *buf,
156 size_t count, loff_t *ppos)
157{
158 char *names;
159 size_t avail;
160 struct qib_devdata *dd = private2dd(file);
161
162 avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
163 return simple_read_from_buffer(buf, count, ppos, names, avail);
164}
165
166static const struct file_operations cntr_ops[] = {
167 { .read = dev_counters_read, },
168 { .read = dev_names_read, },
169};
170
171/*
172 * Could use file->f_dentry->d_inode->i_ino to figure out which file,
173 * instead of separate routine for each, but for now, this works...
174 */
175
176/* read the per-port names (same for each port) */
177static ssize_t portnames_read(struct file *file, char __user *buf,
178 size_t count, loff_t *ppos)
179{
180 char *names;
181 size_t avail;
182 struct qib_devdata *dd = private2dd(file);
183
184 avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
185 return simple_read_from_buffer(buf, count, ppos, names, avail);
186}
187
188/* read the per-port counters for port 1 (pidx 0) */
189static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
190 size_t count, loff_t *ppos)
191{
192 u64 *counters;
193 size_t avail;
194 struct qib_devdata *dd = private2dd(file);
195
196 avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
197 return simple_read_from_buffer(buf, count, ppos, counters, avail);
198}
199
200/* read the per-port counters for port 2 (pidx 1) */
201static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
202 size_t count, loff_t *ppos)
203{
204 u64 *counters;
205 size_t avail;
206 struct qib_devdata *dd = private2dd(file);
207
208 avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
209 return simple_read_from_buffer(buf, count, ppos, counters, avail);
210}
211
212static const struct file_operations portcntr_ops[] = {
213 { .read = portnames_read, },
214 { .read = portcntrs_1_read, },
215 { .read = portcntrs_2_read, },
216};
217
218/*
219 * read the per-port QSFP data for port 1 (pidx 0)
220 */
221static ssize_t qsfp_1_read(struct file *file, char __user *buf,
222 size_t count, loff_t *ppos)
223{
224 struct qib_devdata *dd = private2dd(file);
225 char *tmp;
226 int ret;
227
228 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
229 if (!tmp)
230 return -ENOMEM;
231
232 ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
233 if (ret > 0)
234 ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
235 kfree(tmp);
236 return ret;
237}
238
239/*
240 * read the per-port QSFP data for port 2 (pidx 1)
241 */
242static ssize_t qsfp_2_read(struct file *file, char __user *buf,
243 size_t count, loff_t *ppos)
244{
245 struct qib_devdata *dd = private2dd(file);
246 char *tmp;
247 int ret;
248
249 if (dd->num_pports < 2)
250 return -ENODEV;
251
252 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
253 if (!tmp)
254 return -ENOMEM;
255
256 ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
257 if (ret > 0)
258 ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
259 kfree(tmp);
260 return ret;
261}
262
263static const struct file_operations qsfp_ops[] = {
264 { .read = qsfp_1_read, },
265 { .read = qsfp_2_read, },
266};
267
268static ssize_t flash_read(struct file *file, char __user *buf,
269 size_t count, loff_t *ppos)
270{
271 struct qib_devdata *dd;
272 ssize_t ret;
273 loff_t pos;
274 char *tmp;
275
276 pos = *ppos;
277
278 if (pos < 0) {
279 ret = -EINVAL;
280 goto bail;
281 }
282
283 if (pos >= sizeof(struct qib_flash)) {
284 ret = 0;
285 goto bail;
286 }
287
288 if (count > sizeof(struct qib_flash) - pos)
289 count = sizeof(struct qib_flash) - pos;
290
291 tmp = kmalloc(count, GFP_KERNEL);
292 if (!tmp) {
293 ret = -ENOMEM;
294 goto bail;
295 }
296
297 dd = private2dd(file);
298 if (qib_eeprom_read(dd, pos, tmp, count)) {
299 qib_dev_err(dd, "failed to read from flash\n");
300 ret = -ENXIO;
301 goto bail_tmp;
302 }
303
304 if (copy_to_user(buf, tmp, count)) {
305 ret = -EFAULT;
306 goto bail_tmp;
307 }
308
309 *ppos = pos + count;
310 ret = count;
311
312bail_tmp:
313 kfree(tmp);
314
315bail:
316 return ret;
317}
318
319static ssize_t flash_write(struct file *file, const char __user *buf,
320 size_t count, loff_t *ppos)
321{
322 struct qib_devdata *dd;
323 ssize_t ret;
324 loff_t pos;
325 char *tmp;
326
327 pos = *ppos;
328
329 if (pos != 0) {
330 ret = -EINVAL;
331 goto bail;
332 }
333
334 if (count != sizeof(struct qib_flash)) {
335 ret = -EINVAL;
336 goto bail;
337 }
338
339 tmp = kmalloc(count, GFP_KERNEL);
340 if (!tmp) {
341 ret = -ENOMEM;
342 goto bail;
343 }
344
345 if (copy_from_user(tmp, buf, count)) {
346 ret = -EFAULT;
347 goto bail_tmp;
348 }
349
350 dd = private2dd(file);
351 if (qib_eeprom_write(dd, pos, tmp, count)) {
352 ret = -ENXIO;
353 qib_dev_err(dd, "failed to write to flash\n");
354 goto bail_tmp;
355 }
356
357 *ppos = pos + count;
358 ret = count;
359
360bail_tmp:
361 kfree(tmp);
362
363bail:
364 return ret;
365}
366
367static const struct file_operations flash_ops = {
368 .read = flash_read,
369 .write = flash_write,
370};
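/*
 * The flash file behaves as a fixed-size image of struct qib_flash:
 * reads beyond its size return 0 (EOF), and a write is only accepted as
 * one full-sized image at offset 0 (see flash_read/flash_write above).
 */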
371
372static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
373{
374 struct dentry *dir, *tmp;
375 char unit[10];
376 int ret, i;
377
378 /* create the per-unit directory */
379 snprintf(unit, sizeof unit, "%u", dd->unit);
380 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
381 &simple_dir_operations, dd);
382 if (ret) {
383 printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
384 goto bail;
385 }
386
387 /* create the files in the new directory */
388 ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
389 &cntr_ops[0], dd);
390 if (ret) {
391 printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
392 unit, ret);
393 goto bail;
394 }
395 ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
396 &cntr_ops[1], dd);
397 if (ret) {
398 printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
399 unit, ret);
400 goto bail;
401 }
402 ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
403 &portcntr_ops[0], dd);
404 if (ret) {
405 printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
406 unit, "portcounter_names", ret);
407 goto bail;
408 }
409 for (i = 1; i <= dd->num_pports; i++) {
410 char fname[24];
411
412 sprintf(fname, "port%dcounters", i);
413 /* create the files in the new directory */
414 ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
415 &portcntr_ops[i], dd);
416 if (ret) {
417 printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
418 unit, fname, ret);
419 goto bail;
420 }
421 if (!(dd->flags & QIB_HAS_QSFP))
422 continue;
423 sprintf(fname, "qsfp%d", i);
424 ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
425 &qsfp_ops[i - 1], dd);
426 if (ret) {
427 printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
428 unit, fname, ret);
429 goto bail;
430 }
431 }
432
433 ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
434 &flash_ops, dd);
435 if (ret)
436 printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
437 unit, ret);
438bail:
439 return ret;
440}
441
442static int remove_file(struct dentry *parent, char *name)
443{
444 struct dentry *tmp;
445 int ret;
446
447 tmp = lookup_one_len(name, parent, strlen(name));
448
449 if (IS_ERR(tmp)) {
450 ret = PTR_ERR(tmp);
451 goto bail;
452 }
453
454 spin_lock(&dcache_lock);
455 spin_lock(&tmp->d_lock);
456 if (!(d_unhashed(tmp) && tmp->d_inode)) {
457 dget_locked(tmp);
458 __d_drop(tmp);
459 spin_unlock(&tmp->d_lock);
460 spin_unlock(&dcache_lock);
461 simple_unlink(parent->d_inode, tmp);
462 } else {
463 spin_unlock(&tmp->d_lock);
464 spin_unlock(&dcache_lock);
465 }
466
467 ret = 0;
468bail:
469 /*
470 * We don't expect clients to care about the return value, but
471 * it's there if they need it.
472 */
473 return ret;
474}
475
476static int remove_device_files(struct super_block *sb,
477 struct qib_devdata *dd)
478{
479 struct dentry *dir, *root;
480 char unit[10];
481 int ret, i;
482
483 root = dget(sb->s_root);
484 mutex_lock(&root->d_inode->i_mutex);
485 snprintf(unit, sizeof unit, "%u", dd->unit);
486 dir = lookup_one_len(unit, root, strlen(unit));
487
488 if (IS_ERR(dir)) {
489 ret = PTR_ERR(dir);
490 printk(KERN_ERR "Lookup of %s failed\n", unit);
491 goto bail;
492 }
493
494 remove_file(dir, "counters");
495 remove_file(dir, "counter_names");
496 remove_file(dir, "portcounter_names");
497 for (i = 0; i < dd->num_pports; i++) {
498 char fname[24];
499
500 sprintf(fname, "port%dcounters", i + 1);
501 remove_file(dir, fname);
502 if (dd->flags & QIB_HAS_QSFP) {
503 sprintf(fname, "qsfp%d", i + 1);
504 remove_file(dir, fname);
505 }
506 }
507 remove_file(dir, "flash");
508 d_delete(dir);
509 ret = simple_rmdir(root->d_inode, dir);
510
511bail:
512 mutex_unlock(&root->d_inode->i_mutex);
513 dput(root);
514 return ret;
515}
516
517/*
518 * This fills everything in when the fs is mounted, to handle umount/mount
519 * after device init. The direct add_cntr_files() call handles adding
520 * them from the init code, when the fs is already mounted.
521 */
522static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
523{
524 struct qib_devdata *dd, *tmp;
525 unsigned long flags;
526 int ret;
527
528 static struct tree_descr files[] = {
529 [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
530 [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
531 {""},
532 };
533
534 ret = simple_fill_super(sb, QIBFS_MAGIC, files);
535 if (ret) {
536 printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
537 goto bail;
538 }
539
540 spin_lock_irqsave(&qib_devs_lock, flags);
541
542 list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
543 spin_unlock_irqrestore(&qib_devs_lock, flags);
544 ret = add_cntr_files(sb, dd);
545 if (ret)
546 goto bail;
547 spin_lock_irqsave(&qib_devs_lock, flags);
548 }
549
550 spin_unlock_irqrestore(&qib_devs_lock, flags);
551
552bail:
553 return ret;
554}
555
556static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
557 const char *dev_name, void *data, struct vfsmount *mnt)
558{
559 int ret = get_sb_single(fs_type, flags, data,
560 qibfs_fill_super, mnt);
561 if (ret >= 0)
562 qib_super = mnt->mnt_sb;
563 return ret;
564}
565
566static void qibfs_kill_super(struct super_block *s)
567{
568 kill_litter_super(s);
569 qib_super = NULL;
570}
571
572int qibfs_add(struct qib_devdata *dd)
573{
574 int ret;
575
576 /*
577 * On first unit initialized, qib_super will not yet exist
578 * because nobody has yet tried to mount the filesystem, so
579 * we can't consider that to be an error; if an error occurs
580 * during the mount, that will get a complaint, so this is OK.
581 * add_cntr_files() for all units is done at mount from
582 * qibfs_fill_super(), so one way or another, everything works.
583 */
584 if (qib_super == NULL)
585 ret = 0;
586 else
587 ret = add_cntr_files(qib_super, dd);
588 return ret;
589}
590
591int qibfs_remove(struct qib_devdata *dd)
592{
593 int ret = 0;
594
595 if (qib_super)
596 ret = remove_device_files(qib_super, dd);
597
598 return ret;
599}
600
601static struct file_system_type qibfs_fs_type = {
602 .owner = THIS_MODULE,
603 .name = "ipathfs",
604 .get_sb = qibfs_get_sb,
605 .kill_sb = qibfs_kill_super,
606};
607
608int __init qib_init_qibfs(void)
609{
610 return register_filesystem(&qibfs_fs_type);
611}
612
613int __exit qib_exit_qibfs(void)
614{
615 return unregister_filesystem(&qibfs_fs_type);
616}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
new file mode 100644
index 000000000000..1eadadc13da8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -0,0 +1,3576 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34/*
35 * This file contains all of the code that is specific to the
36 * QLogic_IB 6120 PCIe chip.
37 */
38
39#include <linux/interrupt.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <rdma/ib_verbs.h>
43
44#include "qib.h"
45#include "qib_6120_regs.h"
46
47static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
48static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
49static u8 qib_6120_phys_portstate(u64);
50static u32 qib_6120_iblink_state(u64);
51
52/*
53 * This file contains all the chip-specific register information and
54 * access functions for the QLogic QLogic_IB PCI-Express chip.
55 *
56 */
57
58/* KREG_IDX uses machine-generated #defines */
59#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
60
61/* Use defines to tie machine-generated names to lower-case names */
62#define kr_extctrl KREG_IDX(EXTCtrl)
63#define kr_extstatus KREG_IDX(EXTStatus)
64#define kr_gpio_clear KREG_IDX(GPIOClear)
65#define kr_gpio_mask KREG_IDX(GPIOMask)
66#define kr_gpio_out KREG_IDX(GPIOOut)
67#define kr_gpio_status KREG_IDX(GPIOStatus)
68#define kr_rcvctrl KREG_IDX(RcvCtrl)
69#define kr_sendctrl KREG_IDX(SendCtrl)
70#define kr_partitionkey KREG_IDX(RcvPartitionKey)
71#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
72#define kr_ibcstatus KREG_IDX(IBCStatus)
73#define kr_ibcctrl KREG_IDX(IBCCtrl)
74#define kr_sendbuffererror KREG_IDX(SendBufErr0)
75#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
76#define kr_counterregbase KREG_IDX(CntrRegBase)
77#define kr_palign KREG_IDX(PageAlign)
78#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
79#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
80#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
81#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
82#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
83#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
84#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
85#define kr_scratch KREG_IDX(Scratch)
86#define kr_sendctrl KREG_IDX(SendCtrl)
87#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
88#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
89#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
90#define kr_sendpiosize KREG_IDX(SendPIOSize)
91#define kr_sendregbase KREG_IDX(SendRegBase)
92#define kr_userregbase KREG_IDX(UserRegBase)
93#define kr_control KREG_IDX(Control)
94#define kr_intclear KREG_IDX(IntClear)
95#define kr_intmask KREG_IDX(IntMask)
96#define kr_intstatus KREG_IDX(IntStatus)
97#define kr_errclear KREG_IDX(ErrClear)
98#define kr_errmask KREG_IDX(ErrMask)
99#define kr_errstatus KREG_IDX(ErrStatus)
100#define kr_hwerrclear KREG_IDX(HwErrClear)
101#define kr_hwerrmask KREG_IDX(HwErrMask)
102#define kr_hwerrstatus KREG_IDX(HwErrStatus)
103#define kr_revision KREG_IDX(Revision)
104#define kr_portcnt KREG_IDX(PortCnt)
105#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
106#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
107#define kr_serdes_stat KREG_IDX(SerdesStat)
108#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
109
110/* These must only be written via qib_write_kreg_ctxt() */
111#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
112#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
113
114#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
115 QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
116
117#define cr_badformat CREG_IDX(RxBadFormatCnt)
118#define cr_erricrc CREG_IDX(RxICRCErrCnt)
119#define cr_errlink CREG_IDX(RxLinkProblemCnt)
120#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
121#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
122#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
123#define cr_err_rlen CREG_IDX(RxLenErrCnt)
124#define cr_errslen CREG_IDX(TxLenErrCnt)
125#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
126#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
127#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
128#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
129#define cr_lbint CREG_IDX(LBIntCnt)
130#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
131#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
132#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
133#define cr_pktrcv CREG_IDX(RxDataPktCnt)
134#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
135#define cr_pktsend CREG_IDX(TxDataPktCnt)
136#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
137#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
138#define cr_rcvebp CREG_IDX(RxEBPCnt)
139#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
140#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
141#define cr_sendstall CREG_IDX(TxFlowStallCnt)
142#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
143#define cr_wordrcv CREG_IDX(RxDwordCnt)
144#define cr_wordsend CREG_IDX(TxDwordCnt)
145#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
146#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
147#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
148#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
149#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
150
151#define SYM_RMASK(regname, fldname) ((u64) \
152 QIB_6120_##regname##_##fldname##_RMASK)
153#define SYM_MASK(regname, fldname) ((u64) \
154 QIB_6120_##regname##_##fldname##_RMASK << \
155 QIB_6120_##regname##_##fldname##_LSB)
156#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
157
158#define SYM_FIELD(value, regname, fldname) ((u64) \
159 (((value) >> SYM_LSB(regname, fldname)) & \
160 SYM_RMASK(regname, fldname)))
161#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
162#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
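/*
 * For example, ERR_MASK(RcvICRCErr) expands to
 * SYM_MASK(ErrMask, RcvICRCErrMask): the field's RMASK shifted up to its
 * LSB position in the ErrMask register, while SYM_FIELD() goes the other
 * way and extracts a field from a register value.
 */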
163
164/* link training states, from IBC */
165#define IB_6120_LT_STATE_DISABLED 0x00
166#define IB_6120_LT_STATE_LINKUP 0x01
167#define IB_6120_LT_STATE_POLLACTIVE 0x02
168#define IB_6120_LT_STATE_POLLQUIET 0x03
169#define IB_6120_LT_STATE_SLEEPDELAY 0x04
170#define IB_6120_LT_STATE_SLEEPQUIET 0x05
171#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
172#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
173#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
174#define IB_6120_LT_STATE_CFGIDLE 0x0b
175#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
176#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
177#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
178
179/* link state machine states from IBC */
180#define IB_6120_L_STATE_DOWN 0x0
181#define IB_6120_L_STATE_INIT 0x1
182#define IB_6120_L_STATE_ARM 0x2
183#define IB_6120_L_STATE_ACTIVE 0x3
184#define IB_6120_L_STATE_ACT_DEFER 0x4
185
186static const u8 qib_6120_physportstate[0x20] = {
187 [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
188 [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
189 [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
190 [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
191 [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
192 [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
193 [IB_6120_LT_STATE_CFGDEBOUNCE] =
194 IB_PHYSPORTSTATE_CFG_TRAIN,
195 [IB_6120_LT_STATE_CFGRCVFCFG] =
196 IB_PHYSPORTSTATE_CFG_TRAIN,
197 [IB_6120_LT_STATE_CFGWAITRMT] =
198 IB_PHYSPORTSTATE_CFG_TRAIN,
199 [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
200 [IB_6120_LT_STATE_RECOVERRETRAIN] =
201 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
202 [IB_6120_LT_STATE_RECOVERWAITRMT] =
203 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
204 [IB_6120_LT_STATE_RECOVERIDLE] =
205 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
206 [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
207 [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
208 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
209 [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
210 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
211 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
212 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
213 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
214};
215
216
217struct qib_chip_specific {
218 u64 __iomem *cregbase;
219 u64 *cntrs;
220 u64 *portcntrs;
221 void *dummy_hdrq; /* used after ctxt close */
222 dma_addr_t dummy_hdrq_phys;
223 spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
224 spinlock_t user_tid_lock; /* no back to back user TID writes */
225 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
226 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
227 u64 hwerrmask;
228 u64 errormask;
229 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
230 u64 gpio_mask; /* shadow the gpio mask register */
231 u64 extctrl; /* shadow the gpio output enable, etc... */
232 /*
233 * these 5 fields are used to establish deltas for IB symbol
234 * errors and linkrecovery errors. They can be reported on
235 * some chips during link negotiation prior to INIT, and with
236 * DDR when faking DDR negotiations with non-IBTA switches.
237 * The chip counters are adjusted at driver unload if there is
238 * a non-zero delta.
239 */
240 u64 ibdeltainprog;
241 u64 ibsymdelta;
242 u64 ibsymsnap;
243 u64 iblnkerrdelta;
244 u64 iblnkerrsnap;
245 u64 ibcctrl; /* shadow for kr_ibcctrl */
246 u32 lastlinkrecov; /* link recovery issue */
247 int irq;
248 u32 cntrnamelen;
249 u32 portcntrnamelen;
250 u32 ncntrs;
251 u32 nportcntrs;
252 /* used with gpio interrupts to implement IB counters */
253 u32 rxfc_unsupvl_errs;
254 u32 overrun_thresh_errs;
255 /*
256 * these count only cases where _successive_ LocalLinkIntegrity
257 * errors were seen in the receive headers of IB standard packets
258 */
259 u32 lli_errs;
260 u32 lli_counter;
261 u64 lli_thresh;
262 u64 sword; /* total dwords sent (sample result) */
263 u64 rword; /* total dwords received (sample result) */
264 u64 spkts; /* total packets sent (sample result) */
265 u64 rpkts; /* total packets received (sample result) */
266 u64 xmit_wait; /* # of ticks no data sent (sample result) */
267 struct timer_list pma_timer;
268 char emsgbuf[128];
269 char bitsmsgbuf[64];
270 u8 pma_sample_status;
271};
272
273/* ibcctrl bits */
274#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
275/* cycle through TS1/TS2 till OK */
276#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
277/* wait for TS1, then go on */
278#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
279#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
280
281#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
282#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
283#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
284#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
285
286/*
287 * We could have a single register get/put routine that takes a group type,
288 * but this is somewhat clearer and cleaner. It also gives us some error
289 * checking. 64 bit register reads should always work, but are inefficient
290 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
291 * so we use kreg32 wherever possible. User register and counter register
292 * reads are always 32 bit reads, so only one form of those routines.
293 */
294
295/**
296 * qib_read_ureg32 - read 32-bit virtualized per-context register
297 * @dd: device
298 * @regno: register number
299 * @ctxt: context number
300 *
301 * Return the contents of a register that is virtualized to be per context.
302 * Returns 0 if the device is not present (not distinguishable from
303 * valid contents at runtime; a separate error flag may be added later).
304 */
305static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
306 enum qib_ureg regno, int ctxt)
307{
308 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
309 return 0;
310
311 if (dd->userbase)
312 return readl(regno + (u64 __iomem *)
313 ((char __iomem *)dd->userbase +
314 dd->ureg_align * ctxt));
315 else
316 return readl(regno + (u64 __iomem *)
317 (dd->uregbase +
318 (char __iomem *)dd->kregbase +
319 dd->ureg_align * ctxt));
320}
321
322/**
323 * qib_write_ureg - write virtualized per-context register
324 * @dd: device
325 * @regno: register number
326 * @value: value
327 * @ctxt: context
328 *
329 * Write the contents of a register that is virtualized to be per context.
330 */
331static inline void qib_write_ureg(const struct qib_devdata *dd,
332 enum qib_ureg regno, u64 value, int ctxt)
333{
334 u64 __iomem *ubase;
335 if (dd->userbase)
336 ubase = (u64 __iomem *)
337 ((char __iomem *) dd->userbase +
338 dd->ureg_align * ctxt);
339 else
340 ubase = (u64 __iomem *)
341 (dd->uregbase +
342 (char __iomem *) dd->kregbase +
343 dd->ureg_align * ctxt);
344
345 if (dd->kregbase && (dd->flags & QIB_PRESENT))
346 writeq(value, &ubase[regno]);
347}
348
349static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
350 const u16 regno)
351{
352 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
353 return -1;
354 return readl((u32 __iomem *)&dd->kregbase[regno]);
355}
356
357static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
358 const u16 regno)
359{
360 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
361 return -1;
362
363 return readq(&dd->kregbase[regno]);
364}
365
366static inline void qib_write_kreg(const struct qib_devdata *dd,
367 const u16 regno, u64 value)
368{
369 if (dd->kregbase && (dd->flags & QIB_PRESENT))
370 writeq(value, &dd->kregbase[regno]);
371}
372
373/**
374 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
375 * @dd: the qlogic_ib device
376 * @regno: the register number to write
377 * @ctxt: the context containing the register
378 * @value: the value to write
379 */
380static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
381 const u16 regno, unsigned ctxt,
382 u64 value)
383{
384 qib_write_kreg(dd, regno + ctxt, value);
385}
386
387static inline void write_6120_creg(const struct qib_devdata *dd,
388 u16 regno, u64 value)
389{
390 if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
391 writeq(value, &dd->cspec->cregbase[regno]);
392}
393
394static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
395{
396 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
397 return 0;
398 return readq(&dd->cspec->cregbase[regno]);
399}
400
401static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
402{
403 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
404 return 0;
405 return readl(&dd->cspec->cregbase[regno]);
406}
407
408/* kr_control bits */
409#define QLOGIC_IB_C_RESET 1U
410
411/* kr_intstatus, kr_intclear, kr_intmask bits */
412#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
413#define QLOGIC_IB_I_RCVURG_SHIFT 0
414#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
415#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
416
417#define QLOGIC_IB_C_FREEZEMODE 0x00000002
418#define QLOGIC_IB_C_LINKENABLE 0x00000004
419#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
420#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
421#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
422#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
423#define QLOGIC_IB_I_BITSEXTANT \
424 ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
425 (QLOGIC_IB_I_RCVAVAIL_MASK << \
426 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
427 QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
428 QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
429
430/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
431#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
432#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
433#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
434#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
435#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
436#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
437#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
438#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
439#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
440#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
441#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
442#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
443
444
445/* kr_extstatus bits */
446#define QLOGIC_IB_EXTS_FREQSEL 0x2
447#define QLOGIC_IB_EXTS_SERDESSEL 0x4
448#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
449#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
450
451/* kr_xgxsconfig bits */
452#define QLOGIC_IB_XGXS_RESET 0x5ULL
453
454#define _QIB_GPIO_SDA_NUM 1
455#define _QIB_GPIO_SCL_NUM 0
456
457/* Bits in GPIO for the added IB link interrupts */
458#define GPIO_RXUVL_BIT 3
459#define GPIO_OVRUN_BIT 4
460#define GPIO_LLI_BIT 5
461#define GPIO_ERRINTR_MASK 0x38
462
463
464#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
465#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
466 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
467#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
468#define QLOGIC_IB_RT_IS_VALID(tid) \
469 (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
470 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
471#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
472#define QLOGIC_IB_RT_ADDR_SHIFT 10
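/*
 * As encoded by the macros above, an expected-TID entry appears to pack
 * a 3-bit buffer-size code in bits 31:29 (2^(code + 10) bytes) and a
 * 29-bit address field; a size code of 0 or 7 marks the entry invalid
 * (see QLOGIC_IB_RT_IS_VALID).
 */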
473
474#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
475#define QLOGIC_IB_R_TAILUPD_SHIFT 31
476#define IBA6120_R_PKEY_DIS_SHIFT 30
477
478#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
479
480#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
481#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
482
483#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
484 ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
485
486#define TXEMEMPARITYERR_PIOBUF \
487 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
488#define TXEMEMPARITYERR_PIOPBC \
489 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
490#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
491 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
492
493#define RXEMEMPARITYERR_RCVBUF \
494 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
495#define RXEMEMPARITYERR_LOOKUPQ \
496 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
497#define RXEMEMPARITYERR_EXPTID \
498 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
499#define RXEMEMPARITYERR_EAGERTID \
500 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
501#define RXEMEMPARITYERR_FLAGBUF \
502 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
503#define RXEMEMPARITYERR_DATAINFO \
504 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
505#define RXEMEMPARITYERR_HDRINFO \
506 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
507
508/* 6120 specific hardware errors... */
509static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
510 /* generic hardware errors */
511 QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
512 QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
513
514 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
515 "TXE PIOBUF Memory Parity"),
516 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
517 "TXE PIOPBC Memory Parity"),
518 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
519 "TXE PIOLAUNCHFIFO Memory Parity"),
520
521 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
522 "RXE RCVBUF Memory Parity"),
523 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
524 "RXE LOOKUPQ Memory Parity"),
525 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
526 "RXE EAGERTID Memory Parity"),
527 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
528 "RXE EXPTID Memory Parity"),
529 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
530 "RXE FLAGBUF Memory Parity"),
531 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
532 "RXE DATAINFO Memory Parity"),
533 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
534 "RXE HDRINFO Memory Parity"),
535
536 /* chip-specific hardware errors */
537 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
538 "PCIe Poisoned TLP"),
539 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
540 "PCIe completion timeout"),
541 /*
542	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
543 * parity or memory parity error failures, because most likely we
544 * won't be able to talk to the core of the chip. Nonetheless, we
545 * might see them, if they are in parts of the PCIe core that aren't
546 * essential.
547 */
548 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
549 "PCIePLL1"),
550 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
551 "PCIePLL0"),
552 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
553 "PCIe XTLH core parity"),
554 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
555 "PCIe ADM TX core parity"),
556 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
557 "PCIe ADM RX core parity"),
558 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
559 "SerDes PLL"),
560};
561
562#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
563#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
564 QLOGIC_IB_HWE_COREPLL_RFSLIP)
565
566 /* variables for sanity checking interrupt and errors */
567#define IB_HWE_BITSEXTANT \
568 (HWE_MASK(RXEMemParityErr) | \
569 HWE_MASK(TXEMemParityErr) | \
570 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
571 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
572 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
573 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
574 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
575 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
576 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
577 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
578 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
579 HWE_MASK(PowerOnBISTFailed) | \
580 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
581 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
582 QLOGIC_IB_HWE_SERDESPLLFAILED | \
583 HWE_MASK(IBCBusToSPCParityErr) | \
584 HWE_MASK(IBCBusFromSPCParityErr))
585
586#define IB_E_BITSEXTANT \
587 (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
588 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
589 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
590 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
591 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
592 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
593 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
594 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
595 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
596 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
597 ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
598 ERR_MASK(SendDroppedSmpPktErr) | \
599 ERR_MASK(SendDroppedDataPktErr) | \
600 ERR_MASK(SendPioArmLaunchErr) | \
601 ERR_MASK(SendUnexpectedPktNumErr) | \
602 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
603 ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
604 ERR_MASK(HardwareErr))
605
606#define QLOGIC_IB_E_PKTERRS ( \
607 ERR_MASK(SendPktLenErr) | \
608 ERR_MASK(SendDroppedDataPktErr) | \
609 ERR_MASK(RcvVCRCErr) | \
610 ERR_MASK(RcvICRCErr) | \
611 ERR_MASK(RcvShortPktLenErr) | \
612 ERR_MASK(RcvEBPErr))
613
614/* These are all rcv-related errors which we want to count for stats */
615#define E_SUM_PKTERRS \
616 (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
617 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
618 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
619 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
620 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
621 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
622
623/* These are all send-related errors which we want to count for stats */
624#define E_SUM_ERRS \
625 (ERR_MASK(SendPioArmLaunchErr) | \
626 ERR_MASK(SendUnexpectedPktNumErr) | \
627 ERR_MASK(SendDroppedDataPktErr) | \
628 ERR_MASK(SendDroppedSmpPktErr) | \
629 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
630 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
631 ERR_MASK(InvalidAddrErr))
632
633/*
634 * Similar to E_SUM_ERRS, but these are the send errors we can safely
635 * ignore while recovering from freeze mode and cancelling buffers.
636 * Armlaunch is excluded because more armlaunch errors can occur while
637 * still cleaning up, and those need to be cancelled as they happen.
638 */
639#define E_SPKT_ERRS_IGNORE \
640 (ERR_MASK(SendDroppedDataPktErr) | \
641 ERR_MASK(SendDroppedSmpPktErr) | \
642 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
643 ERR_MASK(SendPktLenErr))
644
645/*
646 * These are errors that can occur when the link changes state while
647 * a packet is being sent or received. This doesn't cover things
648 * like EBP or VCRC that can result from the sender seeing the link
649 * change state, so that we receive a "known bad" packet.
650 */
651#define E_SUM_LINK_PKTERRS \
652 (ERR_MASK(SendDroppedDataPktErr) | \
653 ERR_MASK(SendDroppedSmpPktErr) | \
654 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
655 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
656 ERR_MASK(RcvUnexpectedCharErr))
657
658static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
659 u32, unsigned long);
660
661/*
662 * On platforms using this chip, and not having ordered WC stores, we
663 * can get TXE parity errors due to speculative reads to the PIO buffers,
664 * and this, due to a chip issue, can result in (many) false parity error
665 * reports. So it's a debug print on those, and an info print on systems
666 * where the speculative reads don't occur.
667 */
668static void qib_6120_txe_recover(struct qib_devdata *dd)
669{
670 if (!qib_unordered_wc())
671 qib_devinfo(dd->pcidev,
672 "Recovering from TXE PIO parity error\n");
673}
674
675/* enable/disable chip from delivering interrupts */
676static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
677{
678 if (enable) {
679 if (dd->flags & QIB_BADINTR)
680 return;
681 qib_write_kreg(dd, kr_intmask, ~0ULL);
682 /* force re-interrupt of any pending interrupts. */
683 qib_write_kreg(dd, kr_intclear, 0ULL);
684 } else
685 qib_write_kreg(dd, kr_intmask, 0ULL);
686}
687
688/*
689 * Try to cleanup as much as possible for anything that might have gone
690 * wrong while in freeze mode, such as pio buffers being written by user
691 * processes (causing armlaunch), send errors due to going into freeze mode,
692 * etc., and try to avoid causing extra interrupts while doing so.
693 * Forcibly update the in-memory pioavail register copies after cleanup
694 * because the chip won't do it while in freeze mode (the register values
695 * themselves are kept correct).
696 * Make sure that we don't lose any important interrupts by using the chip
697 * feature that says that writing 0 to a bit in *clear that is set in
698 * *status will cause an interrupt to be generated again (if allowed by
699 * the *mask value).
700 * This is in chip-specific code because of all of the register accesses,
701 * even though the details are similar on most chips
702 */
703static void qib_6120_clear_freeze(struct qib_devdata *dd)
704{
705 /* disable error interrupts, to avoid confusion */
706 qib_write_kreg(dd, kr_errmask, 0ULL);
707
708 /* also disable interrupts; errormask is sometimes overwritten */
709 qib_6120_set_intr_state(dd, 0);
710
711 qib_cancel_sends(dd->pport);
712
713 /* clear the freeze, and be sure chip saw it */
714 qib_write_kreg(dd, kr_control, dd->control);
715 qib_read_kreg32(dd, kr_scratch);
716
717 /* force in-memory update now we are out of freeze */
718 qib_force_pio_avail_update(dd);
719
720 /*
721 * force new interrupt if any hwerr, error or interrupt bits are
722 * still set, and clear "safe" send packet errors related to freeze
723 * and cancelling sends. Re-enable error interrupts before possible
724 * force of re-interrupt on pending interrupts.
725 */
726 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
727 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
728 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
729 qib_6120_set_intr_state(dd, 1);
730}
731
732/**
733 * qib_handle_6120_hwerrors - display hardware errors.
734 * @dd: the qlogic_ib device
735 * @msg: the output buffer
736 * @msgl: the size of the output buffer
737 *
738 * Most hardware errors are catastrophic, but for right now,
739 * we'll print them and continue. Use the same message buffer as
740 * handle_6120_errors() (the regular error handler) to avoid
741 * excessive stack usage.
742 */
743static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
744 size_t msgl)
745{
746 u64 hwerrs;
747 u32 bits, ctrl;
748 int isfatal = 0;
749 char *bitsmsg;
750 int log_idx;
751
752 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
753 if (!hwerrs)
754 return;
755 if (hwerrs == ~0ULL) {
756 qib_dev_err(dd, "Read of hardware error status failed "
757 "(all bits set); ignoring\n");
758 return;
759 }
760 qib_stats.sps_hwerrs++;
761
762 /* Always clear the error status register, except MEMBISTFAIL,
763 * regardless of whether we continue or stop using the chip.
764 * We want that set so we know it failed, even across driver reload.
765 * We'll still ignore it in the hwerrmask. We do this partly for
766 * diagnostics, but also for support. */
767 qib_write_kreg(dd, kr_hwerrclear,
768 hwerrs & ~HWE_MASK(PowerOnBISTFailed));
769
770 hwerrs &= dd->cspec->hwerrmask;
771
772 /* We log some errors to EEPROM, check if we have any of those. */
773 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
774 if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
775 qib_inc_eeprom_err(dd, log_idx, 1);
776
777 /*
778 * Make sure we get this much out, unless told to be quiet,
779 * or it's occurred within the last 5 seconds.
780 */
781 if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
782 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
783 "(cleared)\n", (unsigned long long) hwerrs);
784
785 if (hwerrs & ~IB_HWE_BITSEXTANT)
786 qib_dev_err(dd, "hwerror interrupt with unknown errors "
787 "%llx set\n", (unsigned long long)
788 (hwerrs & ~IB_HWE_BITSEXTANT));
789
790 ctrl = qib_read_kreg32(dd, kr_control);
791 if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
792 /*
793 * Parity errors in send memory are recoverable,
794 * just cancel the send (if indicated in sendbuffererror),
795 * count the occurrence, unfreeze (if no other handled
796 * hardware error bits are set), and continue. They can
797 * occur if a processor speculative read is done to the PIO
798 * buffer while we are sending a packet, for example.
799 */
800 if (hwerrs & TXE_PIO_PARITY) {
801 qib_6120_txe_recover(dd);
802 hwerrs &= ~TXE_PIO_PARITY;
803 }
804
805 if (!hwerrs) {
806 static u32 freeze_cnt;
807
808 freeze_cnt++;
809 qib_6120_clear_freeze(dd);
810 } else
811 isfatal = 1;
812 }
813
814 *msg = '\0';
815
816 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
817 isfatal = 1;
818 strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
819 " unusable]", msgl);
820 /* ignore from now on, so disable until driver reloaded */
821 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
822 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
823 }
824
825 qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
826 ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
827
828 bitsmsg = dd->cspec->bitsmsgbuf;
829 if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
830 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
831 bits = (u32) ((hwerrs >>
832 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
833 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
834 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
835 "[PCIe Mem Parity Errs %x] ", bits);
836 strlcat(msg, bitsmsg, msgl);
837 }
838
839 if (hwerrs & _QIB_PLL_FAIL) {
840 isfatal = 1;
841 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
842 "[PLL failed (%llx), InfiniPath hardware unusable]",
843 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
844 strlcat(msg, bitsmsg, msgl);
845 /* ignore from now on, so disable until driver reloaded */
846 dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
847 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
848 }
849
850 if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
851 /*
852 * If it occurs, it is left masked since the external
853 * interface is unused
854 */
855 dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
856 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
857 }
858
859 if (hwerrs)
860 /*
861 * if any set that we aren't ignoring; only
862 * make the complaint once, in case it's stuck
863 * or recurring, and we get here multiple
864 * times.
865 */
866 qib_dev_err(dd, "%s hardware error\n", msg);
867 else
868 *msg = 0; /* recovered from all of them */
869
870 if (isfatal && !dd->diag_client) {
871 qib_dev_err(dd, "Fatal Hardware Error, no longer"
872 " usable, SN %.16s\n", dd->serial);
873 /*
874 * for /sys status file and user programs to print; if no
875 * trailing brace is copied, we'll know it was truncated.
876 */
877 if (dd->freezemsg)
878 snprintf(dd->freezemsg, dd->freezelen,
879 "{%s}", msg);
880 qib_disable_after_error(dd);
881 }
882}
883
884/*
885 * Decode the error status into strings, deciding whether to always
886 * print it or not depending on "normal packet errors" vs everything
887 * else. Return 1 if "real" errors, otherwise 0 if only packet
888 * errors, so caller can decide what to print with the string.
889 */
890static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
891 u64 err)
892{
893 int iserr = 1;
894
895 *buf = '\0';
896 if (err & QLOGIC_IB_E_PKTERRS) {
897 if (!(err & ~QLOGIC_IB_E_PKTERRS))
898 iserr = 0;
899 if ((err & ERR_MASK(RcvICRCErr)) &&
900 !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
901 strlcat(buf, "CRC ", blen);
902 if (!iserr)
903 goto done;
904 }
905 if (err & ERR_MASK(RcvHdrLenErr))
906 strlcat(buf, "rhdrlen ", blen);
907 if (err & ERR_MASK(RcvBadTidErr))
908 strlcat(buf, "rbadtid ", blen);
909 if (err & ERR_MASK(RcvBadVersionErr))
910 strlcat(buf, "rbadversion ", blen);
911 if (err & ERR_MASK(RcvHdrErr))
912 strlcat(buf, "rhdr ", blen);
913 if (err & ERR_MASK(RcvLongPktLenErr))
914 strlcat(buf, "rlongpktlen ", blen);
915 if (err & ERR_MASK(RcvMaxPktLenErr))
916 strlcat(buf, "rmaxpktlen ", blen);
917 if (err & ERR_MASK(RcvMinPktLenErr))
918 strlcat(buf, "rminpktlen ", blen);
919 if (err & ERR_MASK(SendMinPktLenErr))
920 strlcat(buf, "sminpktlen ", blen);
921 if (err & ERR_MASK(RcvFormatErr))
922 strlcat(buf, "rformaterr ", blen);
923 if (err & ERR_MASK(RcvUnsupportedVLErr))
924 strlcat(buf, "runsupvl ", blen);
925 if (err & ERR_MASK(RcvUnexpectedCharErr))
926 strlcat(buf, "runexpchar ", blen);
927 if (err & ERR_MASK(RcvIBFlowErr))
928 strlcat(buf, "ribflow ", blen);
929 if (err & ERR_MASK(SendUnderRunErr))
930 strlcat(buf, "sunderrun ", blen);
931 if (err & ERR_MASK(SendPioArmLaunchErr))
932 strlcat(buf, "spioarmlaunch ", blen);
933 if (err & ERR_MASK(SendUnexpectedPktNumErr))
934 strlcat(buf, "sunexperrpktnum ", blen);
935 if (err & ERR_MASK(SendDroppedSmpPktErr))
936 strlcat(buf, "sdroppedsmppkt ", blen);
937 if (err & ERR_MASK(SendMaxPktLenErr))
938 strlcat(buf, "smaxpktlen ", blen);
939 if (err & ERR_MASK(SendUnsupportedVLErr))
940 strlcat(buf, "sunsupVL ", blen);
941 if (err & ERR_MASK(InvalidAddrErr))
942 strlcat(buf, "invalidaddr ", blen);
943 if (err & ERR_MASK(RcvEgrFullErr))
944 strlcat(buf, "rcvegrfull ", blen);
945 if (err & ERR_MASK(RcvHdrFullErr))
946 strlcat(buf, "rcvhdrfull ", blen);
947 if (err & ERR_MASK(IBStatusChanged))
948 strlcat(buf, "ibcstatuschg ", blen);
949 if (err & ERR_MASK(RcvIBLostLinkErr))
950 strlcat(buf, "riblostlink ", blen);
951 if (err & ERR_MASK(HardwareErr))
952 strlcat(buf, "hardware ", blen);
953 if (err & ERR_MASK(ResetNegated))
954 strlcat(buf, "reset ", blen);
955done:
956 return iserr;
957}
958
959/*
960 * Called when we might have an error that is specific to a particular
961 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
962 */
963static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
964{
965 unsigned long sbuf[2];
966 struct qib_devdata *dd = ppd->dd;
967
968 /*
969 * It's possible that sendbuffererror could have bits set; we might
970 * have already done this as a result of hardware error handling.
971 */
972 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
973 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
974
975 if (sbuf[0] || sbuf[1])
976 qib_disarm_piobufs_set(dd, sbuf,
977 dd->piobcnt2k + dd->piobcnt4k);
978}
979
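/*
 * Check whether the chip's IB link-error-recovery counter has advanced
 * since our last snapshot; if so, force the link down and return 0 so
 * the caller skips the normal IB status-change handling.  The snapshot
 * is re-armed only once the link is ACTIVE again.
 */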
980static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
981{
982 int ret = 1;
983 u32 ibstate = qib_6120_iblink_state(ibcs);
984 u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
985
986 if (linkrecov != dd->cspec->lastlinkrecov) {
987 /* and no more until active again */
988 dd->cspec->lastlinkrecov = 0;
989 qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
990 ret = 0;
991 }
992 if (ibstate == IB_PORT_ACTIVE)
993 dd->cspec->lastlinkrecov =
994 read_6120_creg32(dd, cr_iblinkerrrecov);
995 return ret;
996}
997
998static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
999{
1000 char *msg;
1001 u64 ignore_this_time = 0;
1002 u64 iserr = 0;
1003 int log_idx;
1004 struct qib_pportdata *ppd = dd->pport;
1005 u64 mask;
1006
1007 /* don't report errors that are masked */
1008 errs &= dd->cspec->errormask;
1009 msg = dd->cspec->emsgbuf;
1010
1011 /* do these first, they are most important */
1012 if (errs & ERR_MASK(HardwareErr))
1013 qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1014 else
1015 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1016 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1017 qib_inc_eeprom_err(dd, log_idx, 1);
1018
1019 if (errs & ~IB_E_BITSEXTANT)
1020 qib_dev_err(dd, "error interrupt with unknown errors "
1021 "%llx set\n",
1022 (unsigned long long) (errs & ~IB_E_BITSEXTANT));
1023
1024 if (errs & E_SUM_ERRS) {
1025 qib_disarm_6120_senderrbufs(ppd);
1026 if ((errs & E_SUM_LINK_PKTERRS) &&
1027 !(ppd->lflags & QIBL_LINKACTIVE)) {
1028 /*
1029 * This can happen when trying to bring the link
1030 * up, but the IB link changes state at the "wrong"
1031 * time. The IB logic then complains that the packet
1032 * isn't valid. We don't want to confuse people, so
1033 * we just don't print them, except at debug
1034 */
1035 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1036 }
1037 } else if ((errs & E_SUM_LINK_PKTERRS) &&
1038 !(ppd->lflags & QIBL_LINKACTIVE)) {
1039 /*
1040 * This can happen when SMA is trying to bring the link
1041 * up, but the IB link changes state at the "wrong" time.
1042 * The IB logic then complains that the packet isn't
1043 * valid. We don't want to confuse people, so we just
1044 * don't print them, except at debug
1045 */
1046 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1047 }
1048
1049 qib_write_kreg(dd, kr_errclear, errs);
1050
1051 errs &= ~ignore_this_time;
1052 if (!errs)
1053 goto done;
1054
1055 /*
1056 * The ones we mask off are handled specially below
1057 * or above.
1058 */
1059 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
1060 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
1061 qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
1062
1063 if (errs & E_SUM_PKTERRS)
1064 qib_stats.sps_rcverrs++;
1065 if (errs & E_SUM_ERRS)
1066 qib_stats.sps_txerrs++;
1067
1068 iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
1069
1070 if (errs & ERR_MASK(IBStatusChanged)) {
1071 u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
1072 u32 ibstate = qib_6120_iblink_state(ibcs);
1073 int handle = 1;
1074
1075 if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
1076 handle = chk_6120_linkrecovery(dd, ibcs);
1077 /*
1078 * Since going into a recovery state causes the link state
1079 * to go down and since recovery is transitory, it is better
1080 * if we "miss" ever seeing the link training state go into
1081 * recovery (i.e., ignore this transition for link state
1082 * special handling purposes) without updating lastibcstat.
1083 */
1084 if (handle && qib_6120_phys_portstate(ibcs) ==
1085 IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
1086 handle = 0;
1087 if (handle)
1088 qib_handle_e_ibstatuschanged(ppd, ibcs);
1089 }
1090
1091 if (errs & ERR_MASK(ResetNegated)) {
1092 qib_dev_err(dd, "Got reset, requires re-init "
1093 "(unload and reload driver)\n");
1094 dd->flags &= ~QIB_INITTED; /* needs re-init */
1095 /* mark as having had error */
1096 *dd->devstatusp |= QIB_STATUS_HWERROR;
1097 *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
1098 }
1099
1100 if (*msg && iserr)
1101 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1102
1103 if (ppd->state_wanted & ppd->lflags)
1104 wake_up_interruptible(&ppd->state_wait);
1105
1106 /*
1107 * If there were hdrq or egrfull errors, wake up any processes
1108 * waiting in poll. We used to try to check which contexts had
1109 * the overflow, but given the cost of that and the chip reads
1110 * to support it, it's better to just wake everybody up if we
1111 * get an overflow; waiters can poll again if it's not them.
1112 */
1113 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1114 qib_handle_urcv(dd, ~0U);
1115 if (errs & ERR_MASK(RcvEgrFullErr))
1116 qib_stats.sps_buffull++;
1117 else
1118 qib_stats.sps_hdrfull++;
1119 }
1120done:
1121 return;
1122}
1123
1124/**
1125 * qib_6120_init_hwerrors - enable hardware errors
1126 * @dd: the qlogic_ib device
1127 *
1128 * now that we have finished initializing everything that might reasonably
1129 * cause a hardware error, and cleared those error bits as they occur,
1130 * we can enable hardware errors in the mask (potentially enabling
1131 * freeze mode), and enable hardware errors as errors (along with
1132 * everything else) in errormask
1133 */
1134static void qib_6120_init_hwerrors(struct qib_devdata *dd)
1135{
1136 u64 val;
1137 u64 extsval;
1138
1139 extsval = qib_read_kreg64(dd, kr_extstatus);
1140
1141 if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
1142 qib_dev_err(dd, "MemBIST did not complete!\n");
1143
1144 /* init so all hwerrors interrupt, and enter freeze, adjust below */
1145 val = ~0ULL;
1146 if (dd->minrev < 2) {
1147 /*
1148 * Avoid problem with internal interface bus parity
1149 * checking. Fixed in Rev2.
1150 */
1151 val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
1152 }
1153 /* avoid a freeze-mode issue from speculative reads on some Intel CPUs */
1154 val &= ~TXEMEMPARITYERR_PIOBUF;
1155
1156 dd->cspec->hwerrmask = val;
1157
1158 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1159 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1160
1161 /* clear all */
1162 qib_write_kreg(dd, kr_errclear, ~0ULL);
1163 /* enable errors that are masked, at least this first time. */
1164 qib_write_kreg(dd, kr_errmask, ~0ULL);
1165 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1166 /* clear any interrupts up to this point (ints still not enabled) */
1167 qib_write_kreg(dd, kr_intclear, ~0ULL);
1168
1169 qib_write_kreg(dd, kr_rcvbthqp,
1170 dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
1171 QIB_KD_QP);
1172}
1173
1174/*
1175 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
1176 * on chips that are count-based, rather than trigger-based. There is no
1177 * reference counting, but that's also fine, given the intended use.
1178 * Only chip-specific because it's all register accesses
1179 */
1180static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
1181{
1182 if (enable) {
1183 qib_write_kreg(dd, kr_errclear,
1184 ERR_MASK(SendPioArmLaunchErr));
1185 dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1186 } else
1187 dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1188 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1189}
1190
1191/*
1192 * Formerly took parameter <which> in pre-shifted,
1193 * pre-merged form with LinkCmd and LinkInitCmd
1194 * together, and assuming the zero was NOP.
1195 */
1196static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1197 u16 linitcmd)
1198{
1199 u64 mod_wd;
1200 struct qib_devdata *dd = ppd->dd;
1201 unsigned long flags;
1202
1203 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1204 /*
1205 * If we are told to disable, note that so link-recovery
1206 * code does not attempt to bring us back up.
1207 */
1208 spin_lock_irqsave(&ppd->lflags_lock, flags);
1209 ppd->lflags |= QIBL_IB_LINK_DISABLED;
1210 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1211 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1212 /*
1213 * Any other linkinitcmd will lead to LINKDOWN and then
1214 * to INIT (if all is well), so clear flag to let
1215 * link-recovery code attempt to bring us back up.
1216 */
1217 spin_lock_irqsave(&ppd->lflags_lock, flags);
1218 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1219 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1220 }
1221
1222 mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
1223 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1224
1225 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
1226 /* write to chip to prevent back-to-back writes of control reg */
1227 qib_write_kreg(dd, kr_scratch, 0);
1228}
1229
1230/**
1231 * qib_6120_bringup_serdes - bring up the serdes
1232 * @ppd: physical port of the qlogic_ib device
1233 */
1234static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
1235{
1236 struct qib_devdata *dd = ppd->dd;
1237 u64 val, config1, prev_val, hwstat, ibc;
1238
1239 /* Put IBC in reset, sends disabled */
1240 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1241 qib_write_kreg(dd, kr_control, 0ULL);
1242
1243 dd->cspec->ibdeltainprog = 1;
1244 dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
1245 dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
1246
1247 /* flowcontrolwatermark is in units of KBytes */
1248 ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1249 /*
1250 * How often flowctrl sent. More or less in usecs; balance against
1251 * watermark value, so that in theory senders always get a flow
1252 * control update in time to not let the IB link go idle.
1253 */
1254 ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1255 /* max error tolerance */
1256 dd->cspec->lli_thresh = 0xf;
1257 ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
1258 /* use "real" buffer space for IB credit flow control */
1259 ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1260 /* overrun threshold */
1261 ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1262 /*
1263 * set initial max size pkt IBC will send, including ICRC; it's the
1264 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
1265 */
1266 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
1267 dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1268
1269 /* initially come up waiting for TS1, without sending anything. */
1270 val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1271 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1272 qib_write_kreg(dd, kr_ibcctrl, val);
1273
1274 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1275 config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
1276
1277 /*
1278 * Force reset on, also set rxdetect enable. Must do before reading
1279 * serdesstatus at least for simulation, or some of the bits in
1280 * serdes status will come back as undefined and cause simulation
1281 * failures
1282 */
1283 val |= SYM_MASK(SerdesCfg0, ResetPLL) |
1284 SYM_MASK(SerdesCfg0, RxDetEnX) |
1285 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
1286 SYM_MASK(SerdesCfg0, L1PwrDnB) |
1287 SYM_MASK(SerdesCfg0, L1PwrDnC) |
1288 SYM_MASK(SerdesCfg0, L1PwrDnD));
1289 qib_write_kreg(dd, kr_serdes_cfg0, val);
1290 /* be sure chip saw it */
1291 qib_read_kreg64(dd, kr_scratch);
1292 udelay(5); /* need pll reset set at least for a bit */
1293 /*
1294 * after PLL is reset, set the per-lane Resets and TxIdle and
1295 * clear the PLL reset and rxdetect (to get falling edge).
1296 * Leave L1PWR bits set (permanently)
1297 */
1298 val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
1299 SYM_MASK(SerdesCfg0, ResetPLL) |
1300 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
1301 SYM_MASK(SerdesCfg0, L1PwrDnB) |
1302 SYM_MASK(SerdesCfg0, L1PwrDnC) |
1303 SYM_MASK(SerdesCfg0, L1PwrDnD)));
1304 val |= (SYM_MASK(SerdesCfg0, ResetA) |
1305 SYM_MASK(SerdesCfg0, ResetB) |
1306 SYM_MASK(SerdesCfg0, ResetC) |
1307 SYM_MASK(SerdesCfg0, ResetD)) |
1308 SYM_MASK(SerdesCfg0, TxIdeEnX);
1309 qib_write_kreg(dd, kr_serdes_cfg0, val);
1310 /* be sure chip saw it */
1311 (void) qib_read_kreg64(dd, kr_scratch);
1312 /* PLL reset must be clear for at least 11 usec before the lane
1313 * resets are cleared; give it a few more to be sure */
1314 udelay(15);
1315 val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
1316 SYM_MASK(SerdesCfg0, ResetB) |
1317 SYM_MASK(SerdesCfg0, ResetC) |
1318 SYM_MASK(SerdesCfg0, ResetD)) |
1319 SYM_MASK(SerdesCfg0, TxIdeEnX));
1320
1321 qib_write_kreg(dd, kr_serdes_cfg0, val);
1322 /* be sure chip saw it */
1323 (void) qib_read_kreg64(dd, kr_scratch);
1324
1325 val = qib_read_kreg64(dd, kr_xgxs_cfg);
1326 prev_val = val;
1327 if (val & QLOGIC_IB_XGXS_RESET)
1328 val &= ~QLOGIC_IB_XGXS_RESET;
1329 if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
1330 /* need to compensate for Tx inversion in partner */
1331 val &= ~SYM_MASK(XGXSCfg, polarity_inv);
1332 val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
1333 }
1334 if (val != prev_val)
1335 qib_write_kreg(dd, kr_xgxs_cfg, val);
1336
1337 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1338
1339 /* clear current and de-emphasis bits */
1340 config1 &= ~0x0ffffffff00ULL;
1341 /* set current to 20ma */
1342 config1 |= 0x00000000000ULL;
1343 /* set de-emphasis to -5.68dB */
1344 config1 |= 0x0cccc000000ULL;
1345 qib_write_kreg(dd, kr_serdes_cfg1, config1);
1346
1347 /* base and port guid same for single port */
1348 ppd->guid = dd->base_guid;
1349
1350 /*
1351 * the process of setting and un-resetting the serdes normally
1352 * causes a serdes PLL error, so check for that and clear it
1353 * here. Also clear the hwerr bit in errstatus, but not others.
1354 */
1355 hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
1356 if (hwstat) {
1357 /* should just have PLL set; clear all set bits, in any case */
1358 if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
1359 qib_write_kreg(dd, kr_hwerrclear, hwstat);
1360 qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
1361 }
1362
1363 dd->control |= QLOGIC_IB_C_LINKENABLE;
1364 dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
1365 qib_write_kreg(dd, kr_control, dd->control);
1366
1367 return 0;
1368}
1369
1370/**
1371 * qib_6120_quiet_serdes - set serdes to txidle
1372 * @ppd: physical port of the qlogic_ib device
1373 * Called when driver is being unloaded
1374 */
1375static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
1376{
1377 struct qib_devdata *dd = ppd->dd;
1378 u64 val;
1379
1380 qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1381
1382 /* disable IBC */
1383 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1384 qib_write_kreg(dd, kr_control,
1385 dd->control | QLOGIC_IB_C_FREEZEMODE);
1386
1387 if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
1388 dd->cspec->ibdeltainprog) {
1389 u64 diagc;
1390
1391 /* enable counter writes */
1392 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1393 qib_write_kreg(dd, kr_hwdiagctrl,
1394 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1395
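 /*
 * Roll the IB symbol-error and link-error-recovery counters back:
 * "val -= val - snap" leaves val at the bringup snapshot, and any
 * accumulated delta is subtracted too, presumably so errors caused
 * by serdes bringup itself are not reported.
 */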
1396 if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
1397 val = read_6120_creg32(dd, cr_ibsymbolerr);
1398 if (dd->cspec->ibdeltainprog)
1399 val -= val - dd->cspec->ibsymsnap;
1400 val -= dd->cspec->ibsymdelta;
1401 write_6120_creg(dd, cr_ibsymbolerr, val);
1402 }
1403 if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
1404 val = read_6120_creg32(dd, cr_iblinkerrrecov);
1405 if (dd->cspec->ibdeltainprog)
1406 val -= val - dd->cspec->iblnkerrsnap;
1407 val -= dd->cspec->iblnkerrdelta;
1408 write_6120_creg(dd, cr_iblinkerrrecov, val);
1409 }
1410
1411 /* and disable counter writes */
1412 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1413 }
1414
1415 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1416 val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
1417 qib_write_kreg(dd, kr_serdes_cfg0, val);
1418}
1419
1420/**
1421 * qib_6120_setup_setextled - set the state of the two external LEDs
1422 * @ppd: physical port of the qlogic_ib device
1423 * @on: whether the link is up or not
1424 *
1425 * When @on is true, the exact combination of LEDs lit is determined
1426 * by looking at the ibcstatus.
1427 *
1428 * These LEDs indicate the physical and logical state of the IB link.
1429 * For this chip (at least with recommended board pinouts), LED1
1430 * is Yellow (logical state) and LED2 is Green (physical state).
1431 *
1432 * Note: We try to match the Mellanox HCA LED behavior as best
1433 * we can. Green indicates physical link state is OK (something is
1434 * plugged in, and we can train).
1435 * Amber indicates the link is logically up (ACTIVE).
1436 * Mellanox further blinks the amber LED to indicate data packet
1437 * activity, but we have no hardware support for that, so it would
1438 * require waking up every 10-20 msecs and checking the counters
1439 * on the chip, and then turning the LED off if appropriate. That's
1440 * visible overhead, so not something we will do.
1441 *
1442 */
1443static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
1444{
1445 u64 extctl, val, lst, ltst;
1446 unsigned long flags;
1447 struct qib_devdata *dd = ppd->dd;
1448
1449 /*
1450 * The diags use the LED to indicate diag info, so we leave
1451 * the external LED alone when the diags are running.
1452 */
1453 if (dd->diag_client)
1454 return;
1455
1456 /* Allow override of LED display, e.g. for locating a system in a rack */
1457 if (ppd->led_override) {
1458 ltst = (ppd->led_override & QIB_LED_PHYS) ?
1459 IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
1460 lst = (ppd->led_override & QIB_LED_LOG) ?
1461 IB_PORT_ACTIVE : IB_PORT_DOWN;
1462 } else if (on) {
1463 val = qib_read_kreg64(dd, kr_ibcstatus);
1464 ltst = qib_6120_phys_portstate(val);
1465 lst = qib_6120_iblink_state(val);
1466 } else {
1467 ltst = 0;
1468 lst = 0;
1469 }
1470
1471 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1472 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1473 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1474
1475 if (ltst == IB_PHYSPORTSTATE_LINKUP)
1476 extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1477 if (lst == IB_PORT_ACTIVE)
1478 extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1479 dd->cspec->extctrl = extctl;
1480 qib_write_kreg(dd, kr_extctrl, extctl);
1481 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1482}
1483
1484static void qib_6120_free_irq(struct qib_devdata *dd)
1485{
1486 if (dd->cspec->irq) {
1487 free_irq(dd->cspec->irq, dd);
1488 dd->cspec->irq = 0;
1489 }
1490 qib_nomsi(dd);
1491}
1492
1493/**
1494 * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
1495 * @dd: the qlogic_ib device
1496 *
1497 * This is called during driver unload.
1498 */
1499static void qib_6120_setup_cleanup(struct qib_devdata *dd)
1500{
1501 qib_6120_free_irq(dd);
1502 kfree(dd->cspec->cntrs);
1503 kfree(dd->cspec->portcntrs);
1504 if (dd->cspec->dummy_hdrq) {
1505 dma_free_coherent(&dd->pcidev->dev,
1506 ALIGN(dd->rcvhdrcnt *
1507 dd->rcvhdrentsize *
1508 sizeof(u32), PAGE_SIZE),
1509 dd->cspec->dummy_hdrq,
1510 dd->cspec->dummy_hdrq_phys);
1511 dd->cspec->dummy_hdrq = NULL;
1512 }
1513}
1514
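/*
 * Enable (needint != 0) or disable the "PIO buffer available" interrupt
 * by toggling PIOIntBufAvail in the shadowed sendctrl register.
 */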
1515static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
1516{
1517 unsigned long flags;
1518
1519 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1520 if (needint)
1521 dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
1522 else
1523 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
1524 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1525 qib_write_kreg(dd, kr_scratch, 0ULL);
1526 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1527}
1528
1529/*
1530 * handle errors and unusual events first, separate function
1531 * to improve cache hits for fast path interrupt handling
1532 */
1533static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
1534{
1535 if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1536 qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
1537 istat & ~QLOGIC_IB_I_BITSEXTANT);
1538
1539 if (istat & QLOGIC_IB_I_ERROR) {
1540 u64 estat = 0;
1541
1542 qib_stats.sps_errints++;
1543 estat = qib_read_kreg64(dd, kr_errstatus);
1544 if (!estat)
1545 qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
1546 "but no error bits set!\n", istat);
1547 handle_6120_errors(dd, estat);
1548 }
1549
1550 if (istat & QLOGIC_IB_I_GPIO) {
1551 u32 gpiostatus;
1552 u32 to_clear = 0;
1553
1554 /*
1555 * GPIO_3..5 on IBA6120 Rev2 chips indicate
1556 * errors that we need to count.
1557 */
1558 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1559 /* First the error-counter case. */
1560 if (gpiostatus & GPIO_ERRINTR_MASK) {
1561 /* want to clear the bits we see asserted. */
1562 to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
1563
1564 /*
1565 * Count appropriately, clear bits out of our copy,
1566 * as they have been "handled".
1567 */
1568 if (gpiostatus & (1 << GPIO_RXUVL_BIT))
1569 dd->cspec->rxfc_unsupvl_errs++;
1570 if (gpiostatus & (1 << GPIO_OVRUN_BIT))
1571 dd->cspec->overrun_thresh_errs++;
1572 if (gpiostatus & (1 << GPIO_LLI_BIT))
1573 dd->cspec->lli_errs++;
1574 gpiostatus &= ~GPIO_ERRINTR_MASK;
1575 }
1576 if (gpiostatus) {
1577 /*
1578 * Some unexpected bits remain. If they could have
1579 * caused the interrupt, complain and clear.
1580 * To avoid repetition of this condition, also clear
1581 * the mask. It is almost certainly due to error.
1582 */
1583 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1584
1585 /*
1586 * Also check that the chip reflects our shadow, and
1587 * report issues if they caused the interrupt. We suppress
1588 * further reports by refreshing the mask from the shadow.
1589 */
1590 if (mask & gpiostatus) {
1591 to_clear |= (gpiostatus & mask);
1592 dd->cspec->gpio_mask &= ~(gpiostatus & mask);
1593 qib_write_kreg(dd, kr_gpio_mask,
1594 dd->cspec->gpio_mask);
1595 }
1596 }
1597 if (to_clear)
1598 qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
1599 }
1600}
1601
1602static irqreturn_t qib_6120intr(int irq, void *data)
1603{
1604 struct qib_devdata *dd = data;
1605 irqreturn_t ret;
1606 u32 istat, ctxtrbits, rmask, crcs = 0;
1607 unsigned i;
1608
1609 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1610 /*
1611 * This return value is not great, but we do not want the
1612 * interrupt core code to remove our interrupt handler
1613 * because we don't appear to be handling an interrupt
1614 * during a chip reset.
1615 */
1616 ret = IRQ_HANDLED;
1617 goto bail;
1618 }
1619
1620 istat = qib_read_kreg32(dd, kr_intstatus);
1621
1622 if (unlikely(!istat)) {
1623 ret = IRQ_NONE; /* not our interrupt, or already handled */
1624 goto bail;
1625 }
1626 if (unlikely(istat == -1)) {
1627 qib_bad_intrstatus(dd);
1628 /* don't know if it was our interrupt or not */
1629 ret = IRQ_NONE;
1630 goto bail;
1631 }
1632
1633 qib_stats.sps_ints++;
1634 if (dd->int_counter != (u32) -1)
1635 dd->int_counter++;
1636
1637 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1638 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1639 unlikely_6120_intr(dd, istat);
1640
1641 /*
1642 * Clear the interrupt bits we found set, relatively early, so we
1643 * "know" the chip will have seen this by the time we process
1644 * the queue, and will re-interrupt if necessary. The processor
1645 * itself won't take the interrupt again until we return.
1646 */
1647 qib_write_kreg(dd, kr_intclear, istat);
1648
1649 /*
1650 * Handle kernel receive queues before checking for pio buffers
1651 * available since receives can overflow; piobuf waiters can afford
1652 * a few extra cycles, since they were waiting anyway.
1653 */
1654 ctxtrbits = istat &
1655 ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1656 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1657 if (ctxtrbits) {
1658 rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1659 (1U << QLOGIC_IB_I_RCVURG_SHIFT);
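 /*
 * rmask starts as the RcvAvail and RcvUrg bits for context 0;
 * shifting it left once per iteration selects the bit pair
 * for each successive kernel context.
 */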
1660 for (i = 0; i < dd->first_user_ctxt; i++) {
1661 if (ctxtrbits & rmask) {
1662 ctxtrbits &= ~rmask;
1663 crcs += qib_kreceive(dd->rcd[i],
1664 &dd->cspec->lli_counter,
1665 NULL);
1666 }
1667 rmask <<= 1;
1668 }
1669 if (crcs) {
1670 u32 cntr = dd->cspec->lli_counter;
1671 cntr += crcs;
1672 if (cntr) {
1673 if (cntr > dd->cspec->lli_thresh) {
1674 dd->cspec->lli_counter = 0;
1675 dd->cspec->lli_errs++;
1676 } else
1677 dd->cspec->lli_counter += cntr;
1678 }
1679 }
1680
1681
1682 if (ctxtrbits) {
1683 ctxtrbits =
1684 (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1685 (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
1686 qib_handle_urcv(dd, ctxtrbits);
1687 }
1688 }
1689
1690 if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
1691 qib_ib_piobufavail(dd);
1692
1693 ret = IRQ_HANDLED;
1694bail:
1695 return ret;
1696}
1697
1698/*
1699 * Set up our chip-specific interrupt handler
1700 * The interrupt type has already been setup, so
1701 * we just need to do the registration and error checking.
1702 */
1703static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1704{
1705 /*
1706 * If the chip supports added error indication via GPIO pins,
1707 * enable interrupts on those bits so the interrupt routine
1708 * can count the events. Also set flag so interrupt routine
1709 * can know they are expected.
1710 */
1711 if (SYM_FIELD(dd->revision, Revision_R,
1712 ChipRevMinor) > 1) {
1713 /* Rev2+ reports extra errors via internal GPIO pins */
1714 dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
1715 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1716 }
1717
1718 if (!dd->cspec->irq)
1719 qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
1720 "work\n");
1721 else {
1722 int ret;
1723 ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
1724 QIB_DRV_NAME, dd);
1725 if (ret)
1726 qib_dev_err(dd, "Couldn't setup interrupt "
1727 "(irq=%d): %d\n", dd->cspec->irq,
1728 ret);
1729 }
1730}
1731
1732/**
1733 * pe_boardname - fill in the board name
1734 * @dd: the qlogic_ib device
1735 *
1736 * info is based on the board revision register
1737 */
1738static void pe_boardname(struct qib_devdata *dd)
1739{
1740 char *n;
1741 u32 boardid, namelen;
1742
1743 boardid = SYM_FIELD(dd->revision, Revision,
1744 BoardID);
1745
1746 switch (boardid) {
1747 case 2:
1748 n = "InfiniPath_QLE7140";
1749 break;
1750 default:
1751 qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
1752 n = "Unknown_InfiniPath_6120";
1753 break;
1754 }
1755 namelen = strlen(n) + 1;
1756 dd->boardname = kmalloc(namelen, GFP_KERNEL);
1757 if (!dd->boardname)
1758 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
1759 else
1760 snprintf(dd->boardname, namelen, "%s", n);
1761
1762 if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
1763 qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
1764 "%u.%u!\n", dd->majrev, dd->minrev);
1765
1766 snprintf(dd->boardversion, sizeof(dd->boardversion),
1767 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
1768 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
1769 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
1770 dd->majrev, dd->minrev,
1771 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
1772
1773}
1774
1775/*
1776 * This routine sleeps, so it can only be called from user context, not
1777 * from interrupt context. If we need interrupt context, we can split
1778 * it into two routines.
1779 */
1780static int qib_6120_setup_reset(struct qib_devdata *dd)
1781{
1782 u64 val;
1783 int i;
1784 int ret;
1785 u16 cmdval;
1786 u8 int_line, clinesz;
1787
1788 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
1789
1790 /* Use ERROR so it shows up in logs, etc. */
1791 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
1792
1793 /* no interrupts till re-initted */
1794 qib_6120_set_intr_state(dd, 0);
1795
1796 dd->cspec->ibdeltainprog = 0;
1797 dd->cspec->ibsymdelta = 0;
1798 dd->cspec->iblnkerrdelta = 0;
1799
1800 /*
1801 * Keep chip from being accessed until we are ready. Use
1802 * writeq() directly, to allow the write even though QIB_PRESENT
1803 * isn't set.
1804 */
1805 dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
1806 dd->int_counter = 0; /* so we check interrupts work again */
1807 val = dd->control | QLOGIC_IB_C_RESET;
1808 writeq(val, &dd->kregbase[kr_control]);
1809 mb(); /* prevent compiler re-ordering around actual reset */
1810
1811 for (i = 1; i <= 5; i++) {
1812 /*
1813 * Allow MBIST, etc. to complete; longer on each retry.
1814 * We sometimes get machine checks from bus timeout if no
1815 * response, so for now, make it *really* long.
1816 */
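 /* i.e. wait 5 sec on the first try, then 7, 9, 11 and 13 sec */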
1817 msleep(1000 + (1 + i) * 2000);
1818
1819 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
1820
1821 /*
1822 * Use readq directly, so we don't need to mark it as PRESENT
1823 * until we get a successful indication that all is well.
1824 */
1825 val = readq(&dd->kregbase[kr_revision]);
1826 if (val == dd->revision) {
1827 dd->flags |= QIB_PRESENT; /* it's back */
1828 ret = qib_reinit_intr(dd);
1829 goto bail;
1830 }
1831 }
1832 ret = 0; /* failed */
1833
1834bail:
1835 if (ret) {
1836 if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
1837 qib_dev_err(dd, "Reset failed to setup PCIe or "
1838 "interrupts; continuing anyway\n");
1839 /* clear the reset error, init error/hwerror mask */
1840 qib_6120_init_hwerrors(dd);
1841 /* for Rev2 error interrupts; nop for rev 1 */
1842 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1843 /* clear the reset error, init error/hwerror mask */
1844 qib_6120_init_hwerrors(dd);
1845 }
1846 return ret;
1847}
1848
1849/**
1850 * qib_6120_put_tid - write a TID in chip
1851 * @dd: the qlogic_ib device
1852 * @tidptr: pointer to the expected TID (in chip) to update
1853 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
1854 * for expected
1855 * @pa: physical address of in memory buffer; tidinvalid if freeing
1856 *
1857 * This exists as a separate routine to allow for special locking etc.
1858 * It's used for both the full cleanup on exit, as well as the normal
1859 * setup and teardown.
1860 */
1861static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
1862 u32 type, unsigned long pa)
1863{
1864 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1865 unsigned long flags;
1866 int tidx;
1867 spinlock_t *tidlockp; /* select appropriate spinlock */
1868
1869 if (!dd->kregbase)
1870 return;
1871
1872 if (pa != dd->tidinvalid) {
1873 if (pa & ((1U << 11) - 1)) {
1874 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1875 pa);
1876 return;
1877 }
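 /* the chip takes the buffer address in units of 2KB pages */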
1878 pa >>= 11;
1879 if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1880 qib_dev_err(dd, "Physical page address 0x%lx "
1881 "larger than supported\n", pa);
1882 return;
1883 }
1884
1885 if (type == RCVHQ_RCV_TYPE_EAGER)
1886 pa |= dd->tidtemplate;
1887 else /* for now, always full 4KB page */
1888 pa |= 2 << 29;
1889 }
1890
1891 /*
1892 * Avoid chip issue by writing the scratch register
1893 * before and after the TID, and with an io write barrier.
1894 * We use a spinlock around the writes, so they can't intermix
1895 * with other TID (eager or expected) writes (the chip problem
1896 * is triggered by back to back TID writes). Unfortunately, this
1897 * call can be done from interrupt level for the ctxt 0 eager TIDs,
1898 * so we have to use irqsave locks.
1899 */
1900 /*
1901 * Assumes tidptr always > egrtidbase
1902 * if type == RCVHQ_RCV_TYPE_EAGER.
1903 */
1904 tidx = tidptr - dd->egrtidbase;
1905
1906 tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
1907 ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
1908 spin_lock_irqsave(tidlockp, flags);
1909 qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
1910 writel(pa, tidp32);
1911 qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
1912 mmiowb();
1913 spin_unlock_irqrestore(tidlockp, flags);
1914}
1915
1916/**
1917 * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
1918 * @dd: the qlogic_ib device
1919 * @tidptr: pointer to the expected TID (in chip) to update
1920 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
1921 * for expected
1922 * @pa: physical address of in memory buffer; tidinvalid if freeing
1923 *
1924 * This exists as a separate routine to allow for selection of the
1925 * appropriate "flavor". The static calls in cleanup just use the
1926 * revision-agnostic form, as they are not performance critical.
1927 */
1928static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
1929 u32 type, unsigned long pa)
1930{
1931 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1932 u32 tidx;
1933
1934 if (!dd->kregbase)
1935 return;
1936
1937 if (pa != dd->tidinvalid) {
1938 if (pa & ((1U << 11) - 1)) {
1939 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1940 pa);
1941 return;
1942 }
1943 pa >>= 11;
1944 if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1945 qib_dev_err(dd, "Physical page address 0x%lx "
1946 "larger than supported\n", pa);
1947 return;
1948 }
1949
1950 if (type == RCVHQ_RCV_TYPE_EAGER)
1951 pa |= dd->tidtemplate;
1952 else /* for now, always full 4KB page */
1953 pa |= 2 << 29;
1954 }
1955 tidx = tidptr - dd->egrtidbase;
1956 writel(pa, tidp32);
1957 mmiowb();
1958}
1959
1960
1961/**
1962 * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
1963 * @dd: the qlogic_ib device
1964 * @rcd: the context data
1965 *
1966 * clear all TID entries for a context, expected and eager.
1967 * Used from qib_close(). On this chip, TIDs are only 32 bits,
1968 * not 64, but they are still on 64 bit boundaries, so tidbase
1969 * is declared as u64 * for the pointer math, even though we write 32 bits
1970 */
1971static void qib_6120_clear_tids(struct qib_devdata *dd,
1972 struct qib_ctxtdata *rcd)
1973{
1974 u64 __iomem *tidbase;
1975 unsigned long tidinv;
1976 u32 ctxt;
1977 int i;
1978
1979 if (!dd->kregbase || !rcd)
1980 return;
1981
1982 ctxt = rcd->ctxt;
1983
1984 tidinv = dd->tidinvalid;
1985 tidbase = (u64 __iomem *)
1986 ((char __iomem *)(dd->kregbase) +
1987 dd->rcvtidbase +
1988 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
1989
1990 for (i = 0; i < dd->rcvtidcnt; i++)
1991 /* use func pointer because could be one of two funcs */
1992 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1993 tidinv);
1994
1995 tidbase = (u64 __iomem *)
1996 ((char __iomem *)(dd->kregbase) +
1997 dd->rcvegrbase +
1998 rcd->rcvegr_tid_base * sizeof(*tidbase));
1999
2000 for (i = 0; i < rcd->rcvegrcnt; i++)
2001 /* use func pointer because could be one of two funcs */
2002 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2003 tidinv);
2004}
2005
2006/**
2007 * qib_6120_tidtemplate - setup constants for TID updates
2008 * @dd: the qlogic_ib device
2009 *
2010 * We set up values that we use a lot, to avoid recalculating them each time
2011 */
2012static void qib_6120_tidtemplate(struct qib_devdata *dd)
2013{
2014 u32 egrsize = dd->rcvegrbufsize;
2015
2016 /*
2017 * For now, we always allocate 4KB buffers (at init) so we can
2018 * receive max size packets. We may want a module parameter to
2019 * specify 2KB or 4KB and/or make it per ctxt instead of per device
2020 * for those who want to reduce memory footprint. Note that the
2021 * rcvhdrentsize size must be large enough to hold the largest
2022 * IB header (currently 96 bytes) that we expect to handle (plus of
2023 * course the 2 dwords of RHF).
2024 */
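 /*
 * Bits 30:29 of a TID entry apparently encode the eager buffer
 * size (1 = 2KB, 2 = 4KB); compare the "full 4KB page" case in
 * qib_6120_put_tid().
 */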
2025 if (egrsize == 2048)
2026 dd->tidtemplate = 1U << 29;
2027 else if (egrsize == 4096)
2028 dd->tidtemplate = 2U << 29;
2029 dd->tidinvalid = 0;
2030}
2031
2032int __attribute__((weak)) qib_unordered_wc(void)
2033{
2034 return 0;
2035}
2036
2037/**
2038 * qib_6120_get_base_info - set chip-specific flags for user code
2039 * @rcd: the qlogic_ib ctxt
2040 * @kinfo: qib_base_info pointer
2041 *
2042 * We set the PCIE flag because the lower bandwidth on PCIe vs
2043 * HyperTransport can affect some user packet algorithms.
2044 */
2045static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
2046 struct qib_base_info *kinfo)
2047{
2048 if (qib_unordered_wc())
2049 kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
2050
2051 kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2052 QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
2053 return 0;
2054}
2055
2056
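/*
 * The packet header starts one 64-bit RHF (receive header flags) word
 * past rhf_addr, i.e. two u32s in.
 */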
2057static struct qib_message_header *
2058qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2059{
2060 return (struct qib_message_header *)
2061 &rhf_addr[sizeof(u64) / sizeof(u32)];
2062}
2063
2064static void qib_6120_config_ctxts(struct qib_devdata *dd)
2065{
2066 dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
2067 if (qib_n_krcv_queues > 1) {
2068 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2069 if (dd->first_user_ctxt > dd->ctxtcnt)
2070 dd->first_user_ctxt = dd->ctxtcnt;
2071 dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
2072 } else
2073 dd->first_user_ctxt = dd->num_pports;
2074 dd->n_krcv_queues = dd->first_user_ctxt;
2075}
2076
2077static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2078 u32 updegr, u32 egrhd)
2079{
2080 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2081 if (updegr)
2082 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2083}
2084
2085static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
2086{
2087 u32 head, tail;
2088
2089 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2090 if (rcd->rcvhdrtail_kvaddr)
2091 tail = qib_get_rcvhdrtail(rcd);
2092 else
2093 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2094 return head == tail;
2095}
2096
2097/*
2098 * Used when we close any ctxt, for DMA already in flight
2099 * at close. Can't be done until we know hdrq size, so not
2100 * early in chip init.
2101 */
2102static void alloc_dummy_hdrq(struct qib_devdata *dd)
2103{
2104 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
2105 dd->rcd[0]->rcvhdrq_size,
2106 &dd->cspec->dummy_hdrq_phys,
2107 GFP_KERNEL | __GFP_COMP);
2108 if (!dd->cspec->dummy_hdrq) {
2109 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
2110 /* fallback to just 0'ing */
2111 dd->cspec->dummy_hdrq_phys = 0UL;
2112 }
2113}
2114
2115/*
2116 * Modify the RCVCTRL register in a chip-specific way. This
2117 * is a function because bit positions and (future) register
2118 * location is chip-specific, but the needed operations are
2119 * generic. <op> is a bit-mask because we often want to
2120 * do multiple modifications.
2121 */
2122static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
2123 int ctxt)
2124{
2125 struct qib_devdata *dd = ppd->dd;
2126 u64 mask, val;
2127 unsigned long flags;
2128
2129 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2130
2131 if (op & QIB_RCVCTRL_TAILUPD_ENB)
2132 dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2133 if (op & QIB_RCVCTRL_TAILUPD_DIS)
2134 dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2135 if (op & QIB_RCVCTRL_PKEY_ENB)
2136 dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
2137 if (op & QIB_RCVCTRL_PKEY_DIS)
2138 dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
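 /* a negative ctxt applies the per-context operations to all contexts */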
2139 if (ctxt < 0)
2140 mask = (1ULL << dd->ctxtcnt) - 1;
2141 else
2142 mask = (1ULL << ctxt);
2143 if (op & QIB_RCVCTRL_CTXT_ENB) {
2144 /* always done for specific ctxt */
2145 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2146 if (!(dd->flags & QIB_NODMA_RTAIL))
2147 dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
2148 /* Write these registers before the context is enabled. */
2149 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2150 dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2151 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2152 dd->rcd[ctxt]->rcvhdrq_phys);
2153
2154 if (ctxt == 0 && !dd->cspec->dummy_hdrq)
2155 alloc_dummy_hdrq(dd);
2156 }
2157 if (op & QIB_RCVCTRL_CTXT_DIS)
2158 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2159 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2160 dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2161 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2162 dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2163 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2164 if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2165 /* arm rcv interrupt */
2166 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2167 dd->rhdrhead_intr_off;
2168 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2169 }
2170 if (op & QIB_RCVCTRL_CTXT_ENB) {
2171 /*
2172 * Init the context registers also; if we were
2173 * disabled, tail and head should both be zero
2174 * already from the enable, but since we don't
2175 * know, we have to do it explicitly.
2176 */
2177 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2178 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2179
2180 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2181 dd->rcd[ctxt]->head = val;
2182 /* If kctxt, interrupt on next receive. */
2183 if (ctxt < dd->first_user_ctxt)
2184 val |= dd->rhdrhead_intr_off;
2185 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2186 }
2187 if (op & QIB_RCVCTRL_CTXT_DIS) {
2188 /*
2189 * Be paranoid, and never write 0's to these, just use an
2190 * unused page. Of course,
2191 * rcvhdraddr points to a large chunk of memory, so this
2192 * could still trash things, but at least it won't trash
2193 * page 0, and by disabling the ctxt, it should stop "soon",
2194 * even if a packet or two is already in flight after we
2195 * disabled the ctxt. Only 6120 has this issue.
2196 */
2197 if (ctxt >= 0) {
2198 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2199 dd->cspec->dummy_hdrq_phys);
2200 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2201 dd->cspec->dummy_hdrq_phys);
2202 } else {
2203 unsigned i;
2204
2205 for (i = 0; i < dd->cfgctxts; i++) {
2206 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2207 i, dd->cspec->dummy_hdrq_phys);
2208 qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
2209 i, dd->cspec->dummy_hdrq_phys);
2210 }
2211 }
2212 }
2213 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2214}
2215
2216/*
2217 * Modify the SENDCTRL register in a chip-specific way. This
2218 * is a function because there may be multiple such registers with
2219 * slightly different layouts. Only the operations actually used
2220 * are implemented so far.
2221 * The chip requires no back-to-back sendctrl writes, so write the
2222 * scratch register after writing sendctrl.
2223 */
2224static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
2225{
2226 struct qib_devdata *dd = ppd->dd;
2227 u64 tmp_dd_sendctrl;
2228 unsigned long flags;
2229
2230 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2231
2232 /* First the ones that are "sticky", saved in shadow */
2233 if (op & QIB_SENDCTRL_CLEAR)
2234 dd->sendctrl = 0;
2235 if (op & QIB_SENDCTRL_SEND_DIS)
2236 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
2237 else if (op & QIB_SENDCTRL_SEND_ENB)
2238 dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
2239 if (op & QIB_SENDCTRL_AVAIL_DIS)
2240 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2241 else if (op & QIB_SENDCTRL_AVAIL_ENB)
2242 dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
2243
2244 if (op & QIB_SENDCTRL_DISARM_ALL) {
2245 u32 i, last;
2246
2247 tmp_dd_sendctrl = dd->sendctrl;
2248 /*
2249 * disarm any that are not yet launched, disabling sends
2250 * and updates until done.
2251 */
2252 last = dd->piobcnt2k + dd->piobcnt4k;
2253 tmp_dd_sendctrl &=
2254 ~(SYM_MASK(SendCtrl, PIOEnable) |
2255 SYM_MASK(SendCtrl, PIOBufAvailUpd));
2256 for (i = 0; i < last; i++) {
2257 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
2258 SYM_MASK(SendCtrl, Disarm) | i);
2259 qib_write_kreg(dd, kr_scratch, 0);
2260 }
2261 }
2262
2263 tmp_dd_sendctrl = dd->sendctrl;
2264
2265 if (op & QIB_SENDCTRL_FLUSH)
2266 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2267 if (op & QIB_SENDCTRL_DISARM)
2268 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2269 ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
2270 SYM_LSB(SendCtrl, DisarmPIOBuf));
2271 if (op & QIB_SENDCTRL_AVAIL_BLIP)
2272 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2273
2274 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2275 qib_write_kreg(dd, kr_scratch, 0);
2276
2277 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2278 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2279 qib_write_kreg(dd, kr_scratch, 0);
2280 }
2281
2282 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2283
2284 if (op & QIB_SENDCTRL_FLUSH) {
2285 u32 v;
2286 /*
2287 * ensure writes have hit chip, then do a few
2288 * more reads, to allow DMA of pioavail registers
2289 * to occur, so in-memory copy is in sync with
2290 * the chip. Not always safe to sleep.
2291 */
2292 v = qib_read_kreg32(dd, kr_scratch);
2293 qib_write_kreg(dd, kr_scratch, v);
2294 v = qib_read_kreg32(dd, kr_scratch);
2295 qib_write_kreg(dd, kr_scratch, v);
2296 qib_read_kreg32(dd, kr_scratch);
2297 }
2298}
2299
2300/**
2301 * qib_portcntr_6120 - read a per-port counter
2302 * @ppd: the qlogic_ib port data
2303 * @reg: the QIBPORTCNTR_* counter to snapshot
2304 */
2305static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
2306{
2307 u64 ret = 0ULL;
2308 struct qib_devdata *dd = ppd->dd;
2309 u16 creg;
2310 /* 0xffff for unimplemented or synthesized counters */
2311 static const u16 xlator[] = {
2312 [QIBPORTCNTR_PKTSEND] = cr_pktsend,
2313 [QIBPORTCNTR_WORDSEND] = cr_wordsend,
2314 [QIBPORTCNTR_PSXMITDATA] = 0xffff,
2315 [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
2316 [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
2317 [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2318 [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2319 [QIBPORTCNTR_PSRCVDATA] = 0xffff,
2320 [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
2321 [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2322 [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2323 [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2324 [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2325 [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
2326 [QIBPORTCNTR_RXVLERR] = 0xffff,
2327 [QIBPORTCNTR_ERRICRC] = cr_erricrc,
2328 [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2329 [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2330 [QIBPORTCNTR_BADFORMAT] = cr_badformat,
2331 [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2332 [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2333 [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2334 [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2335 [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
2336 [QIBPORTCNTR_ERRLINK] = cr_errlink,
2337 [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2338 [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2339 [QIBPORTCNTR_LLI] = 0xffff,
2340 [QIBPORTCNTR_PSINTERVAL] = 0xffff,
2341 [QIBPORTCNTR_PSSTART] = 0xffff,
2342 [QIBPORTCNTR_PSSTAT] = 0xffff,
2343 [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
2344 [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2345 [QIBPORTCNTR_KHDROVFL] = 0xffff,
2346 };
2347
2348 if (reg >= ARRAY_SIZE(xlator)) {
2349 qib_devinfo(ppd->dd->pcidev,
2350 "Unimplemented portcounter %u\n", reg);
2351 goto done;
2352 }
2353 creg = xlator[reg];
2354
2355	/* handle counter requests not implemented as chip counters */
2356 if (reg == QIBPORTCNTR_LLI)
2357 ret = dd->cspec->lli_errs;
2358 else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
2359 ret = dd->cspec->overrun_thresh_errs;
2360 else if (reg == QIBPORTCNTR_KHDROVFL) {
2361 int i;
2362
2363 /* sum over all kernel contexts */
2364 for (i = 0; i < dd->first_user_ctxt; i++)
2365 ret += read_6120_creg32(dd, cr_portovfl + i);
2366 } else if (reg == QIBPORTCNTR_PSSTAT)
2367 ret = dd->cspec->pma_sample_status;
2368 if (creg == 0xffff)
2369 goto done;
2370
2371 /*
2372 * only fast incrementing counters are 64bit; use 32 bit reads to
2373 * avoid two independent reads when on opteron
2374 */
2375 if (creg == cr_wordsend || creg == cr_wordrcv ||
2376 creg == cr_pktsend || creg == cr_pktrcv)
2377 ret = read_6120_creg(dd, creg);
2378 else
2379 ret = read_6120_creg32(dd, creg);
2380 if (creg == cr_ibsymbolerr) {
2381 if (dd->cspec->ibdeltainprog)
2382 ret -= ret - dd->cspec->ibsymsnap;
2383 ret -= dd->cspec->ibsymdelta;
2384 } else if (creg == cr_iblinkerrrecov) {
2385 if (dd->cspec->ibdeltainprog)
2386 ret -= ret - dd->cspec->iblnkerrsnap;
2387 ret -= dd->cspec->iblnkerrdelta;
2388 }
2389 if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
2390 ret += dd->cspec->rxfc_unsupvl_errs;
2391
2392done:
2393 return ret;
2394}
2395
2396/*
2397 * Device counter names (not port-specific), one line per stat,
2398 * single string. Used by utilities like ipathstats to print the stats
2399 * in a way which works for different versions of drivers, without changing
2400 * the utility. Names need to be 12 chars or less (w/o newline), for proper
2401 * display by utility.
2402 * Non-error counters are first.
2403 * Start of "error" counters is indicated by a leading "E " on the first
2404 * "error" counter, and doesn't count in label length.
2405 * The EgrOvfl list needs to be last so we truncate them at the configured
2406 * context count for the device.
2407 * cntr6120indices contains the corresponding register indices.
2408 */
2409static const char cntr6120names[] =
2410 "Interrupts\n"
2411 "HostBusStall\n"
2412 "E RxTIDFull\n"
2413 "RxTIDInvalid\n"
2414 "Ctxt0EgrOvfl\n"
2415 "Ctxt1EgrOvfl\n"
2416 "Ctxt2EgrOvfl\n"
2417 "Ctxt3EgrOvfl\n"
2418 "Ctxt4EgrOvfl\n";
2419
2420static const size_t cntr6120indices[] = {
2421 cr_lbint,
2422 cr_lbflowstall,
2423 cr_errtidfull,
2424 cr_errtidvalid,
2425 cr_portovfl + 0,
2426 cr_portovfl + 1,
2427 cr_portovfl + 2,
2428 cr_portovfl + 3,
2429 cr_portovfl + 4,
2430};
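/*
 * Illustration only, not part of the driver: a minimal sketch of how a
 * consumer such as ipathstats could pair the newline-separated name
 * string above with the u64 value array filled in by qib_read_6120cntrs()
 * below.  The helper name and the pr_info() output are hypothetical;
 * only the "one name per line" layout comes from cntr6120names.
 */
#if 0
static void dump_6120_cntrs(const char *names, const u64 *vals, int ncntrs)
{
	int i;

	for (i = 0; i < ncntrs; i++) {
		const char *nl = strchr(names, '\n');
		int len = nl ? (int)(nl - names) : (int)strlen(names);

		pr_info("%.*s %llu\n", len, names,
			(unsigned long long)vals[i]);
		if (!nl)
			break;
		names = nl + 1;
	}
}
#endif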
2431
2432/*
2433 * same as cntr6120names and cntr6120indices, but for port-specific counters.
2434 * portcntr6120indices is somewhat complicated by some registers needing
2435 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
2436 */
2437static const char portcntr6120names[] =
2438 "TxPkt\n"
2439 "TxFlowPkt\n"
2440 "TxWords\n"
2441 "RxPkt\n"
2442 "RxFlowPkt\n"
2443 "RxWords\n"
2444 "TxFlowStall\n"
2445 "E IBStatusChng\n"
2446 "IBLinkDown\n"
2447 "IBLnkRecov\n"
2448 "IBRxLinkErr\n"
2449 "IBSymbolErr\n"
2450 "RxLLIErr\n"
2451 "RxBadFormat\n"
2452 "RxBadLen\n"
2453 "RxBufOvrfl\n"
2454 "RxEBP\n"
2455 "RxFlowCtlErr\n"
2456 "RxICRCerr\n"
2457 "RxLPCRCerr\n"
2458 "RxVCRCerr\n"
2459 "RxInvalLen\n"
2460 "RxInvalPKey\n"
2461 "RxPktDropped\n"
2462 "TxBadLength\n"
2463 "TxDropped\n"
2464 "TxInvalLen\n"
2465 "TxUnderrun\n"
2466 "TxUnsupVL\n"
2467 ;
2468
2469#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
2470static const size_t portcntr6120indices[] = {
2471 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
2472 cr_pktsendflow,
2473 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
2474 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
2475 cr_pktrcvflowctrl,
2476 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
2477 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
2478 cr_ibstatuschange,
2479 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
2480 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
2481 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
2482 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
2483 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
2484 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
2485 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
2486 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
2487 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
2488 cr_rcvflowctrl_err,
2489 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
2490 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
2491 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
2492 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
2493 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
2494 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
2495 cr_invalidslen,
2496 cr_senddropped,
2497 cr_errslen,
2498 cr_sendunderrun,
2499 cr_txunsupvl,
2500};
2501
2502/* do all the setup to make the counter reads efficient later */
2503static void init_6120_cntrnames(struct qib_devdata *dd)
2504{
2505 int i, j = 0;
2506 char *s;
2507
2508 for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
2509 i++) {
2510 /* we always have at least one counter before the egrovfl */
2511 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
2512 j = 1;
2513 s = strchr(s + 1, '\n');
2514 if (s && j)
2515 j++;
2516 }
2517 dd->cspec->ncntrs = i;
2518 if (!s)
2519 /* full list; size is without terminating null */
2520 dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
2521 else
2522 dd->cspec->cntrnamelen = 1 + s - cntr6120names;
2523 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
2524 * sizeof(u64), GFP_KERNEL);
2525 if (!dd->cspec->cntrs)
2526 qib_dev_err(dd, "Failed allocation for counters\n");
2527
2528 for (i = 0, s = (char *)portcntr6120names; s; i++)
2529 s = strchr(s + 1, '\n');
2530 dd->cspec->nportcntrs = i - 1;
2531 dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
2532 dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
2533 * sizeof(u64), GFP_KERNEL);
2534 if (!dd->cspec->portcntrs)
2535 qib_dev_err(dd, "Failed allocation for portcounters\n");
2536}
2537
2538static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
2539 u64 **cntrp)
2540{
2541 u32 ret;
2542
2543 if (namep) {
2544 ret = dd->cspec->cntrnamelen;
2545 if (pos >= ret)
2546 ret = 0; /* final read after getting everything */
2547 else
2548 *namep = (char *)cntr6120names;
2549 } else {
2550 u64 *cntr = dd->cspec->cntrs;
2551 int i;
2552
2553 ret = dd->cspec->ncntrs * sizeof(u64);
2554 if (!cntr || pos >= ret) {
2555 /* everything read, or couldn't get memory */
2556 ret = 0;
2557 goto done;
2558 }
2559 if (pos >= ret) {
2560 ret = 0; /* final read after getting everything */
2561 goto done;
2562 }
2563 *cntrp = cntr;
2564 for (i = 0; i < dd->cspec->ncntrs; i++)
2565 *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
2566 }
2567done:
2568 return ret;
2569}
2570
2571static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
2572 char **namep, u64 **cntrp)
2573{
2574 u32 ret;
2575
2576 if (namep) {
2577 ret = dd->cspec->portcntrnamelen;
2578 if (pos >= ret)
2579 ret = 0; /* final read after getting everything */
2580 else
2581 *namep = (char *)portcntr6120names;
2582 } else {
2583 u64 *cntr = dd->cspec->portcntrs;
2584 struct qib_pportdata *ppd = &dd->pport[port];
2585 int i;
2586
2587 ret = dd->cspec->nportcntrs * sizeof(u64);
2588 if (!cntr || pos >= ret) {
2589 /* everything read, or couldn't get memory */
2590 ret = 0;
2591 goto done;
2592 }
2593 *cntrp = cntr;
2594 for (i = 0; i < dd->cspec->nportcntrs; i++) {
2595 if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
2596 *cntr++ = qib_portcntr_6120(ppd,
2597 portcntr6120indices[i] &
2598 ~_PORT_VIRT_FLAG);
2599 else
2600 *cntr++ = read_6120_creg32(dd,
2601 portcntr6120indices[i]);
2602 }
2603 }
2604done:
2605 return ret;
2606}
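/*
 * Read protocol implied by the two routines above: the caller
 * (presumably the counters file code) first passes namep to fetch the
 * name string, then passes cntrp to fetch the freshly read value
 * array; in either mode a pos at or beyond the returned length yields
 * 0, which marks the end of the data.
 */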
2607
2608static void qib_chk_6120_errormask(struct qib_devdata *dd)
2609{
2610 static u32 fixed;
2611 u32 ctrl;
2612 unsigned long errormask;
2613 unsigned long hwerrs;
2614
2615 if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
2616 return;
2617
2618 errormask = qib_read_kreg64(dd, kr_errmask);
2619
2620 if (errormask == dd->cspec->errormask)
2621 return;
2622 fixed++;
2623
2624 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2625 ctrl = qib_read_kreg32(dd, kr_control);
2626
2627 qib_write_kreg(dd, kr_errmask,
2628 dd->cspec->errormask);
2629
2630 if ((hwerrs & dd->cspec->hwerrmask) ||
2631 (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
2632 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2633 qib_write_kreg(dd, kr_errclear, 0ULL);
2634 /* force re-interrupt of pending events, just in case */
2635 qib_write_kreg(dd, kr_intclear, 0ULL);
2636 qib_devinfo(dd->pcidev,
2637 "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
2638 fixed, errormask, (unsigned long)dd->cspec->errormask,
2639 ctrl, hwerrs);
2640 }
2641}
2642
2643/**
2644 * qib_get_6120_faststats - get word counters from chip before they overflow
2645 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
2646 *
2647 * This needs more work; in particular, a decision on whether we really
2648 * need traffic_wds done the way it is.
2649 * Called as the stats_timer callback.
2650 */
2651static void qib_get_6120_faststats(unsigned long opaque)
2652{
2653 struct qib_devdata *dd = (struct qib_devdata *) opaque;
2654 struct qib_pportdata *ppd = dd->pport;
2655 unsigned long flags;
2656 u64 traffic_wds;
2657
2658 /*
2659 * don't access the chip while running diags, or memory diags can
2660 * fail
2661 */
2662 if (!(dd->flags & QIB_INITTED) || dd->diag_client)
2663		/* but re-arm the timer, for diags case; won't hurt otherwise */
2664 goto done;
2665
2666 /*
2667 * We now try to maintain an activity timer, based on traffic
2668 * exceeding a threshold, so we need to check the word-counts
2669 * even if they are 64-bit.
2670 */
2671 traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
2672 qib_portcntr_6120(ppd, cr_wordrcv);
2673 spin_lock_irqsave(&dd->eep_st_lock, flags);
2674 traffic_wds -= dd->traffic_wds;
2675 dd->traffic_wds += traffic_wds;
2676 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
2677 atomic_add(5, &dd->active_time); /* S/B #define */
2678 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2679
2680 qib_chk_6120_errormask(dd);
2681done:
2682 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
2683}
2684
2685/* no interrupt fallback for these chips */
2686static int qib_6120_nointr_fallback(struct qib_devdata *dd)
2687{
2688 return 0;
2689}
2690
2691/*
2692 * reset the XGXS (between serdes and IBC). Slightly less intrusive
2693 * than resetting the IBC or external link state, and useful in some
2694 * cases to cause some retraining. To do this right, we reset IBC
2695 * as well.
2696 */
2697static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
2698{
2699 u64 val, prev_val;
2700 struct qib_devdata *dd = ppd->dd;
2701
2702 prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
2703 val = prev_val | QLOGIC_IB_XGXS_RESET;
2704 prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
2705 qib_write_kreg(dd, kr_control,
2706 dd->control & ~QLOGIC_IB_C_LINKENABLE);
2707 qib_write_kreg(dd, kr_xgxs_cfg, val);
2708 qib_read_kreg32(dd, kr_scratch);
2709 qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
2710 qib_write_kreg(dd, kr_control, dd->control);
2711}
2712
2713static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
2714{
2715 int ret;
2716
2717 switch (which) {
2718 case QIB_IB_CFG_LWID:
2719 ret = ppd->link_width_active;
2720 break;
2721
2722 case QIB_IB_CFG_SPD:
2723 ret = ppd->link_speed_active;
2724 break;
2725
2726 case QIB_IB_CFG_LWID_ENB:
2727 ret = ppd->link_width_enabled;
2728 break;
2729
2730 case QIB_IB_CFG_SPD_ENB:
2731 ret = ppd->link_speed_enabled;
2732 break;
2733
2734 case QIB_IB_CFG_OP_VLS:
2735 ret = ppd->vls_operational;
2736 break;
2737
2738 case QIB_IB_CFG_VL_HIGH_CAP:
2739 ret = 0;
2740 break;
2741
2742 case QIB_IB_CFG_VL_LOW_CAP:
2743 ret = 0;
2744 break;
2745
2746 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2747 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2748 OverrunThreshold);
2749 break;
2750
2751 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2752 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2753 PhyerrThreshold);
2754 break;
2755
2756 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2757 /* will only take effect when the link state changes */
2758 ret = (ppd->dd->cspec->ibcctrl &
2759 SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2760 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2761 break;
2762
2763 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2764 ret = 0; /* no heartbeat on this chip */
2765 break;
2766
2767 case QIB_IB_CFG_PMA_TICKS:
2768 ret = 250; /* 1 usec. */
2769 break;
2770
2771 default:
2772 ret = -EINVAL;
2773 break;
2774 }
2775 return ret;
2776}
2777
2778/*
2779 * We assume range checking is already done, if needed.
2780 */
2781static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2782{
2783 struct qib_devdata *dd = ppd->dd;
2784 int ret = 0;
2785 u64 val64;
2786 u16 lcmd, licmd;
2787
2788 switch (which) {
2789 case QIB_IB_CFG_LWID_ENB:
2790 ppd->link_width_enabled = val;
2791 break;
2792
2793 case QIB_IB_CFG_SPD_ENB:
2794 ppd->link_speed_enabled = val;
2795 break;
2796
2797 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2798 val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2799 OverrunThreshold);
2800 if (val64 != val) {
2801 dd->cspec->ibcctrl &=
2802 ~SYM_MASK(IBCCtrl, OverrunThreshold);
2803 dd->cspec->ibcctrl |= (u64) val <<
2804 SYM_LSB(IBCCtrl, OverrunThreshold);
2805 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2806 qib_write_kreg(dd, kr_scratch, 0);
2807 }
2808 break;
2809
2810 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2811 val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2812 PhyerrThreshold);
2813 if (val64 != val) {
2814 dd->cspec->ibcctrl &=
2815 ~SYM_MASK(IBCCtrl, PhyerrThreshold);
2816 dd->cspec->ibcctrl |= (u64) val <<
2817 SYM_LSB(IBCCtrl, PhyerrThreshold);
2818 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2819 qib_write_kreg(dd, kr_scratch, 0);
2820 }
2821 break;
2822
2823 case QIB_IB_CFG_PKEYS: /* update pkeys */
2824 val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2825 ((u64) ppd->pkeys[2] << 32) |
2826 ((u64) ppd->pkeys[3] << 48);
2827 qib_write_kreg(dd, kr_partitionkey, val64);
2828 break;
2829
2830 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2831 /* will only take effect when the link state changes */
2832 if (val == IB_LINKINITCMD_POLL)
2833 dd->cspec->ibcctrl &=
2834 ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2835 else /* SLEEP */
2836 dd->cspec->ibcctrl |=
2837 SYM_MASK(IBCCtrl, LinkDownDefaultState);
2838 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2839 qib_write_kreg(dd, kr_scratch, 0);
2840 break;
2841
2842 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2843 /*
2844 * Update our housekeeping variables, and set IBC max
2845 * size, same as init code; max IBC is max we allow in
2846 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
2847 * Set even if it's unchanged, print debug message only
2848 * on changes.
2849 */
2850 val = (ppd->ibmaxlen >> 2) + 1;
2851 dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2852 dd->cspec->ibcctrl |= (u64)val <<
2853 SYM_LSB(IBCCtrl, MaxPktLen);
2854 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2855 qib_write_kreg(dd, kr_scratch, 0);
2856 break;
2857
2858 case QIB_IB_CFG_LSTATE: /* set the IB link state */
2859 switch (val & 0xffff0000) {
2860 case IB_LINKCMD_DOWN:
2861 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2862 if (!dd->cspec->ibdeltainprog) {
2863 dd->cspec->ibdeltainprog = 1;
2864 dd->cspec->ibsymsnap =
2865 read_6120_creg32(dd, cr_ibsymbolerr);
2866 dd->cspec->iblnkerrsnap =
2867 read_6120_creg32(dd, cr_iblinkerrrecov);
2868 }
2869 break;
2870
2871 case IB_LINKCMD_ARMED:
2872 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2873 break;
2874
2875 case IB_LINKCMD_ACTIVE:
2876 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2877 break;
2878
2879 default:
2880 ret = -EINVAL;
2881 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2882 goto bail;
2883 }
2884 switch (val & 0xffff) {
2885 case IB_LINKINITCMD_NOP:
2886 licmd = 0;
2887 break;
2888
2889 case IB_LINKINITCMD_POLL:
2890 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2891 break;
2892
2893 case IB_LINKINITCMD_SLEEP:
2894 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2895 break;
2896
2897 case IB_LINKINITCMD_DISABLE:
2898 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2899 break;
2900
2901 default:
2902 ret = -EINVAL;
2903 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2904 val & 0xffff);
2905 goto bail;
2906 }
2907 qib_set_ib_6120_lstate(ppd, lcmd, licmd);
2908 goto bail;
2909
2910 case QIB_IB_CFG_HRTBT:
2911 ret = -EINVAL;
2912 break;
2913
2914 default:
2915 ret = -EINVAL;
2916 }
2917bail:
2918 return ret;
2919}
2920
2921static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2922{
2923 int ret = 0;
2924 if (!strncmp(what, "ibc", 3)) {
2925 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2926 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2927 ppd->dd->unit, ppd->port);
2928 } else if (!strncmp(what, "off", 3)) {
2929 ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2930 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
2931 "(normal)\n", ppd->dd->unit, ppd->port);
2932 } else
2933 ret = -EINVAL;
2934 if (!ret) {
2935 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
2936 qib_write_kreg(ppd->dd, kr_scratch, 0);
2937 }
2938 return ret;
2939}
2940
2941static void pma_6120_timer(unsigned long data)
2942{
2943 struct qib_pportdata *ppd = (struct qib_pportdata *)data;
2944 struct qib_chip_specific *cs = ppd->dd->cspec;
2945 struct qib_ibport *ibp = &ppd->ibport_data;
2946 unsigned long flags;
2947
2948 spin_lock_irqsave(&ibp->lock, flags);
2949 if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
2950 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2951 qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2952 &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2953 mod_timer(&cs->pma_timer,
2954 jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
2955 } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
2956 u64 ta, tb, tc, td, te;
2957
2958 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2959 qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
2960
2961 cs->sword = ta - cs->sword;
2962 cs->rword = tb - cs->rword;
2963 cs->spkts = tc - cs->spkts;
2964 cs->rpkts = td - cs->rpkts;
2965 cs->xmit_wait = te - cs->xmit_wait;
2966 }
2967 spin_unlock_irqrestore(&ibp->lock, flags);
2968}
2969
2970/*
2971 * Note that the caller has the ibp->lock held.
2972 */
2973static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
2974 u32 start)
2975{
2976 struct qib_chip_specific *cs = ppd->dd->cspec;
2977
2978 if (start && intv) {
2979 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
2980 mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
2981 } else if (intv) {
2982 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2983 qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2984 &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2985 mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
2986 } else {
2987 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2988 cs->sword = 0;
2989 cs->rword = 0;
2990 cs->spkts = 0;
2991 cs->rpkts = 0;
2992 cs->xmit_wait = 0;
2993 }
2994}
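/*
 * Sampling flow, as implemented by pma_6120_timer() and
 * qib_set_cntr_6120_sample() above: in the usual case set_cntr_sample
 * marks the sample STARTED and arms pma_timer for the start delay;
 * when the timer fires, the status moves to RUNNING and the counters
 * are snapshotted; when it fires again after the sample interval, the
 * status moves to DONE and the snapshots are turned into deltas.
 */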
2995
2996static u32 qib_6120_iblink_state(u64 ibcs)
2997{
2998 u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
2999
3000 switch (state) {
3001 case IB_6120_L_STATE_INIT:
3002 state = IB_PORT_INIT;
3003 break;
3004 case IB_6120_L_STATE_ARM:
3005 state = IB_PORT_ARMED;
3006 break;
3007 case IB_6120_L_STATE_ACTIVE:
3008 /* fall through */
3009 case IB_6120_L_STATE_ACT_DEFER:
3010 state = IB_PORT_ACTIVE;
3011 break;
3012 default: /* fall through */
3013 case IB_6120_L_STATE_DOWN:
3014 state = IB_PORT_DOWN;
3015 break;
3016 }
3017 return state;
3018}
3019
3020/* returns the IBTA port state, rather than the IBC link training state */
3021static u8 qib_6120_phys_portstate(u64 ibcs)
3022{
3023 u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3024 return qib_6120_physportstate[state];
3025}
3026
3027static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3028{
3029 unsigned long flags;
3030
3031 spin_lock_irqsave(&ppd->lflags_lock, flags);
3032 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3033 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3034
3035 if (ibup) {
3036 if (ppd->dd->cspec->ibdeltainprog) {
3037 ppd->dd->cspec->ibdeltainprog = 0;
3038 ppd->dd->cspec->ibsymdelta +=
3039 read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
3040 ppd->dd->cspec->ibsymsnap;
3041 ppd->dd->cspec->iblnkerrdelta +=
3042 read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
3043 ppd->dd->cspec->iblnkerrsnap;
3044 }
3045 qib_hol_init(ppd);
3046 } else {
3047 ppd->dd->cspec->lli_counter = 0;
3048 if (!ppd->dd->cspec->ibdeltainprog) {
3049 ppd->dd->cspec->ibdeltainprog = 1;
3050 ppd->dd->cspec->ibsymsnap =
3051 read_6120_creg32(ppd->dd, cr_ibsymbolerr);
3052 ppd->dd->cspec->iblnkerrsnap =
3053 read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
3054 }
3055 qib_hol_down(ppd);
3056 }
3057
3058 qib_6120_setup_setextled(ppd, ibup);
3059
3060 return 0;
3061}
3062
3063/* Does a read/modify/write to the appropriate registers to
3064 * set output and direction bits selected by mask.
3065 * These are in their canonical positions (e.g. lsb of
3066 * dir will end up in D48 of extctrl on existing chips).
3067 * Returns contents of GP Inputs.
3068 */
3069static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3070{
3071 u64 read_val, new_out;
3072 unsigned long flags;
3073
3074 if (mask) {
3075 /* some bits being written, lock access to GPIO */
3076 dir &= mask;
3077 out &= mask;
3078 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3079 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3080 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3081 new_out = (dd->cspec->gpio_out & ~mask) | out;
3082
3083 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3084 qib_write_kreg(dd, kr_gpio_out, new_out);
3085 dd->cspec->gpio_out = new_out;
3086 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3087 }
3088 /*
3089 * It is unlikely that a read at this time would get valid
3090 * data on a pin whose direction line was set in the same
3091 * call to this function. We include the read here because
3092 * that allows us to potentially combine a change on one pin with
3093 * a read on another, and because the old code did something like
3094 * this.
3095 */
3096 read_val = qib_read_kreg64(dd, kr_extstatus);
3097 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
3098}
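/*
 * A minimal usage sketch for gpio_6120_mod(), with a hypothetical pin
 * number: drive GPIO pin 3 high as an output and read back the GPIO
 * inputs.  Pin 3 is only an example; real callers take the pin
 * numbers from fields such as dd->gpio_sda_num/gpio_scl_num set up in
 * init_6120_variables() below.
 */
#if 0
static u32 example_gpio_6120(struct qib_devdata *dd)
{
	u32 pin = 3;	/* hypothetical pin, for illustration only */

	/* out = 1, dir = 1 (output), both restricted to this pin by mask */
	return gpio_6120_mod(dd, 1U << pin, 1U << pin, 1U << pin);
}
#endif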
3099
3100/*
3101 * Read fundamental info we need to use the chip. These are
3102 * the registers that describe chip capabilities, and are
3103 * saved in shadow registers.
3104 */
3105static void get_6120_chip_params(struct qib_devdata *dd)
3106{
3107 u64 val;
3108 u32 piobufs;
3109 int mtu;
3110
3111 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3112
3113 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3114 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3115 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3116 dd->palign = qib_read_kreg32(dd, kr_palign);
3117 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3118 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3119
3120 dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3121
3122 val = qib_read_kreg64(dd, kr_sendpiosize);
3123 dd->piosize2k = val & ~0U;
3124 dd->piosize4k = val >> 32;
3125
3126 mtu = ib_mtu_enum_to_int(qib_ibmtu);
3127 if (mtu == -1)
3128 mtu = QIB_DEFAULT_MTU;
3129 dd->pport->ibmtu = (u32)mtu;
3130
3131 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3132 dd->piobcnt2k = val & ~0U;
3133 dd->piobcnt4k = val >> 32;
3134 /* these may be adjusted in init_chip_wc_pat() */
3135 dd->pio2kbase = (u32 __iomem *)
3136 (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
3137 if (dd->piobcnt4k) {
3138 dd->pio4kbase = (u32 __iomem *)
3139 (((char __iomem *) dd->kregbase) +
3140 (dd->piobufbase >> 32));
3141 /*
3142 * 4K buffers take 2 pages; we use roundup just to be
3143 * paranoid; we calculate it once here, rather than on
3144		 * every buf allocation
3145 */
3146 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3147 }
3148
3149 piobufs = dd->piobcnt4k + dd->piobcnt2k;
3150
3151 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3152 (sizeof(u64) * BITS_PER_BYTE / 2);
3153}
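/*
 * Worked example for the pioavregs computation above (the buffer
 * counts are hypothetical): each 64-bit pioavail shadow register
 * carries 2 bits of state per send buffer, i.e. 32 buffers per
 * register, which is what sizeof(u64) * BITS_PER_BYTE / 2 evaluates
 * to.  With, say, piobcnt2k = 128 and piobcnt4k = 32, piobufs = 160
 * and pioavregs = ALIGN(160, 32) / 32 = 5 shadow registers.
 */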
3154
3155/*
3156 * The chip base addresses in cspec and cpspec have to be set
3157 * after possible init_chip_wc_pat(), rather than in
3158 * get_6120_chip_params(), so split out as separate function
3159 */
3160static void set_6120_baseaddrs(struct qib_devdata *dd)
3161{
3162 u32 cregbase;
3163 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3164 dd->cspec->cregbase = (u64 __iomem *)
3165 ((char __iomem *) dd->kregbase + cregbase);
3166
3167 dd->egrtidbase = (u64 __iomem *)
3168 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3169}
3170
3171/*
3172 * Write the final few registers that depend on some of the
3173 * init setup. Done late in init, just before bringing up
3174 * the serdes.
3175 */
3176static int qib_late_6120_initreg(struct qib_devdata *dd)
3177{
3178 int ret = 0;
3179 u64 val;
3180
3181 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3182 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3183 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3184 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3185 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3186 if (val != dd->pioavailregs_phys) {
3187 qib_dev_err(dd, "Catastrophic software error, "
3188 "SendPIOAvailAddr written as %lx, "
3189 "read back as %llx\n",
3190 (unsigned long) dd->pioavailregs_phys,
3191 (unsigned long long) val);
3192 ret = -EINVAL;
3193 }
3194 return ret;
3195}
3196
3197static int init_6120_variables(struct qib_devdata *dd)
3198{
3199 int ret = 0;
3200 struct qib_pportdata *ppd;
3201 u32 sbufs;
3202
3203 ppd = (struct qib_pportdata *)(dd + 1);
3204 dd->pport = ppd;
3205 dd->num_pports = 1;
3206
3207 dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
3208 ppd->cpspec = NULL; /* not used in this chip */
3209
3210 spin_lock_init(&dd->cspec->kernel_tid_lock);
3211 spin_lock_init(&dd->cspec->user_tid_lock);
3212 spin_lock_init(&dd->cspec->rcvmod_lock);
3213 spin_lock_init(&dd->cspec->gpio_lock);
3214
3215 /* we haven't yet set QIB_PRESENT, so use read directly */
3216 dd->revision = readq(&dd->kregbase[kr_revision]);
3217
3218 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3219 qib_dev_err(dd, "Revision register read failure, "
3220 "giving up initialization\n");
3221 ret = -ENODEV;
3222 goto bail;
3223 }
3224 dd->flags |= QIB_PRESENT; /* now register routines work */
3225
3226 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3227 ChipRevMajor);
3228 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3229 ChipRevMinor);
3230
3231 get_6120_chip_params(dd);
3232 pe_boardname(dd); /* fill in boardname */
3233
3234 /*
3235 * GPIO bits for TWSI data and clock,
3236 * used for serial EEPROM.
3237 */
3238 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
3239 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
3240 dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
3241
3242 if (qib_unordered_wc())
3243 dd->flags |= QIB_PIO_FLUSH_WC;
3244
3245 /*
3246 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
3247 * 2 is Some Misc, 3 is reserved for future.
3248 */
3249 dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
3250
3251 /* Ignore errors in PIO/PBC on systems with unordered write-combining */
3252 if (qib_unordered_wc())
3253 dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
3254
3255 dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
3256
3257 dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
3258
3259 qib_init_pportdata(ppd, dd, 0, 1);
3260 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
3261 ppd->link_speed_supported = QIB_IB_SDR;
3262 ppd->link_width_enabled = IB_WIDTH_4X;
3263 ppd->link_speed_enabled = ppd->link_speed_supported;
3264 /* these can't change for this chip, so set once */
3265 ppd->link_width_active = ppd->link_width_enabled;
3266 ppd->link_speed_active = ppd->link_speed_enabled;
3267 ppd->vls_supported = IB_VL_VL0;
3268 ppd->vls_operational = ppd->vls_supported;
3269
3270 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
3271 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
3272 dd->rhf_offset = 0;
3273
3274 /* we always allocate at least 2048 bytes for eager buffers */
3275 ret = ib_mtu_enum_to_int(qib_ibmtu);
3276 dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
3277
3278 qib_6120_tidtemplate(dd);
3279
3280 /*
3281 * We can request a receive interrupt for 1 or
3282 * more packets from current offset. For now, we set this
3283 * up for a single packet.
3284 */
3285 dd->rhdrhead_intr_off = 1ULL << 32;
3286
3287 /* setup the stats timer; the add_timer is done at end of init */
3288 init_timer(&dd->stats_timer);
3289 dd->stats_timer.function = qib_get_6120_faststats;
3290 dd->stats_timer.data = (unsigned long) dd;
3291
3292 init_timer(&dd->cspec->pma_timer);
3293 dd->cspec->pma_timer.function = pma_6120_timer;
3294 dd->cspec->pma_timer.data = (unsigned long) ppd;
3295
3296 dd->ureg_align = qib_read_kreg32(dd, kr_palign);
3297
3298 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
3299 qib_6120_config_ctxts(dd);
3300 qib_set_ctxtcnt(dd);
3301
3302 if (qib_wc_pat) {
3303 ret = init_chip_wc_pat(dd, 0);
3304 if (ret)
3305 goto bail;
3306 }
3307 set_6120_baseaddrs(dd); /* set chip access pointers now */
3308
3309 ret = 0;
3310 if (qib_mini_init)
3311 goto bail;
3312
3313 qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
3314
3315 ret = qib_create_ctxts(dd);
3316 init_6120_cntrnames(dd);
3317
3318	/* use all of the 4KB buffers for the kernel, otherwise 16 */
3319 sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
3320
3321 dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
3322 dd->pbufsctxt = dd->lastctxt_piobuf /
3323 (dd->cfgctxts - dd->first_user_ctxt);
3324
3325 if (ret)
3326 goto bail;
3327bail:
3328 return ret;
3329}
3330
3331/*
3332 * For this chip, we want to use the same buffer every time
3333 * when we are trying to bring the link up (they are always VL15
3334 * packets). At that link state the packet should always go out immediately
3335 * (or at least be discarded at the tx interface if the link is down).
3336 * If it doesn't, and the buffer isn't available, that means some other
3337 * sender has gotten ahead of us, and is preventing our packet from going
3338 * out. In that case, we flush all packets, and try again. If that still
3339 * fails, we fail the request, and hope things work the next time around.
3340 *
3341 * We don't need very complicated heuristics on whether the packet had
3342 * time to go out or not, since even at SDR 1X, it goes out in very short
3343 * time periods, covered by the chip reads done here and as part of the
3344 * flush.
3345 */
3346static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3347{
3348 u32 __iomem *buf;
3349 u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
3350
3351 /*
3352 * always blip to get avail list updated, since it's almost
3353 * always needed, and is fairly cheap.
3354 */
3355 sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3356 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3357 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3358 if (buf)
3359 goto done;
3360
3361 sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
3362 QIB_SENDCTRL_AVAIL_BLIP);
3363 ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
3364 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3365 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3366done:
3367 return buf;
3368}
3369
3370static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
3371 u32 *pbufnum)
3372{
3373 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
3374 struct qib_devdata *dd = ppd->dd;
3375 u32 __iomem *buf;
3376
3377 if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
3378 !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
3379 buf = get_6120_link_buf(ppd, pbufnum);
3380 else {
3381
3382 if ((plen + 1) > dd->piosize2kmax_dwords)
3383 first = dd->piobcnt2k;
3384 else
3385 first = 0;
3386 /* try 4k if all 2k busy, so same last for both sizes */
3387 last = dd->piobcnt2k + dd->piobcnt4k - 1;
3388 buf = qib_getsendbuf_range(dd, pbufnum, first, last);
3389 }
3390 return buf;
3391}
3392
3393static int init_sdma_6120_regs(struct qib_pportdata *ppd)
3394{
3395 return -ENODEV;
3396}
3397
3398static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
3399{
3400 return 0;
3401}
3402
3403static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
3404{
3405 return 0;
3406}
3407
3408static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
3409{
3410}
3411
3412static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
3413{
3414}
3415
3416static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
3417{
3418}
3419
3420/*
3421 * The pbc doesn't need a VL15 indicator, but we need it to pick the link buffer.
3422 * The chip ignores the bit if set.
3423 */
3424static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
3425 u8 srate, u8 vl)
3426{
3427 return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
3428}
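/*
 * A minimal sketch of how the value returned above is consumed,
 * assuming the common send path builds the 64-bit pbc roughly as
 * below (the helper name build_pbc_6120() is hypothetical): the
 * length goes in the low word and the chip-specific control flags in
 * the high word, which is where qib_6120_getsendbuf() checks for
 * PBC_6120_VL15_SEND_CTRL.
 */
#if 0
static u64 build_pbc_6120(struct qib_pportdata *ppd, u32 plen, u8 srate, u8 vl)
{
	return ((u64)qib_6120_setpbc_control(ppd, plen, srate, vl) << 32) |
	       plen;
}
#endif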
3429
3430static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
3431{
3432}
3433
3434static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
3435{
3436 rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
3437 rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
3438}
3439
3440static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
3441 u32 len, u32 avail, struct qib_ctxtdata *rcd)
3442{
3443}
3444
3445static void writescratch(struct qib_devdata *dd, u32 val)
3446{
3447 (void) qib_write_kreg(dd, kr_scratch, val);
3448}
3449
3450static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
3451{
3452 return -ENXIO;
3453}
3454
3455/* Dummy function, as 6120 boards never disable EEPROM Write */
3456static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
3457{
3458 return 1;
3459}
3460
3461/**
3462 * qib_init_iba6120_funcs - set up the chip-specific function pointers
3463 * @pdev: pci_dev of the qlogic_ib device
3464 * @ent: pci_device_id matching this chip
3465 *
3466 * This is global, and is called directly at init to set up the
3467 * chip-specific function pointers for later use.
3468 *
3469 * It also allocates/partially-inits the qib_devdata struct for
3470 * this device.
3471 */
3472struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
3473 const struct pci_device_id *ent)
3474{
3475 struct qib_devdata *dd;
3476 int ret;
3477
3478 dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
3479 sizeof(struct qib_chip_specific));
3480 if (IS_ERR(dd))
3481 goto bail;
3482
3483 dd->f_bringup_serdes = qib_6120_bringup_serdes;
3484 dd->f_cleanup = qib_6120_setup_cleanup;
3485 dd->f_clear_tids = qib_6120_clear_tids;
3486 dd->f_free_irq = qib_6120_free_irq;
3487 dd->f_get_base_info = qib_6120_get_base_info;
3488 dd->f_get_msgheader = qib_6120_get_msgheader;
3489 dd->f_getsendbuf = qib_6120_getsendbuf;
3490 dd->f_gpio_mod = gpio_6120_mod;
3491 dd->f_eeprom_wen = qib_6120_eeprom_wen;
3492 dd->f_hdrqempty = qib_6120_hdrqempty;
3493 dd->f_ib_updown = qib_6120_ib_updown;
3494 dd->f_init_ctxt = qib_6120_init_ctxt;
3495 dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
3496 dd->f_intr_fallback = qib_6120_nointr_fallback;
3497 dd->f_late_initreg = qib_late_6120_initreg;
3498 dd->f_setpbc_control = qib_6120_setpbc_control;
3499 dd->f_portcntr = qib_portcntr_6120;
3500 dd->f_put_tid = (dd->minrev >= 2) ?
3501 qib_6120_put_tid_2 :
3502 qib_6120_put_tid;
3503 dd->f_quiet_serdes = qib_6120_quiet_serdes;
3504 dd->f_rcvctrl = rcvctrl_6120_mod;
3505 dd->f_read_cntrs = qib_read_6120cntrs;
3506 dd->f_read_portcntrs = qib_read_6120portcntrs;
3507 dd->f_reset = qib_6120_setup_reset;
3508 dd->f_init_sdma_regs = init_sdma_6120_regs;
3509 dd->f_sdma_busy = qib_sdma_6120_busy;
3510 dd->f_sdma_gethead = qib_sdma_6120_gethead;
3511 dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
3512 dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
3513 dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
3514 dd->f_sendctrl = sendctrl_6120_mod;
3515 dd->f_set_armlaunch = qib_set_6120_armlaunch;
3516 dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
3517 dd->f_iblink_state = qib_6120_iblink_state;
3518 dd->f_ibphys_portstate = qib_6120_phys_portstate;
3519 dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
3520 dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
3521 dd->f_set_ib_loopback = qib_6120_set_loopback;
3522 dd->f_set_intr_state = qib_6120_set_intr_state;
3523 dd->f_setextled = qib_6120_setup_setextled;
3524 dd->f_txchk_change = qib_6120_txchk_change;
3525 dd->f_update_usrhead = qib_update_6120_usrhead;
3526 dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
3527 dd->f_xgxs_reset = qib_6120_xgxs_reset;
3528 dd->f_writescratch = writescratch;
3529 dd->f_tempsense_rd = qib_6120_tempsense_rd;
3530 /*
3531 * Do remaining pcie setup and save pcie values in dd.
3532 * Any error printing is already done by the init code.
3533 * On return, we have the chip mapped and accessible,
3534 * but chip registers are not set up until start of
3535 * init_6120_variables.
3536 */
3537 ret = qib_pcie_ddinit(dd, pdev, ent);
3538 if (ret < 0)
3539 goto bail_free;
3540
3541 /* initialize chip-specific variables */
3542 ret = init_6120_variables(dd);
3543 if (ret)
3544 goto bail_cleanup;
3545
3546 if (qib_mini_init)
3547 goto bail;
3548
3549 if (qib_pcie_params(dd, 8, NULL, NULL))
3550 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
3551 "continuing anyway\n");
3552 dd->cspec->irq = pdev->irq; /* save IRQ */
3553
3554 /* clear diagctrl register, in case diags were running and crashed */
3555 qib_write_kreg(dd, kr_hwdiagctrl, 0);
3556
3557 if (qib_read_kreg64(dd, kr_hwerrstatus) &
3558 QLOGIC_IB_HWE_SERDESPLLFAILED)
3559 qib_write_kreg(dd, kr_hwerrclear,
3560 QLOGIC_IB_HWE_SERDESPLLFAILED);
3561
3562 /* setup interrupt handler (interrupt type handled above) */
3563 qib_setup_6120_interrupt(dd);
3564 /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
3565 qib_6120_init_hwerrors(dd);
3566
3567 goto bail;
3568
3569bail_cleanup:
3570 qib_pcie_ddcleanup(dd);
3571bail_free:
3572 qib_free_devdata(dd);
3573 dd = ERR_PTR(ret);
3574bail:
3575 return dd;
3576}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 000000000000..6fd8d74e7392
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34/*
35 * This file contains all of the code that is specific to the
36 * QLogic_IB 7220 chip (except that specific to the SerDes)
37 */
38
39#include <linux/interrupt.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <linux/io.h>
43#include <rdma/ib_verbs.h>
44
45#include "qib.h"
46#include "qib_7220.h"
47
48static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
49static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
50static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
51static u32 qib_7220_iblink_state(u64);
52static u8 qib_7220_phys_portstate(u64);
53static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
54static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
55
56/*
57 * This file contains almost all the chip-specific register information and
58 * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
59 * exception of SerDes support, which is in qib_sd7220.c.
60 */
61
62/* Below uses machine-generated qib_chipnum_regs.h file */
63#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
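/*
 * For example, KREG_IDX(Scratch) expands to
 * QIB_7220_Scratch_OFFS / sizeof(u64): the byte offset from the
 * machine-generated header becomes a qword index suitable for
 * indexing the u64 __iomem kregbase array.
 */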
64
65/* Use defines to tie machine-generated names to lower-case names */
66#define kr_control KREG_IDX(Control)
67#define kr_counterregbase KREG_IDX(CntrRegBase)
68#define kr_errclear KREG_IDX(ErrClear)
69#define kr_errmask KREG_IDX(ErrMask)
70#define kr_errstatus KREG_IDX(ErrStatus)
71#define kr_extctrl KREG_IDX(EXTCtrl)
72#define kr_extstatus KREG_IDX(EXTStatus)
73#define kr_gpio_clear KREG_IDX(GPIOClear)
74#define kr_gpio_mask KREG_IDX(GPIOMask)
75#define kr_gpio_out KREG_IDX(GPIOOut)
76#define kr_gpio_status KREG_IDX(GPIOStatus)
77#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
78#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
79#define kr_hwerrclear KREG_IDX(HwErrClear)
80#define kr_hwerrmask KREG_IDX(HwErrMask)
81#define kr_hwerrstatus KREG_IDX(HwErrStatus)
82#define kr_ibcctrl KREG_IDX(IBCCtrl)
83#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
84#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
85#define kr_ibcstatus KREG_IDX(IBCStatus)
86#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
87#define kr_intclear KREG_IDX(IntClear)
88#define kr_intmask KREG_IDX(IntMask)
89#define kr_intstatus KREG_IDX(IntStatus)
90#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
91#define kr_palign KREG_IDX(PageAlign)
92#define kr_partitionkey KREG_IDX(RcvPartitionKey)
93#define kr_portcnt KREG_IDX(PortCnt)
94#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
95#define kr_rcvctrl KREG_IDX(RcvCtrl)
96#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
97#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
98#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
99#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
100#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
101#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
102#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
103#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
104#define kr_revision KREG_IDX(Revision)
105#define kr_scratch KREG_IDX(Scratch)
106#define kr_sendbuffererror KREG_IDX(SendBufErr0)
107#define kr_sendctrl KREG_IDX(SendCtrl)
108#define kr_senddmabase KREG_IDX(SendDmaBase)
109#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
110#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
111#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
112#define kr_senddmahead KREG_IDX(SendDmaHead)
113#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
114#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
115#define kr_senddmastatus KREG_IDX(SendDmaStatus)
116#define kr_senddmatail KREG_IDX(SendDmaTail)
117#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
118#define kr_sendpiobufbase KREG_IDX(SendBufBase)
119#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
120#define kr_sendpiosize KREG_IDX(SendBufSize)
121#define kr_sendregbase KREG_IDX(SendRegBase)
122#define kr_userregbase KREG_IDX(UserRegBase)
123#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
124
125/* These must only be written via qib_write_kreg_ctxt() */
126#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
127#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
128
129
130#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
131 QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
132
133#define cr_badformat CREG_IDX(RxVersionErrCnt)
134#define cr_erricrc CREG_IDX(RxICRCErrCnt)
135#define cr_errlink CREG_IDX(RxLinkMalformCnt)
136#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
137#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
138#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
139#define cr_err_rlen CREG_IDX(RxLenErrCnt)
140#define cr_errslen CREG_IDX(TxLenErrCnt)
141#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
142#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
143#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
144#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
145#define cr_lbint CREG_IDX(LBIntCnt)
146#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
147#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
148#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
149#define cr_pktrcv CREG_IDX(RxDataPktCnt)
150#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
151#define cr_pktsend CREG_IDX(TxDataPktCnt)
152#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
153#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
154#define cr_rcvebp CREG_IDX(RxEBPCnt)
155#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
156#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
157#define cr_sendstall CREG_IDX(TxFlowStallCnt)
158#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
159#define cr_wordrcv CREG_IDX(RxDwordCnt)
160#define cr_wordsend CREG_IDX(TxDwordCnt)
161#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
162#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
163#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
164#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
165#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
166#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
167#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
168#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
169#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
170#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
171#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
172#define cr_psstat CREG_IDX(PSStat)
173#define cr_psstart CREG_IDX(PSStart)
174#define cr_psinterval CREG_IDX(PSInterval)
175#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
176#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
177#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
178#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
179#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
180#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
181#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
182
183#define SYM_RMASK(regname, fldname) ((u64) \
184 QIB_7220_##regname##_##fldname##_RMASK)
185#define SYM_MASK(regname, fldname) ((u64) \
186 QIB_7220_##regname##_##fldname##_RMASK << \
187 QIB_7220_##regname##_##fldname##_LSB)
188#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
189#define SYM_FIELD(value, regname, fldname) ((u64) \
190 (((value) >> SYM_LSB(regname, fldname)) & \
191 SYM_RMASK(regname, fldname)))
192#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
193#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
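/*
 * For example, SYM_MASK(IBCDDRCtrl, HRTBT_ENB) is
 * QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK shifted left by
 * QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB, and
 * SYM_FIELD(val, IBCDDRCtrl, HRTBT_ENB) recovers the field by
 * shifting val right by the same LSB and masking with the RMASK.
 */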
194
195/* ibcctrl bits */
196#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
197/* cycle through TS1/TS2 till OK */
198#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
199/* wait for TS1, then go on */
200#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
201#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
202
203#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
204#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
205#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
206
207#define BLOB_7220_IBCHG 0x81
208
209/*
210 * We could have a single register get/put routine, that takes a group type,
211 * but this is somewhat clearer and cleaner. It also gives us some error
212 * checking. 64 bit register reads should always work, but are inefficient
213 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
214 * so we use kreg32 wherever possible. User register and counter register
215 * reads are always 32 bit reads, so only one form of those routines.
216 */
217
218/**
219 * qib_read_ureg32 - read 32-bit virtualized per-context register
220 * @dd: device
221 * @regno: register number
222 * @ctxt: context number
223 *
224 * Return the contents of a register that is virtualized to be per context.
225 * Returns -1 on errors (not distinguishable from valid contents at
226 * runtime; we may add a separate error variable at some point).
227 */
228static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
229 enum qib_ureg regno, int ctxt)
230{
231 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
232 return 0;
233
234 if (dd->userbase)
235 return readl(regno + (u64 __iomem *)
236 ((char __iomem *)dd->userbase +
237 dd->ureg_align * ctxt));
238 else
239 return readl(regno + (u64 __iomem *)
240 (dd->uregbase +
241 (char __iomem *)dd->kregbase +
242 dd->ureg_align * ctxt));
243}
244
245/**
246 * qib_write_ureg - write 32-bit virtualized per-context register
247 * @dd: device
248 * @regno: register number
249 * @value: value
250 * @ctxt: context
251 *
252 * Write the contents of a register that is virtualized to be per context.
253 */
254static inline void qib_write_ureg(const struct qib_devdata *dd,
255 enum qib_ureg regno, u64 value, int ctxt)
256{
257 u64 __iomem *ubase;
258
259 if (dd->userbase)
260 ubase = (u64 __iomem *)
261 ((char __iomem *) dd->userbase +
262 dd->ureg_align * ctxt);
263 else
264 ubase = (u64 __iomem *)
265 (dd->uregbase +
266 (char __iomem *) dd->kregbase +
267 dd->ureg_align * ctxt);
268
269 if (dd->kregbase && (dd->flags & QIB_PRESENT))
270 writeq(value, &ubase[regno]);
271}
272
273/**
274 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
275 * @dd: the qlogic_ib device
276 * @regno: the register number to write
277 * @ctxt: the context containing the register
278 * @value: the value to write
279 */
280static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
281 const u16 regno, unsigned ctxt,
282 u64 value)
283{
284 qib_write_kreg(dd, regno + ctxt, value);
285}
286
287static inline void write_7220_creg(const struct qib_devdata *dd,
288 u16 regno, u64 value)
289{
290 if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
291 writeq(value, &dd->cspec->cregbase[regno]);
292}
293
294static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
295{
296 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
297 return 0;
298 return readq(&dd->cspec->cregbase[regno]);
299}
300
301static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
302{
303 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
304 return 0;
305 return readl(&dd->cspec->cregbase[regno]);
306}
307
308/* kr_revision bits */
309#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
310#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
311
312/* kr_control bits */
313#define QLOGIC_IB_C_RESET (1U << 7)
314
315/* kr_intstatus, kr_intclear, kr_intmask bits */
316#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
317#define QLOGIC_IB_I_RCVURG_SHIFT 32
318#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
319#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
320#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
321
322#define QLOGIC_IB_C_FREEZEMODE 0x00000002
323#define QLOGIC_IB_C_LINKENABLE 0x00000004
324
325#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
326#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
327#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
328#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
329#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
330#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
331
332/* variables for sanity checking interrupt and errors */
333#define QLOGIC_IB_I_BITSEXTANT \
334 (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
335 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
336 (QLOGIC_IB_I_RCVAVAIL_MASK << \
337 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
338 QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
339 QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
340 QLOGIC_IB_I_SERDESTRIMDONE)
341
342#define IB_HWE_BITSEXTANT \
343 (HWE_MASK(RXEMemParityErr) | \
344 HWE_MASK(TXEMemParityErr) | \
345 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
346 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
347 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
348 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
349 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
350 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
351 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
352 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
353 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
354 HWE_MASK(PowerOnBISTFailed) | \
355 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
356 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
357 QLOGIC_IB_HWE_SERDESPLLFAILED | \
358 HWE_MASK(IBCBusToSPCParityErr) | \
359 HWE_MASK(IBCBusFromSPCParityErr) | \
360 QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
361 QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
362 QLOGIC_IB_HWE_SDMAMEMREADERR | \
363 QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
364 QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
365 QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
366 QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
367 QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
368 QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
369 QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
370 QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
371 QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
372
373#define IB_E_BITSEXTANT \
374 (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
375 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
376 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
377 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
378 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
379 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
380 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
381 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
382 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
383 ERR_MASK(SendSpecialTriggerErr) | \
384 ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
385 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
386 ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
387 ERR_MASK(SendDroppedDataPktErr) | \
388 ERR_MASK(SendPioArmLaunchErr) | \
389 ERR_MASK(SendUnexpectedPktNumErr) | \
390 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
391 ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
392 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
393 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
394 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
395 ERR_MASK(SDmaUnexpDataErr) | \
396 ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
397 ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
398 ERR_MASK(SDmaDescAddrMisalignErr) | \
399 ERR_MASK(InvalidEEPCmd))
400
401/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
402#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
403#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
404#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
405#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
406#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
407#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
408#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
409#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
410#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
411#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
412#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
413#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
414/* specific to this chip */
415#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
416#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
417#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
418#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
419#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
420#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
421#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
422#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
423#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
424#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
425#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
426#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
427
428#define IBA7220_IBCC_LINKCMD_SHIFT 19
429
430/* kr_ibcddrctrl bits */
431#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
432#define IBA7220_IBC_DLIDLMC_SHIFT 32
433
434#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
435 SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
436#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
437
438#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
439#define IBA7220_IBC_LREV_MASK 1
440#define IBA7220_IBC_LREV_SHIFT 8
441#define IBA7220_IBC_RXPOL_MASK 1
442#define IBA7220_IBC_RXPOL_SHIFT 7
443#define IBA7220_IBC_WIDTH_SHIFT 5
444#define IBA7220_IBC_WIDTH_MASK 0x3
445#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
446#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
447#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
448#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
449#define IBA7220_IBC_SPEED_SDR (1 << 2)
450#define IBA7220_IBC_SPEED_DDR (1 << 3)
451#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
452#define IBA7220_IBC_IBTA_1_2_MASK (1)
453
454/* kr_ibcddrstatus */
455/* link latency shift is 0, don't bother defining */
456#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
457
458/* kr_extstatus bits */
459#define QLOGIC_IB_EXTS_FREQSEL 0x2
460#define QLOGIC_IB_EXTS_SERDESSEL 0x4
461#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
462#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
463
464/* kr_xgxsconfig bits */
465#define QLOGIC_IB_XGXS_RESET 0x5ULL
466#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
467
468/* kr_rcvpktledcnt */
469#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
470#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
471
472#define _QIB_GPIO_SDA_NUM 1
473#define _QIB_GPIO_SCL_NUM 0
474#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
475#define QIB_TWSI_TEMP_DEV 0x98
476
477/* HW counter clock is at 4nsec */
478#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
479
480#define IBA7220_R_INTRAVAIL_SHIFT 17
481#define IBA7220_R_PKEY_DIS_SHIFT 34
482#define IBA7220_R_TAILUPD_SHIFT 35
483#define IBA7220_R_CTXTCFG_SHIFT 36
484
485#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
486
487/*
488 * the size bits give us 2^N, in KB units. 0 marks the entry as invalid,
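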
489 * and 7 is reserved. We currently use only 2KB and 4KB
490 */
491#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
492#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
493#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
494#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
495#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
496#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
497
498#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
499
500/* packet rate matching delay multiplier */
501static u8 rate_to_delay[2][2] = {
502 /* 1x, 4x */
503 { 8, 2 }, /* SDR */
504 { 4, 1 } /* DDR */
505};
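/*
 * Indexed as rate_to_delay[speed][width], with speed 0 = SDR / 1 = DDR
 * and width 0 = 1X / 1 = 4X (per the row/column comments above), so the
 * rate-matching delay multiplier scales inversely with link bandwidth.
 */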
506
507static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
508 [IB_RATE_2_5_GBPS] = 8,
509 [IB_RATE_5_GBPS] = 4,
510 [IB_RATE_10_GBPS] = 2,
511 [IB_RATE_20_GBPS] = 1
512};
513
514#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
515#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
516
517/* link training states, from IBC */
518#define IB_7220_LT_STATE_DISABLED 0x00
519#define IB_7220_LT_STATE_LINKUP 0x01
520#define IB_7220_LT_STATE_POLLACTIVE 0x02
521#define IB_7220_LT_STATE_POLLQUIET 0x03
522#define IB_7220_LT_STATE_SLEEPDELAY 0x04
523#define IB_7220_LT_STATE_SLEEPQUIET 0x05
524#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
525#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
526#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
527#define IB_7220_LT_STATE_CFGIDLE 0x0b
528#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
529#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
530#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
531
532/* link state machine states from IBC */
533#define IB_7220_L_STATE_DOWN 0x0
534#define IB_7220_L_STATE_INIT 0x1
535#define IB_7220_L_STATE_ARM 0x2
536#define IB_7220_L_STATE_ACTIVE 0x3
537#define IB_7220_L_STATE_ACT_DEFER 0x4
538
539static const u8 qib_7220_physportstate[0x20] = {
540 [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
541 [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
542 [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
543 [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
544 [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
545 [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
546 [IB_7220_LT_STATE_CFGDEBOUNCE] =
547 IB_PHYSPORTSTATE_CFG_TRAIN,
548 [IB_7220_LT_STATE_CFGRCVFCFG] =
549 IB_PHYSPORTSTATE_CFG_TRAIN,
550 [IB_7220_LT_STATE_CFGWAITRMT] =
551 IB_PHYSPORTSTATE_CFG_TRAIN,
552 [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
553 [IB_7220_LT_STATE_RECOVERRETRAIN] =
554 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
555 [IB_7220_LT_STATE_RECOVERWAITRMT] =
556 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
557 [IB_7220_LT_STATE_RECOVERIDLE] =
558 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
559 [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
560 [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
561 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
562 [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
563 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
564 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
565 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
566 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
567};
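/*
 * This table maps the IBC LinkTrainingState value (0x00-0x1f) onto the
 * generic IB_PHYSPORTSTATE_* codes used by the rest of the driver; the
 * 0x10-0x17 entries are conservatively reported as CFG_TRAIN, and any
 * entry not explicitly listed is left zero by the designated initializers.
 */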
568
569int qib_special_trigger;
570module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
571MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
572
573#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
574#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
575
576#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
577 (1ULL << (SYM_LSB(regname, fldname) + (bit))))
578
579#define TXEMEMPARITYERR_PIOBUF \
580 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
581#define TXEMEMPARITYERR_PIOPBC \
582 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
583#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
584 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
585
586#define RXEMEMPARITYERR_RCVBUF \
587 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
588#define RXEMEMPARITYERR_LOOKUPQ \
589 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
590#define RXEMEMPARITYERR_EXPTID \
591 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
592#define RXEMEMPARITYERR_EAGERTID \
593 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
594#define RXEMEMPARITYERR_FLAGBUF \
595 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
596#define RXEMEMPARITYERR_DATAINFO \
597 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
598#define RXEMEMPARITYERR_HDRINFO \
599 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
600
601/* 7220 specific hardware errors... */
602static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
603 /* generic hardware errors */
604 QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
605 QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
606
607 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
608 "TXE PIOBUF Memory Parity"),
609 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
610 "TXE PIOPBC Memory Parity"),
611 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
612 "TXE PIOLAUNCHFIFO Memory Parity"),
613
614 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
615 "RXE RCVBUF Memory Parity"),
616 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
617 "RXE LOOKUPQ Memory Parity"),
618 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
619 "RXE EAGERTID Memory Parity"),
620 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
621 "RXE EXPTID Memory Parity"),
622 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
623 "RXE FLAGBUF Memory Parity"),
624 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
625 "RXE DATAINFO Memory Parity"),
626 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
627 "RXE HDRINFO Memory Parity"),
628
629 /* chip-specific hardware errors */
630 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
631 "PCIe Poisoned TLP"),
632 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
633 "PCIe completion timeout"),
634 /*
635	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
636 * parity or memory parity error failures, because most likely we
637 * won't be able to talk to the core of the chip. Nonetheless, we
638 * might see them, if they are in parts of the PCIe core that aren't
639 * essential.
640 */
641 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
642 "PCIePLL1"),
643 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
644 "PCIePLL0"),
645 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
646 "PCIe XTLH core parity"),
647 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
648 "PCIe ADM TX core parity"),
649 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
650 "PCIe ADM RX core parity"),
651 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
652 "SerDes PLL"),
653 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
654 "PCIe cpl header queue"),
655 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
656 "PCIe cpl data queue"),
657 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
658 "Send DMA memory read"),
659 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
660 "uC PLL clock not locked"),
661 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
662 "PCIe serdes Q0 no clock"),
663 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
664 "PCIe serdes Q1 no clock"),
665 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
666 "PCIe serdes Q2 no clock"),
667 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
668 "PCIe serdes Q3 no clock"),
669 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
670 "DDS RXEQ memory parity"),
671 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
672 "IB uC memory parity"),
673 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
674 "PCIe uC oct0 memory parity"),
675 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
676 "PCIe uC oct1 memory parity"),
677};
678
679#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
680
681#define QLOGIC_IB_E_PKTERRS (\
682 ERR_MASK(SendPktLenErr) | \
683 ERR_MASK(SendDroppedDataPktErr) | \
684 ERR_MASK(RcvVCRCErr) | \
685 ERR_MASK(RcvICRCErr) | \
686 ERR_MASK(RcvShortPktLenErr) | \
687 ERR_MASK(RcvEBPErr))
688
689/* Convenience for decoding Send DMA errors */
690#define QLOGIC_IB_E_SDMAERRS ( \
691 ERR_MASK(SDmaGenMismatchErr) | \
692 ERR_MASK(SDmaOutOfBoundErr) | \
693 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
694 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
695 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
696 ERR_MASK(SDmaUnexpDataErr) | \
697 ERR_MASK(SDmaDescAddrMisalignErr) | \
698 ERR_MASK(SDmaDisabledErr) | \
699 ERR_MASK(SendBufMisuseErr))
700
701/* These are all rcv-related errors which we want to count for stats */
702#define E_SUM_PKTERRS \
703 (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
704 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
705 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
706 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
707 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
708 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
709
710/* These are all send-related errors which we want to count for stats */
711#define E_SUM_ERRS \
712 (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
713 ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
714 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
715 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
716 ERR_MASK(InvalidAddrErr))
717
718/*
719 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch or
720 * errors unrelated to freeze and cancelling buffers. Armlaunch can't
721 * be ignored because more can arrive while we are still cleaning up,
722 * and those need to be cancelled as they happen.
723 */
724#define E_SPKT_ERRS_IGNORE \
725 (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
726 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
727 ERR_MASK(SendPktLenErr))
728
729/*
730 * these are errors that can occur when the link changes state while
731 * a packet is being sent or received. This doesn't cover things
732 * like EBP or VCRC that can result from the sender having its link
733 * change state, so we receive a "known bad" packet.
734 */
735#define E_SUM_LINK_PKTERRS \
736 (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
737 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
738 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
739 ERR_MASK(RcvUnexpectedCharErr))
740
741static void autoneg_7220_work(struct work_struct *);
742static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
743
744/*
745 * Called when we might have an error that is specific to a particular
746 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
747 * here we don't need to force the update of pioavail.
748 */
749static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
750{
751 unsigned long sbuf[3];
752 struct qib_devdata *dd = ppd->dd;
753
754 /*
755 * It's possible that sendbuffererror could have bits set; might
756 * have already done this as a result of hardware error handling.
757 */
758 /* read these before writing errorclear */
759 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
760 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
761 sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
762
763 if (sbuf[0] || sbuf[1] || sbuf[2])
764 qib_disarm_piobufs_set(dd, sbuf,
765 dd->piobcnt2k + dd->piobcnt4k);
766}
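/*
 * The three 64-bit SendBufferError registers read above form a bitmap
 * covering up to 3 * 64 = 192 send buffers; it is passed along with the
 * total buffer count (piobcnt2k + piobcnt4k) for disarming.
 */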
767
768static void qib_7220_txe_recover(struct qib_devdata *dd)
769{
770 qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
771 qib_disarm_7220_senderrbufs(dd->pport);
772}
773
774/*
775 * This is called with interrupts disabled and sdma_lock held.
776 */
777static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
778{
779 struct qib_devdata *dd = ppd->dd;
780 u64 set_sendctrl = 0;
781 u64 clr_sendctrl = 0;
782
783 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
784 set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
785 else
786 clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
787
788 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
789 set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
790 else
791 clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
792
793 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
794 set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
795 else
796 clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
797
798 spin_lock(&dd->sendctrl_lock);
799
800 dd->sendctrl |= set_sendctrl;
801 dd->sendctrl &= ~clr_sendctrl;
802
803 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
804 qib_write_kreg(dd, kr_scratch, 0);
805
806 spin_unlock(&dd->sendctrl_lock);
807}
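/*
 * For example, a caller passing QIB_SDMA_SENDCTRL_OP_ENABLE |
 * QIB_SDMA_SENDCTRL_OP_INTENABLE sets SDmaEnable and SDmaIntEnable while
 * clearing SDmaHalt, since any op bit not present is translated into the
 * corresponding clear above.
 */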
808
809static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
810 u64 err, char *buf, size_t blen)
811{
812 static const struct {
813 u64 err;
814 const char *msg;
815 } errs[] = {
816 { ERR_MASK(SDmaGenMismatchErr),
817 "SDmaGenMismatch" },
818 { ERR_MASK(SDmaOutOfBoundErr),
819 "SDmaOutOfBound" },
820 { ERR_MASK(SDmaTailOutOfBoundErr),
821 "SDmaTailOutOfBound" },
822 { ERR_MASK(SDmaBaseErr),
823 "SDmaBase" },
824 { ERR_MASK(SDma1stDescErr),
825 "SDma1stDesc" },
826 { ERR_MASK(SDmaRpyTagErr),
827 "SDmaRpyTag" },
828 { ERR_MASK(SDmaDwEnErr),
829 "SDmaDwEn" },
830 { ERR_MASK(SDmaMissingDwErr),
831 "SDmaMissingDw" },
832 { ERR_MASK(SDmaUnexpDataErr),
833 "SDmaUnexpData" },
834 { ERR_MASK(SDmaDescAddrMisalignErr),
835 "SDmaDescAddrMisalign" },
836 { ERR_MASK(SendBufMisuseErr),
837 "SendBufMisuse" },
838 { ERR_MASK(SDmaDisabledErr),
839 "SDmaDisabled" },
840 };
841 int i;
842 size_t bidx = 0;
843
844 for (i = 0; i < ARRAY_SIZE(errs); i++) {
845 if (err & errs[i].err)
846 bidx += scnprintf(buf + bidx, blen - bidx,
847 "%s ", errs[i].msg);
848 }
849}
850
851/*
852 * This is called as part of link down clean up so disarm and flush
853 * all send buffers so that SMP packets can be sent.
854 */
855static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
856{
857 /* This will trigger the Abort interrupt */
858 sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
859 QIB_SENDCTRL_AVAIL_BLIP);
860 ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
861}
862
863static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
864{
865 /*
866 * Set SendDmaLenGen and clear and set
867 * the MSB of the generation count to enable generation checking
868 * and load the internal generation counter.
869 */
870 qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
871 qib_write_kreg(ppd->dd, kr_senddmalengen,
872 ppd->sdma_descq_cnt |
873 (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
874}
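/*
 * Two writes on purpose: the first loads the descriptor-queue length
 * with the Generation MSB clear, the second sets that MSB, which (per
 * the comment above) loads the internal generation counter and enables
 * generation checking.
 */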
875
876static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
877{
878 qib_sdma_7220_setlengen(ppd);
879 qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
880 ppd->sdma_head_dma[0] = 0;
881}
882
883#define DISABLES_SDMA ( \
884 ERR_MASK(SDmaDisabledErr) | \
885 ERR_MASK(SDmaBaseErr) | \
886 ERR_MASK(SDmaTailOutOfBoundErr) | \
887 ERR_MASK(SDmaOutOfBoundErr) | \
888 ERR_MASK(SDma1stDescErr) | \
889 ERR_MASK(SDmaRpyTagErr) | \
890 ERR_MASK(SDmaGenMismatchErr) | \
891 ERR_MASK(SDmaDescAddrMisalignErr) | \
892 ERR_MASK(SDmaMissingDwErr) | \
893 ERR_MASK(SDmaDwEnErr))
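/*
 * DISABLES_SDMA collects the error bits treated as disabling the send
 * DMA engine; sdma_7220_errors() uses it in the s99_running case to kick
 * the state machine with qib_sdma_event_e7220_err_halted.
 */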
894
895static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
896{
897 unsigned long flags;
898 struct qib_devdata *dd = ppd->dd;
899 char *msg;
900
901 errs &= QLOGIC_IB_E_SDMAERRS;
902
903 msg = dd->cspec->sdmamsgbuf;
904 qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
905 spin_lock_irqsave(&ppd->sdma_lock, flags);
906
907 if (errs & ERR_MASK(SendBufMisuseErr)) {
908 unsigned long sbuf[3];
909
910 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
911 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
912 sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
913
914 qib_dev_err(ppd->dd,
915 "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
916 ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
917 sbuf[0]);
918 }
919
920 if (errs & ERR_MASK(SDmaUnexpDataErr))
921 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
922 ppd->port);
923
924 switch (ppd->sdma_state.current_state) {
925 case qib_sdma_state_s00_hw_down:
926 /* not expecting any interrupts */
927 break;
928
929 case qib_sdma_state_s10_hw_start_up_wait:
930 /* handled in intr path */
931 break;
932
933 case qib_sdma_state_s20_idle:
934 /* not expecting any interrupts */
935 break;
936
937 case qib_sdma_state_s30_sw_clean_up_wait:
938 /* not expecting any interrupts */
939 break;
940
941 case qib_sdma_state_s40_hw_clean_up_wait:
942 if (errs & ERR_MASK(SDmaDisabledErr))
943 __qib_sdma_process_event(ppd,
944 qib_sdma_event_e50_hw_cleaned);
945 break;
946
947 case qib_sdma_state_s50_hw_halt_wait:
948 /* handled in intr path */
949 break;
950
951 case qib_sdma_state_s99_running:
952 if (errs & DISABLES_SDMA)
953 __qib_sdma_process_event(ppd,
954 qib_sdma_event_e7220_err_halted);
955 break;
956 }
957
958 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
959}
960
961/*
962 * Decode the error status into strings, deciding whether to always
963 * print it or not depending on "normal packet errors" vs everything
964 * else. Return 1 if "real" errors, otherwise 0 if only packet
965 * errors, so caller can decide what to print with the string.
966 */
967static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
968 u64 err)
969{
970 int iserr = 1;
971
972 *buf = '\0';
973 if (err & QLOGIC_IB_E_PKTERRS) {
974 if (!(err & ~QLOGIC_IB_E_PKTERRS))
975 iserr = 0;
976 if ((err & ERR_MASK(RcvICRCErr)) &&
977 !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
978 strlcat(buf, "CRC ", blen);
979 if (!iserr)
980 goto done;
981 }
982 if (err & ERR_MASK(RcvHdrLenErr))
983 strlcat(buf, "rhdrlen ", blen);
984 if (err & ERR_MASK(RcvBadTidErr))
985 strlcat(buf, "rbadtid ", blen);
986 if (err & ERR_MASK(RcvBadVersionErr))
987 strlcat(buf, "rbadversion ", blen);
988 if (err & ERR_MASK(RcvHdrErr))
989 strlcat(buf, "rhdr ", blen);
990 if (err & ERR_MASK(SendSpecialTriggerErr))
991 strlcat(buf, "sendspecialtrigger ", blen);
992 if (err & ERR_MASK(RcvLongPktLenErr))
993 strlcat(buf, "rlongpktlen ", blen);
994 if (err & ERR_MASK(RcvMaxPktLenErr))
995 strlcat(buf, "rmaxpktlen ", blen);
996 if (err & ERR_MASK(RcvMinPktLenErr))
997 strlcat(buf, "rminpktlen ", blen);
998 if (err & ERR_MASK(SendMinPktLenErr))
999 strlcat(buf, "sminpktlen ", blen);
1000 if (err & ERR_MASK(RcvFormatErr))
1001 strlcat(buf, "rformaterr ", blen);
1002 if (err & ERR_MASK(RcvUnsupportedVLErr))
1003 strlcat(buf, "runsupvl ", blen);
1004 if (err & ERR_MASK(RcvUnexpectedCharErr))
1005 strlcat(buf, "runexpchar ", blen);
1006 if (err & ERR_MASK(RcvIBFlowErr))
1007 strlcat(buf, "ribflow ", blen);
1008 if (err & ERR_MASK(SendUnderRunErr))
1009 strlcat(buf, "sunderrun ", blen);
1010 if (err & ERR_MASK(SendPioArmLaunchErr))
1011 strlcat(buf, "spioarmlaunch ", blen);
1012 if (err & ERR_MASK(SendUnexpectedPktNumErr))
1013 strlcat(buf, "sunexperrpktnum ", blen);
1014 if (err & ERR_MASK(SendDroppedSmpPktErr))
1015 strlcat(buf, "sdroppedsmppkt ", blen);
1016 if (err & ERR_MASK(SendMaxPktLenErr))
1017 strlcat(buf, "smaxpktlen ", blen);
1018 if (err & ERR_MASK(SendUnsupportedVLErr))
1019 strlcat(buf, "sunsupVL ", blen);
1020 if (err & ERR_MASK(InvalidAddrErr))
1021 strlcat(buf, "invalidaddr ", blen);
1022 if (err & ERR_MASK(RcvEgrFullErr))
1023 strlcat(buf, "rcvegrfull ", blen);
1024 if (err & ERR_MASK(RcvHdrFullErr))
1025 strlcat(buf, "rcvhdrfull ", blen);
1026 if (err & ERR_MASK(IBStatusChanged))
1027 strlcat(buf, "ibcstatuschg ", blen);
1028 if (err & ERR_MASK(RcvIBLostLinkErr))
1029 strlcat(buf, "riblostlink ", blen);
1030 if (err & ERR_MASK(HardwareErr))
1031 strlcat(buf, "hardware ", blen);
1032 if (err & ERR_MASK(ResetNegated))
1033 strlcat(buf, "reset ", blen);
1034 if (err & QLOGIC_IB_E_SDMAERRS)
1035 qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
1036 if (err & ERR_MASK(InvalidEEPCmd))
1037 strlcat(buf, "invalideepromcmd ", blen);
1038done:
1039 return iserr;
1040}
1041
1042static void reenable_7220_chase(unsigned long opaque)
1043{
1044 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1045 ppd->cpspec->chase_timer.expires = 0;
1046 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1047 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1048}
1049
1050static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
1051{
1052 u8 ibclt;
1053 u64 tnow;
1054
1055 ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
1056
1057 /*
1058 * Detect and handle the state chase issue, where we can
1059 * get stuck if we are unlucky on timing on both sides of
1060 * the link. If we are, we disable, set a timer, and
1061 * then re-enable.
1062 */
1063 switch (ibclt) {
1064 case IB_7220_LT_STATE_CFGRCVFCFG:
1065 case IB_7220_LT_STATE_CFGWAITRMT:
1066 case IB_7220_LT_STATE_TXREVLANES:
1067 case IB_7220_LT_STATE_CFGENH:
1068 tnow = get_jiffies_64();
1069 if (ppd->cpspec->chase_end &&
1070 time_after64(tnow, ppd->cpspec->chase_end)) {
1071 ppd->cpspec->chase_end = 0;
1072 qib_set_ib_7220_lstate(ppd,
1073 QLOGIC_IB_IBCC_LINKCMD_DOWN,
1074 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1075 ppd->cpspec->chase_timer.expires = jiffies +
1076 QIB_CHASE_DIS_TIME;
1077 add_timer(&ppd->cpspec->chase_timer);
1078 } else if (!ppd->cpspec->chase_end)
1079 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1080 break;
1081
1082 default:
1083 ppd->cpspec->chase_end = 0;
1084 break;
1085 }
1086}
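/*
 * In short: if the IBC sits in one of the four "chase-prone" training
 * states past chase_end (set QIB_CHASE_TIME after the state is first
 * seen), the link is forced down/disabled and chase_timer is armed for
 * QIB_CHASE_DIS_TIME; reenable_7220_chase() above then restarts polling.
 * Any other training state resets the detection window.
 */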
1087
1088static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1089{
1090 char *msg;
1091 u64 ignore_this_time = 0;
1092 u64 iserr = 0;
1093 int log_idx;
1094 struct qib_pportdata *ppd = dd->pport;
1095 u64 mask;
1096
1097 /* don't report errors that are masked */
1098 errs &= dd->cspec->errormask;
1099 msg = dd->cspec->emsgbuf;
1100
1101 /* do these first, they are most important */
1102 if (errs & ERR_MASK(HardwareErr))
1103 qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1104 else
1105 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1106 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1107 qib_inc_eeprom_err(dd, log_idx, 1);
1108
1109 if (errs & QLOGIC_IB_E_SDMAERRS)
1110 sdma_7220_errors(ppd, errs);
1111
1112 if (errs & ~IB_E_BITSEXTANT)
1113 qib_dev_err(dd, "error interrupt with unknown errors "
1114 "%llx set\n", (unsigned long long)
1115 (errs & ~IB_E_BITSEXTANT));
1116
1117 if (errs & E_SUM_ERRS) {
1118 qib_disarm_7220_senderrbufs(ppd);
1119 if ((errs & E_SUM_LINK_PKTERRS) &&
1120 !(ppd->lflags & QIBL_LINKACTIVE)) {
1121 /*
1122 * This can happen when trying to bring the link
1123 * up, but the IB link changes state at the "wrong"
1124 * time. The IB logic then complains that the packet
1125 * isn't valid. We don't want to confuse people, so
1126 * we just don't print them, except at debug
1127 */
1128 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1129 }
1130 } else if ((errs & E_SUM_LINK_PKTERRS) &&
1131 !(ppd->lflags & QIBL_LINKACTIVE)) {
1132 /*
1133 * This can happen when SMA is trying to bring the link
1134 * up, but the IB link changes state at the "wrong" time.
1135 * The IB logic then complains that the packet isn't
1136 * valid. We don't want to confuse people, so we just
1137 * don't print them, except at debug
1138 */
1139 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1140 }
1141
1142 qib_write_kreg(dd, kr_errclear, errs);
1143
1144 errs &= ~ignore_this_time;
1145 if (!errs)
1146 goto done;
1147
1148 /*
1149 * The ones we mask off are handled specially below
1150 * or above. Also mask SDMADISABLED by default as it
1151 * is too chatty.
1152 */
1153 mask = ERR_MASK(IBStatusChanged) |
1154 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
1155 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
1156
1157 qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
1158
1159 if (errs & E_SUM_PKTERRS)
1160 qib_stats.sps_rcverrs++;
1161 if (errs & E_SUM_ERRS)
1162 qib_stats.sps_txerrs++;
1163 iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
1164 ERR_MASK(SDmaDisabledErr));
1165
1166 if (errs & ERR_MASK(IBStatusChanged)) {
1167 u64 ibcs;
1168
1169 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
1170 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1171 handle_7220_chase(ppd, ibcs);
1172
1173 /* Update our picture of width and speed from chip */
1174 ppd->link_width_active =
1175 ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
1176 IB_WIDTH_4X : IB_WIDTH_1X;
1177 ppd->link_speed_active =
1178 ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
1179 QIB_IB_DDR : QIB_IB_SDR;
1180
1181 /*
1182 * Since going into a recovery state causes the link state
1183 * to go down and since recovery is transitory, it is better
1184 * if we "miss" ever seeing the link training state go into
1185 * recovery (i.e., ignore this transition for link state
1186 * special handling purposes) without updating lastibcstat.
1187 */
1188 if (qib_7220_phys_portstate(ibcs) !=
1189 IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
1190 qib_handle_e_ibstatuschanged(ppd, ibcs);
1191 }
1192
1193 if (errs & ERR_MASK(ResetNegated)) {
1194 qib_dev_err(dd, "Got reset, requires re-init "
1195 "(unload and reload driver)\n");
1196 dd->flags &= ~QIB_INITTED; /* needs re-init */
1197 /* mark as having had error */
1198 *dd->devstatusp |= QIB_STATUS_HWERROR;
1199 *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
1200 }
1201
1202 if (*msg && iserr)
1203 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1204
1205 if (ppd->state_wanted & ppd->lflags)
1206 wake_up_interruptible(&ppd->state_wait);
1207
1208 /*
1209 * If there were hdrq or egrfull errors, wake up any processes
1210 * waiting in poll. We used to try to check which contexts had
1211 * the overflow, but given the cost of that and the chip reads
1212 * to support it, it's better to just wake everybody up if we
1213 * get an overflow; waiters can poll again if it's not them.
1214 */
1215 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1216 qib_handle_urcv(dd, ~0U);
1217 if (errs & ERR_MASK(RcvEgrFullErr))
1218 qib_stats.sps_buffull++;
1219 else
1220 qib_stats.sps_hdrfull++;
1221 }
1222done:
1223 return;
1224}
1225
1226/* enable/disable chip from delivering interrupts */
1227static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
1228{
1229 if (enable) {
1230 if (dd->flags & QIB_BADINTR)
1231 return;
1232 qib_write_kreg(dd, kr_intmask, ~0ULL);
1233 /* force re-interrupt of any pending interrupts. */
1234 qib_write_kreg(dd, kr_intclear, 0ULL);
1235 } else
1236 qib_write_kreg(dd, kr_intmask, 0ULL);
1237}
1238
1239/*
1240 * Try to cleanup as much as possible for anything that might have gone
1241 * wrong while in freeze mode, such as pio buffers being written by user
1242 * processes (causing armlaunch), send errors due to going into freeze mode,
1243 * etc., and try to avoid causing extra interrupts while doing so.
1244 * Forcibly update the in-memory pioavail register copies after cleanup
1245 * because the chip won't do it while in freeze mode (the register values
1246 * themselves are kept correct).
1247 * Make sure that we don't lose any important interrupts by using the chip
1248 * feature that says that writing 0 to a bit in *clear that is set in
1249 * *status will cause an interrupt to be generated again (if allowed by
1250 * the *mask value).
1251 * This is in chip-specific code because of all of the register accesses,
1252 * even though the details are similar on most chips.
1253 */
1254static void qib_7220_clear_freeze(struct qib_devdata *dd)
1255{
1256 /* disable error interrupts, to avoid confusion */
1257 qib_write_kreg(dd, kr_errmask, 0ULL);
1258
1259	/* also disable interrupts; errormask is sometimes overwritten */
1260 qib_7220_set_intr_state(dd, 0);
1261
1262 qib_cancel_sends(dd->pport);
1263
1264 /* clear the freeze, and be sure chip saw it */
1265 qib_write_kreg(dd, kr_control, dd->control);
1266 qib_read_kreg32(dd, kr_scratch);
1267
1268 /* force in-memory update now we are out of freeze */
1269 qib_force_pio_avail_update(dd);
1270
1271 /*
1272 * force new interrupt if any hwerr, error or interrupt bits are
1273 * still set, and clear "safe" send packet errors related to freeze
1274 * and cancelling sends. Re-enable error interrupts before possible
1275 * force of re-interrupt on pending interrupts.
1276 */
1277 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1278 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1279 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1280 qib_7220_set_intr_state(dd, 1);
1281}
1282
1283/**
1284 * qib_7220_handle_hwerrors - display hardware errors.
1285 * @dd: the qlogic_ib device
1286 * @msg: the output buffer
1287 * @msgl: the size of the output buffer
1288 *
1289 * Most hardware errors are catastrophic, but for right now, we'll
1290 * print them and continue. We reuse the same message buffer as
1291 * handle_7220_errors() (which calls us when HardwareErr is set) to
1292 * avoid excessive stack use.
1293 */
1294static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1295 size_t msgl)
1296{
1297 u64 hwerrs;
1298 u32 bits, ctrl;
1299 int isfatal = 0;
1300 char *bitsmsg;
1301 int log_idx;
1302
1303 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
1304 if (!hwerrs)
1305 goto bail;
1306 if (hwerrs == ~0ULL) {
1307 qib_dev_err(dd, "Read of hardware error status failed "
1308 "(all bits set); ignoring\n");
1309 goto bail;
1310 }
1311 qib_stats.sps_hwerrs++;
1312
1313 /*
1314 * Always clear the error status register, except MEMBISTFAIL,
1315 * regardless of whether we continue or stop using the chip.
1316 * We want that set so we know it failed, even across driver reload.
1317 * We'll still ignore it in the hwerrmask. We do this partly for
1318 * diagnostics, but also for support.
1319 */
1320 qib_write_kreg(dd, kr_hwerrclear,
1321 hwerrs & ~HWE_MASK(PowerOnBISTFailed));
1322
1323 hwerrs &= dd->cspec->hwerrmask;
1324
1325 /* We log some errors to EEPROM, check if we have any of those. */
1326 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1327 if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
1328 qib_inc_eeprom_err(dd, log_idx, 1);
1329 if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
1330 RXE_PARITY))
1331 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
1332 "(cleared)\n", (unsigned long long) hwerrs);
1333
1334 if (hwerrs & ~IB_HWE_BITSEXTANT)
1335 qib_dev_err(dd, "hwerror interrupt with unknown errors "
1336 "%llx set\n", (unsigned long long)
1337 (hwerrs & ~IB_HWE_BITSEXTANT));
1338
1339 if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
1340 qib_sd7220_clr_ibpar(dd);
1341
1342 ctrl = qib_read_kreg32(dd, kr_control);
1343 if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
1344 /*
1345		 * Parity errors in send memory are recoverable by h/w;
1346 * just do housekeeping, exit freeze mode and continue.
1347 */
1348 if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
1349 TXEMEMPARITYERR_PIOPBC)) {
1350 qib_7220_txe_recover(dd);
1351 hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
1352 TXEMEMPARITYERR_PIOPBC);
1353 }
1354 if (hwerrs)
1355 isfatal = 1;
1356 else
1357 qib_7220_clear_freeze(dd);
1358 }
1359
1360 *msg = '\0';
1361
1362 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
1363 isfatal = 1;
1364 strlcat(msg, "[Memory BIST test failed, "
1365 "InfiniPath hardware unusable]", msgl);
1366 /* ignore from now on, so disable until driver reloaded */
1367 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
1368 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1369 }
1370
1371 qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
1372 ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
1373
1374 bitsmsg = dd->cspec->bitsmsgbuf;
1375 if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
1376 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
1377 bits = (u32) ((hwerrs >>
1378 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1379 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1380 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
1381 "[PCIe Mem Parity Errs %x] ", bits);
1382 strlcat(msg, bitsmsg, msgl);
1383 }
1384
1385#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
1386 QLOGIC_IB_HWE_COREPLL_RFSLIP)
1387
1388 if (hwerrs & _QIB_PLL_FAIL) {
1389 isfatal = 1;
1390 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
1391 "[PLL failed (%llx), InfiniPath hardware unusable]",
1392 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
1393 strlcat(msg, bitsmsg, msgl);
1394 /* ignore from now on, so disable until driver reloaded */
1395 dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
1396 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1397 }
1398
1399 if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
1400 /*
1401		 * If it occurs, it is left masked since the external
1402 * interface is unused.
1403 */
1404 dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
1405 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1406 }
1407
1408 qib_dev_err(dd, "%s hardware error\n", msg);
1409
1410 if (isfatal && !dd->diag_client) {
1411 qib_dev_err(dd, "Fatal Hardware Error, no longer"
1412 " usable, SN %.16s\n", dd->serial);
1413 /*
1414 * For /sys status file and user programs to print; if no
1415 * trailing brace is copied, we'll know it was truncated.
1416 */
1417 if (dd->freezemsg)
1418 snprintf(dd->freezemsg, dd->freezelen,
1419 "{%s}", msg);
1420 qib_disable_after_error(dd);
1421 }
1422bail:;
1423}
1424
1425/**
1426 * qib_7220_init_hwerrors - enable hardware errors
1427 * @dd: the qlogic_ib device
1428 *
1429 * now that we have finished initializing everything that might reasonably
1430 * cause a hardware error, and cleared those error bits as they occur,
1431 * we can enable hardware errors in the mask (potentially enabling
1432 * freeze mode), and enable hardware errors as errors (along with
1433 * everything else) in errormask
1434 */
1435static void qib_7220_init_hwerrors(struct qib_devdata *dd)
1436{
1437 u64 val;
1438 u64 extsval;
1439
1440 extsval = qib_read_kreg64(dd, kr_extstatus);
1441
1442 if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
1443 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
1444 qib_dev_err(dd, "MemBIST did not complete!\n");
1445 if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
1446 qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
1447
1448 val = ~0ULL; /* default to all hwerrors become interrupts, */
1449
1450 val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
1451 dd->cspec->hwerrmask = val;
1452
1453 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1454 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1455
1456 /* clear all */
1457 qib_write_kreg(dd, kr_errclear, ~0ULL);
1458 /* enable errors that are masked, at least this first time. */
1459 qib_write_kreg(dd, kr_errmask, ~0ULL);
1460 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1461 /* clear any interrupts up to this point (ints still not enabled) */
1462 qib_write_kreg(dd, kr_intclear, ~0ULL);
1463}
1464
1465/*
1466 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
1467 * on chips that are count-based, rather than trigger-based. There is no
1468 * reference counting, but that's also fine, given the intended use.
1469 * Only chip-specific because it's all register accesses
1470 */
1471static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
1472{
1473 if (enable) {
1474 qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
1475 dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1476 } else
1477 dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1478 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1479}
1480
1481/*
1482 * Formerly took parameter <which> in pre-shifted,
1483 * pre-merged form with LinkCmd and LinkInitCmd
1484 * together, and assuming the zero was NOP.
1485 */
1486static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1487 u16 linitcmd)
1488{
1489 u64 mod_wd;
1490 struct qib_devdata *dd = ppd->dd;
1491 unsigned long flags;
1492
1493 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1494 /*
1495 * If we are told to disable, note that so link-recovery
1496 * code does not attempt to bring us back up.
1497 */
1498 spin_lock_irqsave(&ppd->lflags_lock, flags);
1499 ppd->lflags |= QIBL_IB_LINK_DISABLED;
1500 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1501 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1502 /*
1503 * Any other linkinitcmd will lead to LINKDOWN and then
1504 * to INIT (if all is well), so clear flag to let
1505 * link-recovery code attempt to bring us back up.
1506 */
1507 spin_lock_irqsave(&ppd->lflags_lock, flags);
1508 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1509 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1510 }
1511
1512 mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
1513 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1514
1515 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
1516 /* write to chip to prevent back-to-back writes of ibc reg */
1517 qib_write_kreg(dd, kr_scratch, 0);
1518}
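/*
 * Example caller: reenable_7220_chase() above uses
 * qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
 * QLOGIC_IB_IBCC_LINKINITCMD_POLL) to take the link down and restart
 * polling after a chase-induced disable.
 */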
1519
1520/*
1521 * All detailed interaction with the SerDes has been moved to qib_sd7220.c
1522 *
1523 * The portion of IBA7220-specific bringup_serdes() that actually deals with
1524 * registers and memory within the SerDes itself is qib_sd7220_init().
1525 */
1526
1527/**
1528 * qib_7220_bringup_serdes - bring up the serdes
1529 * @ppd: physical port on the qlogic_ib device
1530 */
1531static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
1532{
1533 struct qib_devdata *dd = ppd->dd;
1534 u64 val, prev_val, guid, ibc;
1535 int ret = 0;
1536
1537 /* Put IBC in reset, sends disabled */
1538 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1539 qib_write_kreg(dd, kr_control, 0ULL);
1540
1541 if (qib_compat_ddr_negotiate) {
1542 ppd->cpspec->ibdeltainprog = 1;
1543 ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
1544 ppd->cpspec->iblnkerrsnap =
1545 read_7220_creg32(dd, cr_iblinkerrrecov);
1546 }
1547
1548 /* flowcontrolwatermark is in units of KBytes */
1549 ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1550 /*
1551	 * How often flowctrl is sent. More or less in usecs; balance against
1552 * watermark value, so that in theory senders always get a flow
1553 * control update in time to not let the IB link go idle.
1554 */
1555 ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1556 /* max error tolerance */
1557 ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
1558 /* use "real" buffer space for */
1559 ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1560 /* IB credit flow control. */
1561 ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1562 /*
1563 * set initial max size pkt IBC will send, including ICRC; it's the
1564 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
1565 */
1566 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
1567 ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1568
1569 /* initially come up waiting for TS1, without sending anything. */
1570 val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1571 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1572 qib_write_kreg(dd, kr_ibcctrl, val);
1573
1574 if (!ppd->cpspec->ibcddrctrl) {
1575 /* not on re-init after reset */
1576 ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
1577
1578 if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
1579 ppd->cpspec->ibcddrctrl |=
1580 IBA7220_IBC_SPEED_AUTONEG_MASK |
1581 IBA7220_IBC_IBTA_1_2_MASK;
1582 else
1583 ppd->cpspec->ibcddrctrl |=
1584 ppd->link_speed_enabled == QIB_IB_DDR ?
1585 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
1586 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
1587 (IB_WIDTH_1X | IB_WIDTH_4X))
1588 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
1589 else
1590 ppd->cpspec->ibcddrctrl |=
1591 ppd->link_width_enabled == IB_WIDTH_4X ?
1592 IBA7220_IBC_WIDTH_4X_ONLY :
1593 IBA7220_IBC_WIDTH_1X_ONLY;
1594
1595 /* always enable these on driver reload, not sticky */
1596 ppd->cpspec->ibcddrctrl |=
1597 IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
1598 ppd->cpspec->ibcddrctrl |=
1599 IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
1600
1601 /* enable automatic lane reversal detection for receive */
1602 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
1603 } else
1604 /* write to chip to prevent back-to-back writes of ibc reg */
1605 qib_write_kreg(dd, kr_scratch, 0);
1606
1607 qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
1608 qib_write_kreg(dd, kr_scratch, 0);
1609
1610	qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
1611 qib_write_kreg(dd, kr_scratch, 0);
1612
1613 ret = qib_sd7220_init(dd);
1614
1615 val = qib_read_kreg64(dd, kr_xgxs_cfg);
1616 prev_val = val;
1617 val |= QLOGIC_IB_XGXS_FC_SAFE;
1618 if (val != prev_val) {
1619 qib_write_kreg(dd, kr_xgxs_cfg, val);
1620 qib_read_kreg32(dd, kr_scratch);
1621 }
1622 if (val & QLOGIC_IB_XGXS_RESET)
1623 val &= ~QLOGIC_IB_XGXS_RESET;
1624 if (val != prev_val)
1625 qib_write_kreg(dd, kr_xgxs_cfg, val);
1626
1627 /* first time through, set port guid */
1628 if (!ppd->guid)
1629 ppd->guid = dd->base_guid;
1630 guid = be64_to_cpu(ppd->guid);
1631
1632 qib_write_kreg(dd, kr_hrtbt_guid, guid);
1633 if (!ret) {
1634 dd->control |= QLOGIC_IB_C_LINKENABLE;
1635 qib_write_kreg(dd, kr_control, dd->control);
1636 } else
1637 /* write to chip to prevent back-to-back writes of ibc reg */
1638 qib_write_kreg(dd, kr_scratch, 0);
1639 return ret;
1640}
1641
1642/**
1643 * qib_7220_quiet_serdes - set serdes to txidle
1644 * @ppd: physical port of the qlogic_ib device
1645 * Called when driver is being unloaded
1646 */
1647static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
1648{
1649 u64 val;
1650 struct qib_devdata *dd = ppd->dd;
1651 unsigned long flags;
1652
1653 /* disable IBC */
1654 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1655 qib_write_kreg(dd, kr_control,
1656 dd->control | QLOGIC_IB_C_FREEZEMODE);
1657
1658 ppd->cpspec->chase_end = 0;
1659 if (ppd->cpspec->chase_timer.data) /* if initted */
1660 del_timer_sync(&ppd->cpspec->chase_timer);
1661
1662 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
1663 ppd->cpspec->ibdeltainprog) {
1664 u64 diagc;
1665
1666 /* enable counter writes */
1667 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1668 qib_write_kreg(dd, kr_hwdiagctrl,
1669 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1670
1671 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
1672 val = read_7220_creg32(dd, cr_ibsymbolerr);
1673 if (ppd->cpspec->ibdeltainprog)
1674 val -= val - ppd->cpspec->ibsymsnap;
1675 val -= ppd->cpspec->ibsymdelta;
1676 write_7220_creg(dd, cr_ibsymbolerr, val);
1677 }
1678 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
1679 val = read_7220_creg32(dd, cr_iblinkerrrecov);
1680 if (ppd->cpspec->ibdeltainprog)
1681 val -= val - ppd->cpspec->iblnkerrsnap;
1682 val -= ppd->cpspec->iblnkerrdelta;
1683 write_7220_creg(dd, cr_iblinkerrrecov, val);
1684 }
1685
1686 /* and disable counter writes */
1687 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1688 }
1689 qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1690
1691 spin_lock_irqsave(&ppd->lflags_lock, flags);
1692 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1693 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1694 wake_up(&ppd->cpspec->autoneg_wait);
1695 cancel_delayed_work(&ppd->cpspec->autoneg_work);
1696 flush_scheduled_work();
1697
1698 shutdown_7220_relock_poll(ppd->dd);
1699 val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
1700 val |= QLOGIC_IB_XGXS_RESET;
1701 qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
1702}
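/*
 * The counter rewrites above (with CounterWrEnable set temporarily in
 * HwDiagCtrl) appear to back out symbol-error and link-error-recovery
 * counts accumulated while DDR autonegotiation was adjusting the link,
 * so they are not reported as real link errors.
 */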
1703
1704/**
1705 * qib_setup_7220_setextled - set the state of the two external LEDs
1706 * @dd: the qlogic_ib device
1707 * @on: whether the link is up or not
1708 *
1709 * The exact combination of LEDs lit when @on is true is determined
1710 * by looking at the ibcstatus.
1711 *
1712 * These LEDs indicate the physical and logical state of IB link.
1713 * For this chip (at least with recommended board pinouts), LED1
1714 * is Yellow (logical state) and LED2 is Green (physical state),
1715 * is Yellow (logical state) and LED2 is Green (physical state).
1716 * Note: We try to match the Mellanox HCA LED behavior as best
1717 * we can. Green indicates physical link state is OK (something is
1718 * plugged in, and we can train).
1719 * Amber indicates the link is logically up (ACTIVE).
1720 * Mellanox further blinks the amber LED to indicate data packet
1721 * activity, but we have no hardware support for that, so it would
1722 * require waking up every 10-20 msecs and checking the counters
1723 * on the chip, and then turning the LED off if appropriate. That's
1724 * visible overhead, so not something we will do.
1725 *
1726 */
1727static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
1728{
1729 struct qib_devdata *dd = ppd->dd;
1730 u64 extctl, ledblink = 0, val, lst, ltst;
1731 unsigned long flags;
1732
1733 /*
1734 * The diags use the LED to indicate diag info, so we leave
1735 * the external LED alone when the diags are running.
1736 */
1737 if (dd->diag_client)
1738 return;
1739
1740 if (ppd->led_override) {
1741 ltst = (ppd->led_override & QIB_LED_PHYS) ?
1742			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
1743 lst = (ppd->led_override & QIB_LED_LOG) ?
1744 IB_PORT_ACTIVE : IB_PORT_DOWN;
1745 } else if (on) {
1746 val = qib_read_kreg64(dd, kr_ibcstatus);
1747 ltst = qib_7220_phys_portstate(val);
1748 lst = qib_7220_iblink_state(val);
1749 } else {
1750 ltst = 0;
1751 lst = 0;
1752 }
1753
1754 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1755 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1756 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1757 if (ltst == IB_PHYSPORTSTATE_LINKUP) {
1758 extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1759 /*
1760 * counts are in chip clock (4ns) periods.
1761 * This is 1/16 sec (66.6ms) on,
1762 * 3/16 sec (187.5 ms) off, with packets rcvd
1763 */
1764 ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
1765 | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
1766 }
1767 if (lst == IB_PORT_ACTIVE)
1768 extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1769 dd->cspec->extctrl = extctl;
1770 qib_write_kreg(dd, kr_extctrl, extctl);
1771 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1772
1773 if (ledblink) /* blink the LED on packet receive */
1774 qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
1775}
1776
1777static void qib_7220_free_irq(struct qib_devdata *dd)
1778{
1779 if (dd->cspec->irq) {
1780 free_irq(dd->cspec->irq, dd);
1781 dd->cspec->irq = 0;
1782 }
1783 qib_nomsi(dd);
1784}
1785
1786/*
1787 * qib_setup_7220_cleanup - clean up any chip-specific per-device data
1788 * @dd: the qlogic_ib device
1789 *
1790 * This is called during driver unload.
1791 *
1792 */
1793static void qib_setup_7220_cleanup(struct qib_devdata *dd)
1794{
1795 qib_7220_free_irq(dd);
1796 kfree(dd->cspec->cntrs);
1797 kfree(dd->cspec->portcntrs);
1798}
1799
1800/*
1801 * This is only called for SDmaInt.
1802 * SDmaDisabled is handled on the error path.
1803 */
1804static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
1805{
1806 unsigned long flags;
1807
1808 spin_lock_irqsave(&ppd->sdma_lock, flags);
1809
1810 switch (ppd->sdma_state.current_state) {
1811 case qib_sdma_state_s00_hw_down:
1812 break;
1813
1814 case qib_sdma_state_s10_hw_start_up_wait:
1815 __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
1816 break;
1817
1818 case qib_sdma_state_s20_idle:
1819 break;
1820
1821 case qib_sdma_state_s30_sw_clean_up_wait:
1822 break;
1823
1824 case qib_sdma_state_s40_hw_clean_up_wait:
1825 break;
1826
1827 case qib_sdma_state_s50_hw_halt_wait:
1828 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1829 break;
1830
1831 case qib_sdma_state_s99_running:
1832 /* too chatty to print here */
1833 __qib_sdma_intr(ppd);
1834 break;
1835 }
1836 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1837}
1838
1839static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
1840{
1841 unsigned long flags;
1842
1843 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1844 if (needint) {
1845 if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
1846 goto done;
1847 /*
1848 * blip the availupd off, next write will be on, so
1849 * we ensure an avail update, regardless of threshold or
1850 * buffers becoming free, whenever we want an interrupt
1851 */
1852 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
1853 ~SYM_MASK(SendCtrl, SendBufAvailUpd));
1854 qib_write_kreg(dd, kr_scratch, 0ULL);
1855 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
1856 } else
1857 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
1858 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1859 qib_write_kreg(dd, kr_scratch, 0ULL);
1860done:
1861 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1862}
1863
1864/*
1865 * Handle errors and unusual events first, separate function
1866 * to improve cache hits for fast path interrupt handling.
1867 */
1868static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
1869{
1870 if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1871 qib_dev_err(dd,
1872 "interrupt with unknown interrupts %Lx set\n",
1873 istat & ~QLOGIC_IB_I_BITSEXTANT);
1874
1875 if (istat & QLOGIC_IB_I_GPIO) {
1876 u32 gpiostatus;
1877
1878 /*
1879 * Boards for this chip currently don't use GPIO interrupts,
1880 * so clear by writing GPIOstatus to GPIOclear, and complain
1881		 * to alert the developer. To avoid endless repeats, clear
1882 * the bits in the mask, since there is some kind of
1883 * programming error or chip problem.
1884 */
1885 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1886 /*
1887 * In theory, writing GPIOstatus to GPIOclear could
1888 * have a bad side-effect on some diagnostic that wanted
1889 * to poll for a status-change, but the various shadows
1890 * make that problematic at best. Diags will just suppress
1891 * all GPIO interrupts during such tests.
1892 */
1893 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
1894
1895 if (gpiostatus) {
1896 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1897 u32 gpio_irq = mask & gpiostatus;
1898
1899 /*
1900 * A bit set in status and (chip) Mask register
1901 * would cause an interrupt. Since we are not
1902 * expecting any, report it. Also check that the
1903 * chip reflects our shadow, report issues,
1904 * and refresh from the shadow.
1905 */
1906 /*
1907 * Clear any troublemakers, and update chip
1908 * from shadow
1909 */
1910 dd->cspec->gpio_mask &= ~gpio_irq;
1911 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1912 }
1913 }
1914
1915 if (istat & QLOGIC_IB_I_ERROR) {
1916 u64 estat;
1917
1918 qib_stats.sps_errints++;
1919 estat = qib_read_kreg64(dd, kr_errstatus);
1920 if (!estat)
1921 qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
1922 "but no error bits set!\n", istat);
1923 else
1924 handle_7220_errors(dd, estat);
1925 }
1926}
1927
1928static irqreturn_t qib_7220intr(int irq, void *data)
1929{
1930 struct qib_devdata *dd = data;
1931 irqreturn_t ret;
1932 u64 istat;
1933 u64 ctxtrbits;
1934 u64 rmask;
1935 unsigned i;
1936
1937 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1938 /*
1939 * This return value is not great, but we do not want the
1940 * interrupt core code to remove our interrupt handler
1941 * because we don't appear to be handling an interrupt
1942 * during a chip reset.
1943 */
1944 ret = IRQ_HANDLED;
1945 goto bail;
1946 }
1947
1948 istat = qib_read_kreg64(dd, kr_intstatus);
1949
1950 if (unlikely(!istat)) {
1951 ret = IRQ_NONE; /* not our interrupt, or already handled */
1952 goto bail;
1953 }
1954 if (unlikely(istat == -1)) {
1955 qib_bad_intrstatus(dd);
1956 /* don't know if it was our interrupt or not */
1957 ret = IRQ_NONE;
1958 goto bail;
1959 }
1960
1961 qib_stats.sps_ints++;
1962 if (dd->int_counter != (u32) -1)
1963 dd->int_counter++;
1964
1965 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1966 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1967 unlikely_7220_intr(dd, istat);
1968
1969 /*
1970 * Clear the interrupt bits we found set, relatively early, so we
1971	 * "know" the chip will have seen this by the time we process
1972 * the queue, and will re-interrupt if necessary. The processor
1973 * itself won't take the interrupt again until we return.
1974 */
1975 qib_write_kreg(dd, kr_intclear, istat);
1976
1977 /*
1978 * Handle kernel receive queues before checking for pio buffers
1979 * available since receives can overflow; piobuf waiters can afford
1980 * a few extra cycles, since they were waiting anyway.
1981 */
1982 ctxtrbits = istat &
1983 ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1984 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1985 if (ctxtrbits) {
1986 rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1987 (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
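		/*
		 * Each kernel context owns bit i of both the RcvAvail and
		 * RcvUrg fields, so advance the two-bit mask one position
		 * per context as we walk the kernel contexts.
		 */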
1988 for (i = 0; i < dd->first_user_ctxt; i++) {
1989 if (ctxtrbits & rmask) {
1990 ctxtrbits &= ~rmask;
1991 qib_kreceive(dd->rcd[i], NULL, NULL);
1992 }
1993 rmask <<= 1;
1994 }
1995 if (ctxtrbits) {
1996 ctxtrbits =
1997 (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1998 (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
1999 qib_handle_urcv(dd, ctxtrbits);
2000 }
2001 }
2002
2003 /* only call for SDmaInt */
2004 if (istat & QLOGIC_IB_I_SDMAINT)
2005 sdma_7220_intr(dd->pport, istat);
2006
2007 if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2008 qib_ib_piobufavail(dd);
2009
2010 ret = IRQ_HANDLED;
2011bail:
2012 return ret;
2013}
2014
2015/*
2016 * Set up our chip-specific interrupt handler.
2017 * The interrupt type has already been set up, so
2018 * we just need to do the registration and error checking.
2019 * If we are using MSI interrupts, we may fall back to
2020 * INTx later, if the interrupt handler doesn't get called
2021 * within 1/2 second (see verify_interrupt()).
2022 */
2023static void qib_setup_7220_interrupt(struct qib_devdata *dd)
2024{
2025 if (!dd->cspec->irq)
2026 qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
2027 "work\n");
2028 else {
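		/* MSI is exclusive to this device; only a legacy INTx line may be shared */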
2029 int ret = request_irq(dd->cspec->irq, qib_7220intr,
2030 dd->msi_lo ? 0 : IRQF_SHARED,
2031 QIB_DRV_NAME, dd);
2032
2033 if (ret)
2034 qib_dev_err(dd, "Couldn't setup %s interrupt "
2035 "(irq=%d): %d\n", dd->msi_lo ?
2036 "MSI" : "INTx", dd->cspec->irq, ret);
2037 }
2038}
2039
2040/**
2041 * qib_7220_boardname - fill in the board name
2042 * @dd: the qlogic_ib device
2043 *
2044 * info is based on the board revision register
2045 */
2046static void qib_7220_boardname(struct qib_devdata *dd)
2047{
2048 char *n;
2049 u32 boardid, namelen;
2050
2051 boardid = SYM_FIELD(dd->revision, Revision,
2052 BoardID);
2053
2054 switch (boardid) {
2055 case 1:
2056 n = "InfiniPath_QLE7240";
2057 break;
2058 case 2:
2059 n = "InfiniPath_QLE7280";
2060 break;
2061 default:
2062 qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
2063 n = "Unknown_InfiniPath_7220";
2064 break;
2065 }
2066
2067 namelen = strlen(n) + 1;
2068 dd->boardname = kmalloc(namelen, GFP_KERNEL);
2069 if (!dd->boardname)
2070 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
2071 else
2072 snprintf(dd->boardname, namelen, "%s", n);
2073
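	/* the 7220 is major revision 5 silicon; only minor revs 1 and 2 are supported */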
2074 if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
2075 qib_dev_err(dd, "Unsupported InfiniPath hardware "
2076 "revision %u.%u!\n",
2077 dd->majrev, dd->minrev);
2078
2079 snprintf(dd->boardversion, sizeof(dd->boardversion),
2080 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
2081 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
2082 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
2083 dd->majrev, dd->minrev,
2084 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
2085}
2086
2087/*
2088 * This routine sleeps, so it can only be called from user context, not
2089 * from interrupt context.
2090 */
2091static int qib_setup_7220_reset(struct qib_devdata *dd)
2092{
2093 u64 val;
2094 int i;
2095 int ret;
2096 u16 cmdval;
2097 u8 int_line, clinesz;
2098 unsigned long flags;
2099
2100 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
2101
2102 /* Use dev_err so it shows up in logs, etc. */
2103 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
2104
2105 /* no interrupts till re-initted */
2106 qib_7220_set_intr_state(dd, 0);
2107
2108 dd->pport->cpspec->ibdeltainprog = 0;
2109 dd->pport->cpspec->ibsymdelta = 0;
2110 dd->pport->cpspec->iblnkerrdelta = 0;
2111
2112 /*
2113 * Keep chip from being accessed until we are ready. Use
2114 * writeq() directly, to allow the write even though QIB_PRESENT
2115	 * isn't set.
2116 */
2117 dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
2118 dd->int_counter = 0; /* so we check interrupts work again */
2119 val = dd->control | QLOGIC_IB_C_RESET;
2120 writeq(val, &dd->kregbase[kr_control]);
2121 mb(); /* prevent compiler reordering around actual reset */
2122
2123 for (i = 1; i <= 5; i++) {
2124 /*
2125 * Allow MBIST, etc. to complete; longer on each retry.
2126 * We sometimes get machine checks from bus timeout if no
2127 * response, so for now, make it *really* long.
2128 */
2129 msleep(1000 + (1 + i) * 2000);
2130
2131 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
2132
2133 /*
2134 * Use readq directly, so we don't need to mark it as PRESENT
2135 * until we get a successful indication that all is well.
2136 */
2137 val = readq(&dd->kregbase[kr_revision]);
2138 if (val == dd->revision) {
2139 dd->flags |= QIB_PRESENT; /* it's back */
2140 ret = qib_reinit_intr(dd);
2141 goto bail;
2142 }
2143 }
2144 ret = 0; /* failed */
2145
2146bail:
2147 if (ret) {
2148 if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
2149 qib_dev_err(dd, "Reset failed to setup PCIe or "
2150 "interrupts; continuing anyway\n");
2151
2152 /* hold IBC in reset, no sends, etc till later */
2153 qib_write_kreg(dd, kr_control, 0ULL);
2154
2155 /* clear the reset error, init error/hwerror mask */
2156 qib_7220_init_hwerrors(dd);
2157
2158 /* do setup similar to speed or link-width changes */
2159 if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
2160 dd->cspec->presets_needed = 1;
2161 spin_lock_irqsave(&dd->pport->lflags_lock, flags);
2162 dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
2163 dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2164 spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
2165 }
2166
2167 return ret;
2168}
2169
2170/**
2171 * qib_7220_put_tid - write a TID to the chip
2172 * @dd: the qlogic_ib device
2173 * @tidptr: pointer to the expected TID (in chip) to update
2174 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
2175 * @pa: physical address of in-memory buffer; tidinvalid if freeing
2176 */
2177static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
2178 u32 type, unsigned long pa)
2179{
2180 if (pa != dd->tidinvalid) {
2181 u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
2182
2183 /* paranoia checks */
2184 if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
2185 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
2186 pa);
2187 return;
2188 }
2189 if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
2190 qib_dev_err(dd, "Physical page address 0x%lx "
2191 "larger than supported\n", pa);
2192 return;
2193 }
2194
2195 if (type == RCVHQ_RCV_TYPE_EAGER)
2196 chippa |= dd->tidtemplate;
2197 else /* for now, always full 4KB page */
2198 chippa |= IBA7220_TID_SZ_4K;
2199 pa = chippa;
2200 }
2201 writeq(pa, tidptr);
2202 mmiowb();
2203}
2204
2205/**
2206 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
2207 * @dd: the qlogic_ib device
2208 * @rcd: the ctxt data
2209 *
2210 * clear all TID entries for a ctxt, expected and eager.
2211 * Used from qib_close(). On this chip, TIDs are only 32 bits,
2212 * not 64, but they are still on 64 bit boundaries, so tidbase
2213 * is declared as u64 * for the pointer math, even though we write 32 bits
2214 */
2215static void qib_7220_clear_tids(struct qib_devdata *dd,
2216 struct qib_ctxtdata *rcd)
2217{
2218 u64 __iomem *tidbase;
2219 unsigned long tidinv;
2220 u32 ctxt;
2221 int i;
2222
2223 if (!dd->kregbase || !rcd)
2224 return;
2225
2226 ctxt = rcd->ctxt;
2227
2228 tidinv = dd->tidinvalid;
2229 tidbase = (u64 __iomem *)
2230 ((char __iomem *)(dd->kregbase) +
2231 dd->rcvtidbase +
2232 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
2233
2234 for (i = 0; i < dd->rcvtidcnt; i++)
2235 qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
2236 tidinv);
2237
2238 tidbase = (u64 __iomem *)
2239 ((char __iomem *)(dd->kregbase) +
2240 dd->rcvegrbase +
2241 rcd->rcvegr_tid_base * sizeof(*tidbase));
2242
2243 for (i = 0; i < rcd->rcvegrcnt; i++)
2244 qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2245 tidinv);
2246}
2247
2248/**
2249 * qib_7220_tidtemplate - setup constants for TID updates
2250 * @dd: the qlogic_ib device
2251 *
2252 * We set up stuff that we use a lot, to avoid calculating it each time
2253 */
2254static void qib_7220_tidtemplate(struct qib_devdata *dd)
2255{
2256 if (dd->rcvegrbufsize == 2048)
2257 dd->tidtemplate = IBA7220_TID_SZ_2K;
2258 else if (dd->rcvegrbufsize == 4096)
2259 dd->tidtemplate = IBA7220_TID_SZ_4K;
2260 dd->tidinvalid = 0;
2261}
2262
2263/**
2264 * qib_init_7220_get_base_info - set chip-specific flags for user code
2265 * @rcd: the qlogic_ib ctxt
2266 * @kinfo: qib_base_info pointer
2267 *
2268 * We set the PCIE flag because the lower bandwidth on PCIe vs
2269 * HyperTransport can affect some user packet algorithms.
2270 */
2271static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
2272 struct qib_base_info *kinfo)
2273{
2274 kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2275 QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
2276
2277 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
2278 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
2279
2280 return 0;
2281}
2282
2283static struct qib_message_header *
2284qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2285{
2286 u32 offset = qib_hdrget_offset(rhf_addr);
2287
2288 return (struct qib_message_header *)
2289 (rhf_addr - dd->rhf_offset + offset);
2290}
2291
2292static void qib_7220_config_ctxts(struct qib_devdata *dd)
2293{
2294 unsigned long flags;
2295 u32 nchipctxts;
2296
2297 nchipctxts = qib_read_kreg32(dd, kr_portcnt);
2298 dd->cspec->numctxts = nchipctxts;
2299 if (qib_n_krcv_queues > 1) {
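		/* use QPN-based mapping of receives to kernel contexts (RcvQPMapEnable is set below) */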
2300 dd->qpn_mask = 0x3f;
2301 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2302 if (dd->first_user_ctxt > nchipctxts)
2303 dd->first_user_ctxt = nchipctxts;
2304 } else
2305 dd->first_user_ctxt = dd->num_pports;
2306 dd->n_krcv_queues = dd->first_user_ctxt;
2307
2308 if (!qib_cfgctxts) {
2309 int nctxts = dd->first_user_ctxt + num_online_cpus();
2310
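		/* no explicit cfgctxts given: size to roughly one user ctxt per online CPU, rounded up to the chip's 5/9/17 choices */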
2311 if (nctxts <= 5)
2312 dd->ctxtcnt = 5;
2313 else if (nctxts <= 9)
2314 dd->ctxtcnt = 9;
2315 else if (nctxts <= nchipctxts)
2316 dd->ctxtcnt = nchipctxts;
2317 } else if (qib_cfgctxts <= nchipctxts)
2318 dd->ctxtcnt = qib_cfgctxts;
2319 if (!dd->ctxtcnt) /* none of the above, set to max */
2320 dd->ctxtcnt = nchipctxts;
2321
2322 /*
2323 * Chip can be configured for 5, 9, or 17 ctxts, and choice
2324 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
2325 * Lock to be paranoid about later motion, etc.
2326 */
2327 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2328 if (dd->ctxtcnt > 9)
2329 dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
2330 else if (dd->ctxtcnt > 5)
2331 dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
2332 /* else configure for default 5 receive ctxts */
2333 if (dd->qpn_mask)
2334 dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
2335 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2336 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2337
2338 /* kr_rcvegrcnt changes based on the number of contexts enabled */
2339 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
2340 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
2341}
2342
2343static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
2344{
2345 int lsb, ret = 0;
2346 u64 maskr; /* right-justified mask */
2347
2348 switch (which) {
2349 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
2350 ret = ppd->link_width_enabled;
2351 goto done;
2352
2353 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
2354 ret = ppd->link_width_active;
2355 goto done;
2356
2357 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
2358 ret = ppd->link_speed_enabled;
2359 goto done;
2360
2361 case QIB_IB_CFG_SPD: /* Get current Link spd */
2362 ret = ppd->link_speed_active;
2363 goto done;
2364
2365 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2366 lsb = IBA7220_IBC_RXPOL_SHIFT;
2367 maskr = IBA7220_IBC_RXPOL_MASK;
2368 break;
2369
2370 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2371 lsb = IBA7220_IBC_LREV_SHIFT;
2372 maskr = IBA7220_IBC_LREV_MASK;
2373 break;
2374
2375 case QIB_IB_CFG_LINKLATENCY:
2376 ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
2377 & IBA7220_DDRSTAT_LINKLAT_MASK;
2378 goto done;
2379
2380 case QIB_IB_CFG_OP_VLS:
2381 ret = ppd->vls_operational;
2382 goto done;
2383
2384 case QIB_IB_CFG_VL_HIGH_CAP:
2385 ret = 0;
2386 goto done;
2387
2388 case QIB_IB_CFG_VL_LOW_CAP:
2389 ret = 0;
2390 goto done;
2391
2392 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2393 ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2394 OverrunThreshold);
2395 goto done;
2396
2397 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2398 ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2399 PhyerrThreshold);
2400 goto done;
2401
2402 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2403 /* will only take effect when the link state changes */
2404 ret = (ppd->cpspec->ibcctrl &
2405 SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2406 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2407 goto done;
2408
2409 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2410 lsb = IBA7220_IBC_HRTBT_SHIFT;
2411 maskr = IBA7220_IBC_HRTBT_MASK;
2412 break;
2413
2414 case QIB_IB_CFG_PMA_TICKS:
2415 /*
2416 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
2417 * Since the clock is always 250MHz, the value is 1 or 0.
2418 */
2419 ret = (ppd->link_speed_active == QIB_IB_DDR);
2420 goto done;
2421
2422 default:
2423 ret = -EINVAL;
2424 goto done;
2425 }
2426 ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
2427done:
2428 return ret;
2429}
2430
2431static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2432{
2433 struct qib_devdata *dd = ppd->dd;
2434 u64 maskr; /* right-justified mask */
2435 int lsb, ret = 0, setforce = 0;
2436 u16 lcmd, licmd;
2437 unsigned long flags;
2438
2439 switch (which) {
2440 case QIB_IB_CFG_LIDLMC:
2441 /*
2442 * Set LID and LMC. Combined to avoid possible hazard
2443 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
2444 */
2445 lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2446 maskr = IBA7220_IBC_DLIDLMC_MASK;
2447 break;
2448
2449 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
2450 /*
2451 * As with speed, only write the actual register if
2452 * the link is currently down, otherwise takes effect
2453 * on next link change.
2454 */
2455 ppd->link_width_enabled = val;
2456 if (!(ppd->lflags & QIBL_LINKDOWN))
2457 goto bail;
2458 /*
2459 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
2460		 * will get called because we want to update
2461		 * link_width_active, and the change may not take
2462		 * effect for some time (if we are in POLL), so this
2463		 * flag will force the updown routine to be called
2464		 * on the next ibstatuschange down interrupt, even
2465		 * if it's not a down->up transition.
2466 */
2467 val--; /* convert from IB to chip */
2468 maskr = IBA7220_IBC_WIDTH_MASK;
2469 lsb = IBA7220_IBC_WIDTH_SHIFT;
2470 setforce = 1;
2471 spin_lock_irqsave(&ppd->lflags_lock, flags);
2472 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2473 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2474 break;
2475
2476 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2477 /*
2478 * If we turn off IB1.2, need to preset SerDes defaults,
2479 * but not right now. Set a flag for the next time
2480 * we command the link down. As with width, only write the
2481 * actual register if the link is currently down, otherwise
2482 * takes effect on next link change. Since setting is being
2483		 * explicitly requested (via MAD or sysfs), clear autoneg
2484 * failure status if speed autoneg is enabled.
2485 */
2486 ppd->link_speed_enabled = val;
2487 if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
2488 !(val & (val - 1)))
2489 dd->cspec->presets_needed = 1;
2490 if (!(ppd->lflags & QIBL_LINKDOWN))
2491 goto bail;
2492 /*
2493 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
2494		 * will get called because we want to update
2495		 * link_speed_active, and the change may not take
2496		 * effect for some time (if we are in POLL), so this
2497		 * flag will force the updown routine to be called
2498		 * on the next ibstatuschange down interrupt, even
2499		 * if it's not a down->up transition.
2500 */
2501 if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
2502 val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2503 IBA7220_IBC_IBTA_1_2_MASK;
2504 spin_lock_irqsave(&ppd->lflags_lock, flags);
2505 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2506 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2507 } else
2508 val = val == QIB_IB_DDR ?
2509 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2510 maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2511 IBA7220_IBC_IBTA_1_2_MASK;
2512 /* IBTA 1.2 mode + speed bits are contiguous */
2513 lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
2514 setforce = 1;
2515 break;
2516
2517 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2518 lsb = IBA7220_IBC_RXPOL_SHIFT;
2519 maskr = IBA7220_IBC_RXPOL_MASK;
2520 break;
2521
2522 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2523 lsb = IBA7220_IBC_LREV_SHIFT;
2524 maskr = IBA7220_IBC_LREV_MASK;
2525 break;
2526
2527 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2528 maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2529 OverrunThreshold);
2530 if (maskr != val) {
2531 ppd->cpspec->ibcctrl &=
2532 ~SYM_MASK(IBCCtrl, OverrunThreshold);
2533 ppd->cpspec->ibcctrl |= (u64) val <<
2534 SYM_LSB(IBCCtrl, OverrunThreshold);
2535 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2536 qib_write_kreg(dd, kr_scratch, 0);
2537 }
2538 goto bail;
2539
2540 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2541 maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2542 PhyerrThreshold);
2543 if (maskr != val) {
2544 ppd->cpspec->ibcctrl &=
2545 ~SYM_MASK(IBCCtrl, PhyerrThreshold);
2546 ppd->cpspec->ibcctrl |= (u64) val <<
2547 SYM_LSB(IBCCtrl, PhyerrThreshold);
2548 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2549 qib_write_kreg(dd, kr_scratch, 0);
2550 }
2551 goto bail;
2552
2553 case QIB_IB_CFG_PKEYS: /* update pkeys */
2554 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2555 ((u64) ppd->pkeys[2] << 32) |
2556 ((u64) ppd->pkeys[3] << 48);
2557 qib_write_kreg(dd, kr_partitionkey, maskr);
2558 goto bail;
2559
2560 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2561 /* will only take effect when the link state changes */
2562 if (val == IB_LINKINITCMD_POLL)
2563 ppd->cpspec->ibcctrl &=
2564 ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2565 else /* SLEEP */
2566 ppd->cpspec->ibcctrl |=
2567 SYM_MASK(IBCCtrl, LinkDownDefaultState);
2568 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2569 qib_write_kreg(dd, kr_scratch, 0);
2570 goto bail;
2571
2572 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2573 /*
2574 * Update our housekeeping variables, and set IBC max
2575 * size, same as init code; max IBC is max we allow in
2576 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
2577 * Set even if it's unchanged, print debug message only
2578 * on changes.
2579 */
2580 val = (ppd->ibmaxlen >> 2) + 1;
2581 ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2582 ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
2583 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2584 qib_write_kreg(dd, kr_scratch, 0);
2585 goto bail;
2586
2587 case QIB_IB_CFG_LSTATE: /* set the IB link state */
2588 switch (val & 0xffff0000) {
2589 case IB_LINKCMD_DOWN:
2590 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2591 if (!ppd->cpspec->ibdeltainprog &&
2592 qib_compat_ddr_negotiate) {
2593 ppd->cpspec->ibdeltainprog = 1;
2594 ppd->cpspec->ibsymsnap =
2595 read_7220_creg32(dd, cr_ibsymbolerr);
2596 ppd->cpspec->iblnkerrsnap =
2597 read_7220_creg32(dd, cr_iblinkerrrecov);
2598 }
2599 break;
2600
2601 case IB_LINKCMD_ARMED:
2602 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2603 break;
2604
2605 case IB_LINKCMD_ACTIVE:
2606 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2607 break;
2608
2609 default:
2610 ret = -EINVAL;
2611 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2612 goto bail;
2613 }
2614 switch (val & 0xffff) {
2615 case IB_LINKINITCMD_NOP:
2616 licmd = 0;
2617 break;
2618
2619 case IB_LINKINITCMD_POLL:
2620 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2621 break;
2622
2623 case IB_LINKINITCMD_SLEEP:
2624 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2625 break;
2626
2627 case IB_LINKINITCMD_DISABLE:
2628 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2629 ppd->cpspec->chase_end = 0;
2630 /*
2631 * stop state chase counter and timer, if running.
2632			 * wait for a pending timer, but don't clear .data (ppd)!
2633 */
2634 if (ppd->cpspec->chase_timer.expires) {
2635 del_timer_sync(&ppd->cpspec->chase_timer);
2636 ppd->cpspec->chase_timer.expires = 0;
2637 }
2638 break;
2639
2640 default:
2641 ret = -EINVAL;
2642 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2643 val & 0xffff);
2644 goto bail;
2645 }
2646 qib_set_ib_7220_lstate(ppd, lcmd, licmd);
2647 goto bail;
2648
2649 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2650 if (val > IBA7220_IBC_HRTBT_MASK) {
2651 ret = -EINVAL;
2652 goto bail;
2653 }
2654 lsb = IBA7220_IBC_HRTBT_SHIFT;
2655 maskr = IBA7220_IBC_HRTBT_MASK;
2656 break;
2657
2658 default:
2659 ret = -EINVAL;
2660 goto bail;
2661 }
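	/* common path: fold the new field value into the ibcddrctrl shadow and flush with a scratch write */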
2662 ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2663 ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
2664 qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
2665 qib_write_kreg(dd, kr_scratch, 0);
2666 if (setforce) {
2667 spin_lock_irqsave(&ppd->lflags_lock, flags);
2668 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2669 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2670 }
2671bail:
2672 return ret;
2673}
2674
2675static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
2676{
2677 int ret = 0;
2678 u64 val, ddr;
2679
2680 if (!strncmp(what, "ibc", 3)) {
2681 ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2682 val = 0; /* disable heart beat, so link will come up */
2683 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2684 ppd->dd->unit, ppd->port);
2685 } else if (!strncmp(what, "off", 3)) {
2686 ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2687 /* enable heart beat again */
2688 val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
2689 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
2690 "(normal)\n", ppd->dd->unit, ppd->port);
2691 } else
2692 ret = -EINVAL;
2693 if (!ret) {
2694 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2695 ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
2696 << IBA7220_IBC_HRTBT_SHIFT);
2697 ppd->cpspec->ibcddrctrl = ddr | val;
2698 qib_write_kreg(ppd->dd, kr_ibcddrctrl,
2699 ppd->cpspec->ibcddrctrl);
2700 qib_write_kreg(ppd->dd, kr_scratch, 0);
2701 }
2702 return ret;
2703}
2704
2705static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2706 u32 updegr, u32 egrhd)
2707{
2708 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2709 if (updegr)
2710 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2711}
2712
2713static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
2714{
2715 u32 head, tail;
2716
2717 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2718 if (rcd->rcvhdrtail_kvaddr)
2719 tail = qib_get_rcvhdrtail(rcd);
2720 else
2721 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2722 return head == tail;
2723}
2724
2725/*
2726 * Modify the RCVCTRL register in a chip-specific way. This
2727 * is a function because bit positions and (future) register
2728 * locations are chip-specific, but the needed operations are
2729 * generic. <op> is a bit-mask because we often want to
2730 * do multiple modifications.
2731 */
2732static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
2733 int ctxt)
2734{
2735 struct qib_devdata *dd = ppd->dd;
2736 u64 mask, val;
2737 unsigned long flags;
2738
2739 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2740 if (op & QIB_RCVCTRL_TAILUPD_ENB)
2741 dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
2742 if (op & QIB_RCVCTRL_TAILUPD_DIS)
2743 dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
2744 if (op & QIB_RCVCTRL_PKEY_ENB)
2745 dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2746 if (op & QIB_RCVCTRL_PKEY_DIS)
2747 dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
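	/* a negative ctxt applies the per-context operations to all contexts */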
2748 if (ctxt < 0)
2749 mask = (1ULL << dd->ctxtcnt) - 1;
2750 else
2751 mask = (1ULL << ctxt);
2752 if (op & QIB_RCVCTRL_CTXT_ENB) {
2753 /* always done for specific ctxt */
2754 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2755 if (!(dd->flags & QIB_NODMA_RTAIL))
2756 dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
2757 /* Write these registers before the context is enabled. */
2758 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2759 dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2760 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2761 dd->rcd[ctxt]->rcvhdrq_phys);
2762 dd->rcd[ctxt]->seq_cnt = 1;
2763 }
2764 if (op & QIB_RCVCTRL_CTXT_DIS)
2765 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2766 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2767 dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
2768 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2769 dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
2770 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2771 if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2772 /* arm rcv interrupt */
2773 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2774 dd->rhdrhead_intr_off;
2775 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2776 }
2777 if (op & QIB_RCVCTRL_CTXT_ENB) {
2778 /*
2779 * Init the context registers also; if we were
2780 * disabled, tail and head should both be zero
2781 * already from the enable, but since we don't
2782		 * know, we have to do it explicitly.
2783 */
2784 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2785 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2786
2787 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2788 dd->rcd[ctxt]->head = val;
2789 /* If kctxt, interrupt on next receive. */
2790 if (ctxt < dd->first_user_ctxt)
2791 val |= dd->rhdrhead_intr_off;
2792 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2793 }
2794 if (op & QIB_RCVCTRL_CTXT_DIS) {
2795 if (ctxt >= 0) {
2796 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
2797 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
2798 } else {
2799 unsigned i;
2800
2801 for (i = 0; i < dd->cfgctxts; i++) {
2802 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2803 i, 0);
2804 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
2805 }
2806 }
2807 }
2808 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2809}
2810
2811/*
2812 * Modify the SENDCTRL register in a chip-specific way. This
2813 * is a function because there may be multiple such registers with
2814 * slightly different layouts. To start, we assume the
2815 * "canonical" register layout of the first chips.
2816 * The chip requires no back-to-back sendctrl writes, so write the
2817 * scratch register after writing sendctrl.
2818 */
2819static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
2820{
2821 struct qib_devdata *dd = ppd->dd;
2822 u64 tmp_dd_sendctrl;
2823 unsigned long flags;
2824
2825 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2826
2827 /* First the ones that are "sticky", saved in shadow */
2828 if (op & QIB_SENDCTRL_CLEAR)
2829 dd->sendctrl = 0;
2830 if (op & QIB_SENDCTRL_SEND_DIS)
2831 dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
2832 else if (op & QIB_SENDCTRL_SEND_ENB) {
2833 dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
2834 if (dd->flags & QIB_USE_SPCL_TRIG)
2835 dd->sendctrl |= SYM_MASK(SendCtrl,
2836 SSpecialTriggerEn);
2837 }
2838 if (op & QIB_SENDCTRL_AVAIL_DIS)
2839 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2840 else if (op & QIB_SENDCTRL_AVAIL_ENB)
2841 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
2842
2843 if (op & QIB_SENDCTRL_DISARM_ALL) {
2844 u32 i, last;
2845
2846 tmp_dd_sendctrl = dd->sendctrl;
2847 /*
2848 * disarm any that are not yet launched, disabling sends
2849 * and updates until done.
2850 */
2851 last = dd->piobcnt2k + dd->piobcnt4k;
2852 tmp_dd_sendctrl &=
2853 ~(SYM_MASK(SendCtrl, SPioEnable) |
2854 SYM_MASK(SendCtrl, SendBufAvailUpd));
2855 for (i = 0; i < last; i++) {
2856 qib_write_kreg(dd, kr_sendctrl,
2857 tmp_dd_sendctrl |
2858 SYM_MASK(SendCtrl, Disarm) | i);
2859 qib_write_kreg(dd, kr_scratch, 0);
2860 }
2861 }
2862
2863 tmp_dd_sendctrl = dd->sendctrl;
2864
2865 if (op & QIB_SENDCTRL_FLUSH)
2866 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2867 if (op & QIB_SENDCTRL_DISARM)
2868 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2869 ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
2870 SYM_LSB(SendCtrl, DisarmPIOBuf));
2871 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
2872 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
2873 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2874
2875 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2876 qib_write_kreg(dd, kr_scratch, 0);
2877
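	/* for a blip, rewrite the shadow value (with SendBufAvailUpd set) to re-enable avail updates */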
2878 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2879 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2880 qib_write_kreg(dd, kr_scratch, 0);
2881 }
2882
2883 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2884
2885 if (op & QIB_SENDCTRL_FLUSH) {
2886 u32 v;
2887 /*
2888 * ensure writes have hit chip, then do a few
2889 * more reads, to allow DMA of pioavail registers
2890 * to occur, so in-memory copy is in sync with
2891 * the chip. Not always safe to sleep.
2892 */
2893 v = qib_read_kreg32(dd, kr_scratch);
2894 qib_write_kreg(dd, kr_scratch, v);
2895 v = qib_read_kreg32(dd, kr_scratch);
2896 qib_write_kreg(dd, kr_scratch, v);
2897 qib_read_kreg32(dd, kr_scratch);
2898 }
2899}
2900
2901/**
2902 * qib_portcntr_7220 - read a per-port counter
2903 * @ppd: the qlogic_ib port data
2904 * @reg: the counter to snapshot
2905 */
2906static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
2907{
2908 u64 ret = 0ULL;
2909 struct qib_devdata *dd = ppd->dd;
2910 u16 creg;
2911 /* 0xffff for unimplemented or synthesized counters */
2912 static const u16 xlator[] = {
2913 [QIBPORTCNTR_PKTSEND] = cr_pktsend,
2914 [QIBPORTCNTR_WORDSEND] = cr_wordsend,
2915 [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
2916 [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
2917 [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
2918 [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2919 [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2920 [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
2921 [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
2922 [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2923 [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2924 [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2925 [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2926 [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
2927 [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
2928 [QIBPORTCNTR_ERRICRC] = cr_erricrc,
2929 [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2930 [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2931 [QIBPORTCNTR_BADFORMAT] = cr_badformat,
2932 [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2933 [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2934 [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2935 [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2936 [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
2937 [QIBPORTCNTR_ERRLINK] = cr_errlink,
2938 [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2939 [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2940 [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
2941 [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
2942 [QIBPORTCNTR_PSSTART] = cr_psstart,
2943 [QIBPORTCNTR_PSSTAT] = cr_psstat,
2944 [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
2945 [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2946 [QIBPORTCNTR_KHDROVFL] = 0xffff,
2947 };
2948
2949 if (reg >= ARRAY_SIZE(xlator)) {
2950 qib_devinfo(ppd->dd->pcidev,
2951 "Unimplemented portcounter %u\n", reg);
2952 goto done;
2953 }
2954 creg = xlator[reg];
2955
2956 if (reg == QIBPORTCNTR_KHDROVFL) {
2957 int i;
2958
2959 /* sum over all kernel contexts */
2960 for (i = 0; i < dd->first_user_ctxt; i++)
2961 ret += read_7220_creg32(dd, cr_portovfl + i);
2962 }
2963 if (creg == 0xffff)
2964 goto done;
2965
2966 /*
2967 * only fast incrementing counters are 64bit; use 32 bit reads to
2968 * avoid two independent reads when on opteron
2969 */
2970 if ((creg == cr_wordsend || creg == cr_wordrcv ||
2971 creg == cr_pktsend || creg == cr_pktrcv))
2972 ret = read_7220_creg(dd, creg);
2973 else
2974 ret = read_7220_creg32(dd, creg);
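	/*
	 * Adjust symbol-error and link-error-recovery counts using the
	 * snapshots and deltas tracked across link state changes, so
	 * transient errors seen while the link retrains (e.g. during the
	 * DDR autoneg dance) are not reported against the counters.
	 */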
2975 if (creg == cr_ibsymbolerr) {
2976 if (dd->pport->cpspec->ibdeltainprog)
2977 ret -= ret - ppd->cpspec->ibsymsnap;
2978 ret -= dd->pport->cpspec->ibsymdelta;
2979 } else if (creg == cr_iblinkerrrecov) {
2980 if (dd->pport->cpspec->ibdeltainprog)
2981 ret -= ret - ppd->cpspec->iblnkerrsnap;
2982 ret -= dd->pport->cpspec->iblnkerrdelta;
2983 }
2984done:
2985 return ret;
2986}
2987
2988/*
2989 * Device counter names (not port-specific), one line per stat,
2990 * single string. Used by utilities like ipathstats to print the stats
2991 * in a way which works for different versions of drivers, without changing
2992 * the utility. Names need to be 12 chars or less (w/o newline), for proper
2993 * display by utility.
2994 * Non-error counters are first.
2995 * Start of "error" counters is indicated by a leading "E " on the first
2996 * "error" counter, and doesn't count in label length.
2997 * The EgrOvfl list needs to be last so we truncate them at the configured
2998 * context count for the device.
2999 * cntr7220indices contains the corresponding register indices.
3000 */
3001static const char cntr7220names[] =
3002 "Interrupts\n"
3003 "HostBusStall\n"
3004 "E RxTIDFull\n"
3005 "RxTIDInvalid\n"
3006 "Ctxt0EgrOvfl\n"
3007 "Ctxt1EgrOvfl\n"
3008 "Ctxt2EgrOvfl\n"
3009 "Ctxt3EgrOvfl\n"
3010 "Ctxt4EgrOvfl\n"
3011 "Ctxt5EgrOvfl\n"
3012 "Ctxt6EgrOvfl\n"
3013 "Ctxt7EgrOvfl\n"
3014 "Ctxt8EgrOvfl\n"
3015 "Ctxt9EgrOvfl\n"
3016 "Ctx10EgrOvfl\n"
3017 "Ctx11EgrOvfl\n"
3018 "Ctx12EgrOvfl\n"
3019 "Ctx13EgrOvfl\n"
3020 "Ctx14EgrOvfl\n"
3021 "Ctx15EgrOvfl\n"
3022 "Ctx16EgrOvfl\n";
3023
3024static const size_t cntr7220indices[] = {
3025 cr_lbint,
3026 cr_lbflowstall,
3027 cr_errtidfull,
3028 cr_errtidvalid,
3029 cr_portovfl + 0,
3030 cr_portovfl + 1,
3031 cr_portovfl + 2,
3032 cr_portovfl + 3,
3033 cr_portovfl + 4,
3034 cr_portovfl + 5,
3035 cr_portovfl + 6,
3036 cr_portovfl + 7,
3037 cr_portovfl + 8,
3038 cr_portovfl + 9,
3039 cr_portovfl + 10,
3040 cr_portovfl + 11,
3041 cr_portovfl + 12,
3042 cr_portovfl + 13,
3043 cr_portovfl + 14,
3044 cr_portovfl + 15,
3045 cr_portovfl + 16,
3046};
3047
3048/*
3049 * same as cntr7220names and cntr7220indices, but for port-specific counters.
3050 * portcntr7220indices is somewhat complicated by some registers needing
3051 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
3052 */
3053static const char portcntr7220names[] =
3054 "TxPkt\n"
3055 "TxFlowPkt\n"
3056 "TxWords\n"
3057 "RxPkt\n"
3058 "RxFlowPkt\n"
3059 "RxWords\n"
3060 "TxFlowStall\n"
3061 "TxDmaDesc\n" /* 7220 and 7322-only */
3062 "E RxDlidFltr\n" /* 7220 and 7322-only */
3063 "IBStatusChng\n"
3064 "IBLinkDown\n"
3065 "IBLnkRecov\n"
3066 "IBRxLinkErr\n"
3067 "IBSymbolErr\n"
3068 "RxLLIErr\n"
3069 "RxBadFormat\n"
3070 "RxBadLen\n"
3071 "RxBufOvrfl\n"
3072 "RxEBP\n"
3073 "RxFlowCtlErr\n"
3074 "RxICRCerr\n"
3075 "RxLPCRCerr\n"
3076 "RxVCRCerr\n"
3077 "RxInvalLen\n"
3078 "RxInvalPKey\n"
3079 "RxPktDropped\n"
3080 "TxBadLength\n"
3081 "TxDropped\n"
3082 "TxInvalLen\n"
3083 "TxUnderrun\n"
3084 "TxUnsupVL\n"
3085 "RxLclPhyErr\n" /* 7220 and 7322-only */
3086 "RxVL15Drop\n" /* 7220 and 7322-only */
3087 "RxVlErr\n" /* 7220 and 7322-only */
3088 "XcessBufOvfl\n" /* 7220 and 7322-only */
3089 ;
3090
3091#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
3092static const size_t portcntr7220indices[] = {
3093 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
3094 cr_pktsendflow,
3095 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
3096 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
3097 cr_pktrcvflowctrl,
3098 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
3099 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
3100 cr_txsdmadesc,
3101 cr_rxdlidfltr,
3102 cr_ibstatuschange,
3103 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
3104 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
3105 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
3106 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
3107 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
3108 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
3109 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
3110 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
3111 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
3112 cr_rcvflowctrl_err,
3113 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
3114 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
3115 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
3116 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
3117 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
3118 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
3119 cr_invalidslen,
3120 cr_senddropped,
3121 cr_errslen,
3122 cr_sendunderrun,
3123 cr_txunsupvl,
3124 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
3125 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
3126 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
3127 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
3128};
3129
3130/* do all the setup to make the counter reads efficient later */
3131static void init_7220_cntrnames(struct qib_devdata *dd)
3132{
3133 int i, j = 0;
3134 char *s;
3135
3136 for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
3137 i++) {
3138 /* we always have at least one counter before the egrovfl */
3139 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
3140 j = 1;
3141 s = strchr(s + 1, '\n');
3142 if (s && j)
3143 j++;
3144 }
3145 dd->cspec->ncntrs = i;
3146 if (!s)
3147 /* full list; size is without terminating null */
3148 dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
3149 else
3150 dd->cspec->cntrnamelen = 1 + s - cntr7220names;
3151 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
3152 * sizeof(u64), GFP_KERNEL);
3153 if (!dd->cspec->cntrs)
3154 qib_dev_err(dd, "Failed allocation for counters\n");
3155
3156 for (i = 0, s = (char *)portcntr7220names; s; i++)
3157 s = strchr(s + 1, '\n');
3158 dd->cspec->nportcntrs = i - 1;
3159 dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
3160 dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
3161 * sizeof(u64), GFP_KERNEL);
3162 if (!dd->cspec->portcntrs)
3163 qib_dev_err(dd, "Failed allocation for portcounters\n");
3164}
3165
3166static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
3167 u64 **cntrp)
3168{
3169 u32 ret;
3170
3171 if (!dd->cspec->cntrs) {
3172 ret = 0;
3173 goto done;
3174 }
3175
3176 if (namep) {
3177 *namep = (char *)cntr7220names;
3178 ret = dd->cspec->cntrnamelen;
3179 if (pos >= ret)
3180 ret = 0; /* final read after getting everything */
3181 } else {
3182 u64 *cntr = dd->cspec->cntrs;
3183 int i;
3184
3185 ret = dd->cspec->ncntrs * sizeof(u64);
3186 if (!cntr || pos >= ret) {
3187 /* everything read, or couldn't get memory */
3188 ret = 0;
3189 goto done;
3190 }
3191
3192 *cntrp = cntr;
3193 for (i = 0; i < dd->cspec->ncntrs; i++)
3194 *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
3195 }
3196done:
3197 return ret;
3198}
3199
3200static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
3201 char **namep, u64 **cntrp)
3202{
3203 u32 ret;
3204
3205 if (!dd->cspec->portcntrs) {
3206 ret = 0;
3207 goto done;
3208 }
3209 if (namep) {
3210 *namep = (char *)portcntr7220names;
3211 ret = dd->cspec->portcntrnamelen;
3212 if (pos >= ret)
3213 ret = 0; /* final read after getting everything */
3214 } else {
3215 u64 *cntr = dd->cspec->portcntrs;
3216 struct qib_pportdata *ppd = &dd->pport[port];
3217 int i;
3218
3219 ret = dd->cspec->nportcntrs * sizeof(u64);
3220 if (!cntr || pos >= ret) {
3221 /* everything read, or couldn't get memory */
3222 ret = 0;
3223 goto done;
3224 }
3225 *cntrp = cntr;
3226 for (i = 0; i < dd->cspec->nportcntrs; i++) {
3227 if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
3228 *cntr++ = qib_portcntr_7220(ppd,
3229 portcntr7220indices[i] &
3230 ~_PORT_VIRT_FLAG);
3231 else
3232 *cntr++ = read_7220_creg32(dd,
3233 portcntr7220indices[i]);
3234 }
3235 }
3236done:
3237 return ret;
3238}
3239
3240/**
3241 * qib_get_7220_faststats - get word counters from chip before they overflow
3242 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
3243 *
3244 * This needs more work; in particular, a decision on whether we really
3245 * need traffic_wds maintained the way it is.
3246 * Called from add_timer.
3247 */
3248static void qib_get_7220_faststats(unsigned long opaque)
3249{
3250 struct qib_devdata *dd = (struct qib_devdata *) opaque;
3251 struct qib_pportdata *ppd = dd->pport;
3252 unsigned long flags;
3253 u64 traffic_wds;
3254
3255 /*
3256 * don't access the chip while running diags, or memory diags can
3257 * fail
3258 */
3259 if (!(dd->flags & QIB_INITTED) || dd->diag_client)
3260		/* but re-arm the timer for the diags case; won't hurt otherwise */
3261 goto done;
3262
3263 /*
3264 * We now try to maintain an activity timer, based on traffic
3265 * exceeding a threshold, so we need to check the word-counts
3266 * even if they are 64-bit.
3267 */
3268 traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
3269 qib_portcntr_7220(ppd, cr_wordrcv);
3270 spin_lock_irqsave(&dd->eep_st_lock, flags);
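	/* delta since the last sample; fold it back into the running total */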
3271 traffic_wds -= dd->traffic_wds;
3272 dd->traffic_wds += traffic_wds;
3273 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
3274 atomic_add(5, &dd->active_time); /* S/B #define */
3275 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3276done:
3277 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
3278}
3279
3280/*
3281 * If we are using MSI, try to fallback to INTx.
3282 */
3283static int qib_7220_intr_fallback(struct qib_devdata *dd)
3284{
3285 if (!dd->msi_lo)
3286 return 0;
3287
3288 qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
3289 " trying INTx interrupts\n");
3290 qib_7220_free_irq(dd);
3291 qib_enable_intx(dd->pcidev);
3292 /*
3293 * Some newer kernels require free_irq before disable_msi,
3294 * and irq can be changed during disable and INTx enable
3295 * and we need to therefore use the pcidev->irq value,
3296 * not our saved MSI value.
3297 */
3298 dd->cspec->irq = dd->pcidev->irq;
3299 qib_setup_7220_interrupt(dd);
3300 return 1;
3301}
3302
3303/*
3304 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
3305 * than resetting the IBC or external link state, and useful in some
3306 * cases to cause some retraining. To do this right, we reset IBC
3307 * as well.
3308 */
3309static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
3310{
3311 u64 val, prev_val;
3312 struct qib_devdata *dd = ppd->dd;
3313
3314 prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
3315 val = prev_val | QLOGIC_IB_XGXS_RESET;
3316 prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
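	/* pulse the XGXS reset with the link enable dropped, then restore both */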
3317 qib_write_kreg(dd, kr_control,
3318 dd->control & ~QLOGIC_IB_C_LINKENABLE);
3319 qib_write_kreg(dd, kr_xgxs_cfg, val);
3320 qib_read_kreg32(dd, kr_scratch);
3321 qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
3322 qib_write_kreg(dd, kr_control, dd->control);
3323}
3324
3325/*
3326 * For this chip, we want to use the same buffer every time
3327 * when we are trying to bring the link up (they are always VL15
3328 * packets). At that link state the packet should always go out immediately
3329 * (or at least be discarded at the tx interface if the link is down).
3330 * If it doesn't, and the buffer isn't available, that means some other
3331 * sender has gotten ahead of us, and is preventing our packet from going
3332 * out. In that case, we flush all packets, and try again. If that still
3333 * fails, we fail the request, and hope things work the next time around.
3334 *
3335 * We don't need very complicated heuristics on whether the packet had
3336 * time to go out or not, since even at SDR 1X, it goes out in very short
3337 * time periods, covered by the chip reads done here and as part of the
3338 * flush.
3339 */
3340static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3341{
3342 u32 __iomem *buf;
3343 u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
3344 int do_cleanup;
3345 unsigned long flags;
3346
3347 /*
3348 * always blip to get avail list updated, since it's almost
3349 * always needed, and is fairly cheap.
3350 */
3351 sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3352 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3353 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3354 if (buf)
3355 goto done;
3356
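	/*
	 * No buffer was free: either take an idle SDMA engine through
	 * hw_down via its state machine, or clean up the engine
	 * directly, then retry the buffer once.
	 */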
3357 spin_lock_irqsave(&ppd->sdma_lock, flags);
3358 if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
3359 ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
3360 __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
3361 do_cleanup = 0;
3362 } else {
3363 do_cleanup = 1;
3364 qib_7220_sdma_hw_clean_up(ppd);
3365 }
3366 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3367
3368 if (do_cleanup) {
3369 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3370 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3371 }
3372done:
3373 return buf;
3374}
3375
3376/*
3377 * This code for non-IBTA-compliant IB speed negotiation is only known to
3378 * work for the SDR to DDR transition, and only between an HCA and a switch
3379 * with recent firmware. It is based on observed heuristics, rather than
3380 * actual knowledge of the non-compliant speed negotiation.
3381 * It has a number of hard-coded fields, since the hope is to rewrite this
3382 * when a spec is available on how the negotiation is intended to work.
3383 */
3384static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
3385 u32 dcnt, u32 *data)
3386{
3387 int i;
3388 u64 pbc;
3389 u32 __iomem *piobuf;
3390 u32 pnum;
3391 struct qib_devdata *dd = ppd->dd;
3392
3393 i = 0;
3394 pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
3395 pbc |= PBC_7220_VL15_SEND;
3396 while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
3397 if (i++ > 5)
3398 return;
3399 udelay(2);
3400 }
3401 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
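	/* the PBC fills the first two dwords; the 7-dword header goes at offset 2, payload at offset 9 */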
3402 writeq(pbc, piobuf);
3403 qib_flush_wc();
3404 qib_pio_copy(piobuf + 2, hdr, 7);
3405 qib_pio_copy(piobuf + 9, data, dcnt);
3406 if (dd->flags & QIB_USE_SPCL_TRIG) {
3407 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
3408
3409 qib_flush_wc();
3410 __raw_writel(0xaebecede, piobuf + spcl_off);
3411 }
3412 qib_flush_wc();
3413 qib_sendbuf_done(dd, pnum);
3414}
3415
3416/*
3417 * _start packet gets sent twice at start, _done gets sent twice at end
3418 */
3419static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
3420{
3421 struct qib_devdata *dd = ppd->dd;
3422 static u32 swapped;
3423 u32 dw, i, hcnt, dcnt, *data;
3424 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
3425 static u32 madpayload_start[0x40] = {
3426 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3427 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3428 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
3429 };
3430 static u32 madpayload_done[0x40] = {
3431 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3432 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3433 0x40000001, 0x1388, 0x15e, /* rest 0's */
3434 };
3435
3436 dcnt = ARRAY_SIZE(madpayload_start);
3437 hcnt = ARRAY_SIZE(hdr);
3438 if (!swapped) {
3439 /* for maintainability, do it at runtime */
3440 for (i = 0; i < hcnt; i++) {
3441 dw = (__force u32) cpu_to_be32(hdr[i]);
3442 hdr[i] = dw;
3443 }
3444 for (i = 0; i < dcnt; i++) {
3445 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
3446 madpayload_start[i] = dw;
3447 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
3448 madpayload_done[i] = dw;
3449 }
3450 swapped = 1;
3451 }
3452
3453 data = which ? madpayload_done : madpayload_start;
3454
3455 autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3456 qib_read_kreg64(dd, kr_scratch);
3457 udelay(2);
3458 autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3459 qib_read_kreg64(dd, kr_scratch);
3460 udelay(2);
3461}
3462
3463/*
3464 * Do the absolute minimum to cause an IB speed change, and make it
3465 * ready, but don't actually trigger the change. The caller will
3466 * do that when ready (if link is in Polling training state, it will
3467 * happen immediately, otherwise when link next goes down)
3468 *
3469 * This routine should only be used as part of the DDR autonegotiation
3470 * code for devices that are not compliant with IB 1.2 (or code that
3471 * fixes things up for same).
3472 *
3473 * When the link has gone down and autoneg is enabled, or autoneg has
3474 * failed and we give up until the next time both speeds are set, we
3475 * then want IBTA enabled as well as "use max enabled speed".
3476 */
3477static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
3478{
3479 ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
3480 IBA7220_IBC_IBTA_1_2_MASK);
3481
3482 if (speed == (QIB_IB_SDR | QIB_IB_DDR))
3483 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
3484 IBA7220_IBC_IBTA_1_2_MASK;
3485 else
3486 ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
3487 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
3488
3489 qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
3490 qib_write_kreg(ppd->dd, kr_scratch, 0);
3491}
3492
3493/*
3494 * This routine is only used when we are not talking to another
3495 * IB 1.2-compliant device that we think can do DDR.
3496 * (This includes all existing switch chips as of Oct 2007.)
3497 * 1.2-compliant devices go directly to DDR prior to reaching INIT
3498 */
3499static void try_7220_autoneg(struct qib_pportdata *ppd)
3500{
3501 unsigned long flags;
3502
3503 /*
3504 * Required for older non-IB1.2 DDR switches. Newer
3505 * non-IB-compliant switches don't need it, but so far,
3506 * aren't bothered by it either. "Magic constant"
3507 */
3508 qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
3509
3510 spin_lock_irqsave(&ppd->lflags_lock, flags);
3511 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
3512 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3513 autoneg_7220_send(ppd, 0);
3514 set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3515
3516 toggle_7220_rclkrls(ppd->dd);
3517 /* 2 msec is minimum length of a poll cycle */
3518 schedule_delayed_work(&ppd->cpspec->autoneg_work,
3519 msecs_to_jiffies(2));
3520}
3521
3522/*
3523 * Handle the empirically determined mechanism for auto-negotiation
3524 * of DDR speed with switches.
3525 */
3526static void autoneg_7220_work(struct work_struct *work)
3527{
3528 struct qib_pportdata *ppd;
3529 struct qib_devdata *dd;
3530 u64 startms;
3531 u32 i;
3532 unsigned long flags;
3533
3534 ppd = &container_of(work, struct qib_chippport_specific,
3535 autoneg_work.work)->pportdata;
3536 dd = ppd->dd;
3537
3538 startms = jiffies_to_msecs(jiffies);
3539
3540 /*
3541	 * Busy-wait for this first part; it should be at most a
3542 * few hundred usec, since we scheduled ourselves for 2msec.
3543 */
3544 for (i = 0; i < 25; i++) {
3545 if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
3546 == IB_7220_LT_STATE_POLLQUIET) {
3547 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
3548 break;
3549 }
3550 udelay(100);
3551 }
3552
3553 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3554 goto done; /* we got there early or told to stop */
3555
3556 /* we expect this to timeout */
3557 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3558 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3559 msecs_to_jiffies(90)))
3560 goto done;
3561
3562 toggle_7220_rclkrls(dd);
3563
3564 /* we expect this to timeout */
3565 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3566 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3567 msecs_to_jiffies(1700)))
3568 goto done;
3569
3570 set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
3571 toggle_7220_rclkrls(dd);
3572
3573 /*
3574 * Wait up to 250 msec for link to train and get to INIT at DDR;
3575 * this should terminate early.
3576 */
3577 wait_event_timeout(ppd->cpspec->autoneg_wait,
3578 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3579 msecs_to_jiffies(250));
3580done:
3581 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
3582 spin_lock_irqsave(&ppd->lflags_lock, flags);
3583 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
3584 if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
3585 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
3586 dd->cspec->autoneg_tries = 0;
3587 }
3588 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3589 set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3590 }
3591}
3592
3593static u32 qib_7220_iblink_state(u64 ibcs)
3594{
3595 u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
3596
3597 switch (state) {
3598 case IB_7220_L_STATE_INIT:
3599 state = IB_PORT_INIT;
3600 break;
3601 case IB_7220_L_STATE_ARM:
3602 state = IB_PORT_ARMED;
3603 break;
3604 case IB_7220_L_STATE_ACTIVE:
3605 /* fall through */
3606 case IB_7220_L_STATE_ACT_DEFER:
3607 state = IB_PORT_ACTIVE;
3608 break;
3609 default: /* fall through */
3610 case IB_7220_L_STATE_DOWN:
3611 state = IB_PORT_DOWN;
3612 break;
3613 }
3614 return state;
3615}
3616
3617/* returns the IBTA port state, rather than the IBC link training state */
3618static u8 qib_7220_phys_portstate(u64 ibcs)
3619{
3620 u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3621 return qib_7220_physportstate[state];
3622}
3623
3624static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3625{
3626 int ret = 0, symadj = 0;
3627 struct qib_devdata *dd = ppd->dd;
3628 unsigned long flags;
3629
3630 spin_lock_irqsave(&ppd->lflags_lock, flags);
3631 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3632 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3633
3634 if (!ibup) {
3635 /*
3636 * When the link goes down we don't want AEQ running, so it
3637 * won't interfere with IBC training, etc., and we need
3638 * to go back to the static SerDes preset values.
3639 */
3640 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3641 QIBL_IB_AUTONEG_INPROG)))
3642 set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3643 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3644 qib_sd7220_presets(dd);
3645 qib_cancel_sends(ppd); /* initial disarm, etc. */
3646 spin_lock_irqsave(&ppd->sdma_lock, flags);
3647 if (__qib_sdma_running(ppd))
3648 __qib_sdma_process_event(ppd,
3649 qib_sdma_event_e70_go_idle);
3650 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3651 }
3652		/* this might be better in qib_sd7220_presets() */
3653 set_7220_relock_poll(dd, ibup);
3654 } else {
3655 if (qib_compat_ddr_negotiate &&
3656 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3657 QIBL_IB_AUTONEG_INPROG)) &&
3658 ppd->link_speed_active == QIB_IB_SDR &&
3659 (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
3660 (QIB_IB_DDR | QIB_IB_SDR) &&
3661 dd->cspec->autoneg_tries < AUTONEG_TRIES) {
3662 /* we are SDR, and DDR auto-negotiation enabled */
3663 ++dd->cspec->autoneg_tries;
3664 if (!ppd->cpspec->ibdeltainprog) {
3665 ppd->cpspec->ibdeltainprog = 1;
3666 ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
3667 cr_ibsymbolerr);
3668 ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
3669 cr_iblinkerrrecov);
3670 }
3671 try_7220_autoneg(ppd);
3672 ret = 1; /* no other IB status change processing */
3673 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3674 ppd->link_speed_active == QIB_IB_SDR) {
3675 autoneg_7220_send(ppd, 1);
3676 set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3677 udelay(2);
3678 toggle_7220_rclkrls(dd);
3679 ret = 1; /* no other IB status change processing */
3680 } else {
3681 if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3682 (ppd->link_speed_active & QIB_IB_DDR)) {
3683 spin_lock_irqsave(&ppd->lflags_lock, flags);
3684 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
3685 QIBL_IB_AUTONEG_FAILED);
3686 spin_unlock_irqrestore(&ppd->lflags_lock,
3687 flags);
3688 dd->cspec->autoneg_tries = 0;
3689 /* re-enable SDR, for next link down */
3690 set_7220_ibspeed_fast(ppd,
3691 ppd->link_speed_enabled);
3692 wake_up(&ppd->cpspec->autoneg_wait);
3693 symadj = 1;
3694 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
3695 /*
3696 * Clear autoneg failure flag, and do setup
3697 * so we'll try next time link goes down and
3698 * back to INIT (possibly connected to a
3699 * different device).
3700 */
3701 spin_lock_irqsave(&ppd->lflags_lock, flags);
3702 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3703 spin_unlock_irqrestore(&ppd->lflags_lock,
3704 flags);
3705 ppd->cpspec->ibcddrctrl |=
3706 IBA7220_IBC_IBTA_1_2_MASK;
3707 qib_write_kreg(dd, kr_ncmodectrl, 0);
3708 symadj = 1;
3709 }
3710 }
3711
3712 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3713 symadj = 1;
3714
3715 if (!ret) {
3716 ppd->delay_mult = rate_to_delay
3717 [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
3718 [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
3719
3720 set_7220_relock_poll(dd, ibup);
3721 spin_lock_irqsave(&ppd->sdma_lock, flags);
3722 /*
3723		 * Unlike the 7322, the 7220 needs this, due to the lack of an
3724		 * interrupt in some cases when SDMA is active while the link
3725		 * goes down.
3726 */
3727 if (ppd->sdma_state.current_state !=
3728 qib_sdma_state_s20_idle)
3729 __qib_sdma_process_event(ppd,
3730 qib_sdma_event_e00_go_hw_down);
3731 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3732 }
3733 }
3734
3735 if (symadj) {
3736 if (ppd->cpspec->ibdeltainprog) {
3737 ppd->cpspec->ibdeltainprog = 0;
3738 ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
3739 cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
3740 ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
3741 cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
3742 }
3743 } else if (!ibup && qib_compat_ddr_negotiate &&
3744 !ppd->cpspec->ibdeltainprog &&
3745 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3746 ppd->cpspec->ibdeltainprog = 1;
3747 ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
3748 cr_ibsymbolerr);
3749 ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
3750 cr_iblinkerrrecov);
3751 }
3752
3753 if (!ret)
3754 qib_setup_7220_setextled(ppd, ibup);
3755 return ret;
3756}
3757
3758/*
3759 * Does read/modify/write to appropriate registers to
3760 * set output and direction bits selected by mask.
3761 * These are in their canonical positions (e.g. lsb of
3762 * dir will end up in D48 of extctrl on existing chips).
3763 * Returns contents of GP Inputs.
3764 */
3765static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3766{
3767 u64 read_val, new_out;
3768 unsigned long flags;
3769
3770 if (mask) {
3771 /* some bits being written, lock access to GPIO */
3772 dir &= mask;
3773 out &= mask;
3774 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3775 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3776 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3777 new_out = (dd->cspec->gpio_out & ~mask) | out;
3778
3779 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3780 qib_write_kreg(dd, kr_gpio_out, new_out);
3781 dd->cspec->gpio_out = new_out;
3782 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3783 }
3784 /*
3785 * It is unlikely that a read at this time would get valid
3786 * data on a pin whose direction line was set in the same
3787 * call to this function. We include the read here because
3788 * that allows us to potentially combine a change on one pin with
3789 * a read on another, and because the old code did something like
3790 * this.
3791 */
3792 read_val = qib_read_kreg64(dd, kr_extstatus);
3793 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
3794}
3795
3796/*
3797 * Read fundamental info we need to use the chip. These are
3798 * the registers that describe chip capabilities, and are
3799 * saved in shadow registers.
3800 */
3801static void get_7220_chip_params(struct qib_devdata *dd)
3802{
3803 u64 val;
3804 u32 piobufs;
3805 int mtu;
3806
3807 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3808
3809 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3810 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3811 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3812 dd->palign = qib_read_kreg32(dd, kr_palign);
3813 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3814 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3815
3816 val = qib_read_kreg64(dd, kr_sendpiosize);
3817 dd->piosize2k = val & ~0U;
3818 dd->piosize4k = val >> 32;
3819
3820 mtu = ib_mtu_enum_to_int(qib_ibmtu);
3821 if (mtu == -1)
3822 mtu = QIB_DEFAULT_MTU;
3823 dd->pport->ibmtu = (u32)mtu;
3824
3825 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3826 dd->piobcnt2k = val & ~0U;
3827 dd->piobcnt4k = val >> 32;
3828 /* these may be adjusted in init_chip_wc_pat() */
3829 dd->pio2kbase = (u32 __iomem *)
3830 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
3831 if (dd->piobcnt4k) {
3832 dd->pio4kbase = (u32 __iomem *)
3833 ((char __iomem *) dd->kregbase +
3834 (dd->piobufbase >> 32));
3835 /*
3836 * 4K buffers take 2 pages; we use roundup just to be
3837 * paranoid; we calculate it once here, rather than on
3838		 * every buffer allocation
3839 */
3840 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3841 }
3842
3843 piobufs = dd->piobcnt4k + dd->piobcnt2k;
3844
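	/*
	 * The buffer-available shadow uses 2 bits per PIO buffer, so each
	 * 64-bit pioavail register covers 32 buffers; round up to a whole
	 * number of registers.
	 */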
3845 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3846 (sizeof(u64) * BITS_PER_BYTE / 2);
3847}
3848
3849/*
3850 * The chip base addresses in cspec and cpspec have to be set
3851 * after possible init_chip_wc_pat(), rather than in
3852 * get_7220_chip_params(), so split out as a separate function.
3853 */
3854static void set_7220_baseaddrs(struct qib_devdata *dd)
3855{
3856 u32 cregbase;
3857 /* init after possible re-map in init_chip_wc_pat() */
3858 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3859 dd->cspec->cregbase = (u64 __iomem *)
3860 ((char __iomem *) dd->kregbase + cregbase);
3861
3862 dd->egrtidbase = (u64 __iomem *)
3863 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3864}
3865
3866
3867#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
3868 SYM_MASK(SendCtrl, SPioEnable) | \
3869 SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
3870 SYM_MASK(SendCtrl, SendBufAvailUpd) | \
3871 SYM_MASK(SendCtrl, AvailUpdThld) | \
3872 SYM_MASK(SendCtrl, SDmaEnable) | \
3873 SYM_MASK(SendCtrl, SDmaIntEnable) | \
3874 SYM_MASK(SendCtrl, SDmaHalt) | \
3875 SYM_MASK(SendCtrl, SDmaSingleDescriptor))
3876
3877static int sendctrl_hook(struct qib_devdata *dd,
3878 const struct diag_observer *op,
3879 u32 offs, u64 *data, u64 mask, int only_32)
3880{
3881 unsigned long flags;
3882 unsigned idx = offs / sizeof(u64);
3883 u64 local_data, all_bits;
3884
3885 if (idx != kr_sendctrl) {
3886 qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
3887 offs, only_32 ? "32" : "64");
3888 return 0;
3889 }
3890
3891 all_bits = ~0ULL;
3892 if (only_32)
3893 all_bits >>= 32;
3894 spin_lock_irqsave(&dd->sendctrl_lock, flags);
3895 if ((mask & all_bits) != all_bits) {
3896 /*
3897 * At least some mask bits are zero, so we need
3898 * to read. The judgement call is whether from
3899 * reg or shadow. First-cut: read reg, and complain
3900 * if any bits which should be shadowed are different
3901 * from their shadowed value.
3902 */
3903 if (only_32)
3904 local_data = (u64)qib_read_kreg32(dd, idx);
3905 else
3906 local_data = qib_read_kreg64(dd, idx);
3907 qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
3908 (u32)local_data, (u32)dd->sendctrl);
3909 if ((local_data & SENDCTRL_SHADOWED) !=
3910 (dd->sendctrl & SENDCTRL_SHADOWED))
3911 qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
3912 (u32)local_data, (u32) dd->sendctrl);
3913 *data = (local_data & ~mask) | (*data & mask);
3914 }
3915 if (mask) {
3916 /*
3917 * At least some mask bits are one, so we need
3918 * to write, but only shadow some bits.
3919 */
3920 u64 sval, tval; /* Shadowed, transient */
3921
3922 /*
3923 * New shadow val is bits we don't want to touch,
3924 * ORed with bits we do, that are intended for shadow.
3925 */
3926 sval = (dd->sendctrl & ~mask);
3927 sval |= *data & SENDCTRL_SHADOWED & mask;
3928 dd->sendctrl = sval;
3929 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
3930 qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
3931 (u32)tval, (u32)sval);
3932 qib_write_kreg(dd, kr_sendctrl, tval);
3933		qib_write_kreg(dd, kr_scratch, 0ULL);
3934 }
3935 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
3936
3937 return only_32 ? 4 : 8;
3938}
3939
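/*
 * Diag observer covering only the SendCtrl register (the bottom and top
 * of the observed range are the same offset), so diag reads and writes
 * are routed through sendctrl_hook() and the sendctrl shadow stays
 * coherent.
 */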
3940static const struct diag_observer sendctrl_observer = {
3941 sendctrl_hook, kr_sendctrl * sizeof(u64),
3942 kr_sendctrl * sizeof(u64)
3943};
3944
3945/*
3946 * write the final few registers that depend on some of the
3947 * init setup. Done late in init, just before bringing up
3948 * the serdes.
3949 */
3950static int qib_late_7220_initreg(struct qib_devdata *dd)
3951{
3952 int ret = 0;
3953 u64 val;
3954
3955 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3956 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3957 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3958 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3959 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3960 if (val != dd->pioavailregs_phys) {
3961 qib_dev_err(dd, "Catastrophic software error, "
3962 "SendPIOAvailAddr written as %lx, "
3963 "read back as %llx\n",
3964 (unsigned long) dd->pioavailregs_phys,
3965 (unsigned long long) val);
3966 ret = -EINVAL;
3967 }
3968 qib_register_observer(dd, &sendctrl_observer);
3969 return ret;
3970}
3971
3972static int qib_init_7220_variables(struct qib_devdata *dd)
3973{
3974 struct qib_chippport_specific *cpspec;
3975 struct qib_pportdata *ppd;
3976 int ret = 0;
3977 u32 sbufs, updthresh;
3978
3979 cpspec = (struct qib_chippport_specific *)(dd + 1);
3980 ppd = &cpspec->pportdata;
3981 dd->pport = ppd;
3982 dd->num_pports = 1;
3983
3984 dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
3985 ppd->cpspec = cpspec;
3986
3987 spin_lock_init(&dd->cspec->sdepb_lock);
3988 spin_lock_init(&dd->cspec->rcvmod_lock);
3989 spin_lock_init(&dd->cspec->gpio_lock);
3990
3991 /* we haven't yet set QIB_PRESENT, so use read directly */
3992 dd->revision = readq(&dd->kregbase[kr_revision]);
3993
3994 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3995 qib_dev_err(dd, "Revision register read failure, "
3996 "giving up initialization\n");
3997 ret = -ENODEV;
3998 goto bail;
3999 }
4000 dd->flags |= QIB_PRESENT; /* now register routines work */
4001
4002 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
4003 ChipRevMajor);
4004 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
4005 ChipRevMinor);
4006
4007 get_7220_chip_params(dd);
4008 qib_7220_boardname(dd);
4009
4010 /*
4011 * GPIO bits for TWSI data and clock,
4012 * used for serial EEPROM.
4013 */
4014 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
4015 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
4016 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
4017
4018 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
4019 QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
4020 dd->flags |= qib_special_trigger ?
4021 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
4022
4023 /*
4024 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
4025 * 2 is Some Misc, 3 is reserved for future.
4026 */
4027 dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
4028
4029 dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
4030
4031 dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
4032
4033 init_waitqueue_head(&cpspec->autoneg_wait);
4034 INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
4035
4036 qib_init_pportdata(ppd, dd, 0, 1);
4037 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
4038 ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
4039
4040 ppd->link_width_enabled = ppd->link_width_supported;
4041 ppd->link_speed_enabled = ppd->link_speed_supported;
4042 /*
4043 * Set the initial values to reasonable default, will be set
4044 * for real when link is up.
4045 */
4046 ppd->link_width_active = IB_WIDTH_4X;
4047 ppd->link_speed_active = QIB_IB_SDR;
4048 ppd->delay_mult = rate_to_delay[0][1];
4049 ppd->vls_supported = IB_VL_VL0;
4050 ppd->vls_operational = ppd->vls_supported;
4051
4052 if (!qib_mini_init)
4053 qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
4054
4055 init_timer(&ppd->cpspec->chase_timer);
4056 ppd->cpspec->chase_timer.function = reenable_7220_chase;
4057 ppd->cpspec->chase_timer.data = (unsigned long)ppd;
4058
4059 qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
4060
4061 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
4062 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
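	/* RHF offset in dwords: the last 8 bytes of each rcvhdrq entry */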
4063 dd->rhf_offset =
4064 dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
4065
4066 /* we always allocate at least 2048 bytes for eager buffers */
4067 ret = ib_mtu_enum_to_int(qib_ibmtu);
4068 dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
4069
4070 qib_7220_tidtemplate(dd);
4071
4072 /*
4073 * We can request a receive interrupt for 1 or
4074 * more packets from current offset. For now, we set this
4075 * up for a single packet.
4076 */
4077 dd->rhdrhead_intr_off = 1ULL << 32;
4078
4079 /* setup the stats timer; the add_timer is done at end of init */
4080 init_timer(&dd->stats_timer);
4081 dd->stats_timer.function = qib_get_7220_faststats;
4082 dd->stats_timer.data = (unsigned long) dd;
4083 dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
4084
4085 /*
4086 * Control[4] has been added to change the arbitration within
4087 * the SDMA engine between favoring data fetches over descriptor
4088 * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
4089 */
4090 if (qib_sdma_fetch_arb)
4091 dd->control |= 1 << 4;
4092
4093 dd->ureg_align = 0x10000; /* 64KB alignment */
4094
4095 dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
4096 qib_7220_config_ctxts(dd);
4097 qib_set_ctxtcnt(dd); /* needed for PAT setup */
4098
4099 if (qib_wc_pat) {
4100 ret = init_chip_wc_pat(dd, 0);
4101 if (ret)
4102 goto bail;
4103 }
4104 set_7220_baseaddrs(dd); /* set chip access pointers now */
4105
4106 ret = 0;
4107 if (qib_mini_init)
4108 goto bail;
4109
4110 ret = qib_create_ctxts(dd);
4111 init_7220_cntrnames(dd);
4112
4113	/* Use all the 4KB buffers for kernel SDMA (zero if SDMA is disabled).
4114	 * Reserve for other kernel use (sending SMI, MAD, and ACK packets)
4115	 * the update threshold amount or 3 buffers, whichever is greater,
4116	 * unless we aren't enabling SDMA, in which case the kernel gets
4117	 * all the 4k bufs.
4118	 * If the reserve were less than the update threshold, we could wait
4119	 * a long time for an update. Coded this way because we
4120	 * sometimes change the update threshold for various reasons,
4121	 * and we want this to remain robust.
4122	 */
4123 updthresh = 8U; /* update threshold */
4124 if (dd->flags & QIB_HAS_SEND_DMA) {
4125 dd->cspec->sdmabufcnt = dd->piobcnt4k;
4126 sbufs = updthresh > 3 ? updthresh : 3;
4127 } else {
4128 dd->cspec->sdmabufcnt = 0;
4129 sbufs = dd->piobcnt4k;
4130 }
4131
4132 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
4133 dd->cspec->sdmabufcnt;
4134 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
4135 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
4136 dd->pbufsctxt = dd->lastctxt_piobuf /
4137 (dd->cfgctxts - dd->first_user_ctxt);
4138
4139 /*
4140	 * If we are at 16 user contexts, we will have only 7 sbufs
4141	 * per context, so drop the update threshold to match. We
4142	 * want to update before we actually run out; at low pbufs/ctxt,
4143	 * give ourselves some margin.
4144 */
4145 if ((dd->pbufsctxt - 2) < updthresh)
4146 updthresh = dd->pbufsctxt - 2;
4147
4148 dd->cspec->updthresh_dflt = updthresh;
4149 dd->cspec->updthresh = updthresh;
4150
4151 /* before full enable, no interrupts, no locking needed */
4152 dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
4153 << SYM_LSB(SendCtrl, AvailUpdThld);
4154
4155 dd->psxmitwait_supported = 1;
4156 dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
4157bail:
4158 return ret;
4159}
4160
4161static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
4162 u32 *pbufnum)
4163{
4164 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
4165 struct qib_devdata *dd = ppd->dd;
4166 u32 __iomem *buf;
4167
4168 if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
4169 !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
4170 buf = get_7220_link_buf(ppd, pbufnum);
4171 else {
4172 if ((plen + 1) > dd->piosize2kmax_dwords)
4173 first = dd->piobcnt2k;
4174 else
4175 first = 0;
4176 /* try 4k if all 2k busy, so same last for both sizes */
4177 last = dd->cspec->lastbuf_for_pio;
4178 buf = qib_getsendbuf_range(dd, pbufnum, first, last);
4179 }
4180 return buf;
4181}
4182
4183/* these 2 "counters" are really control registers, and are always RW */
4184static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
4185 u32 start)
4186{
4187 write_7220_creg(ppd->dd, cr_psinterval, intv);
4188 write_7220_creg(ppd->dd, cr_psstart, start);
4189}
4190
4191/*
4192 * NOTE: no real attempt is made to generalize the SDMA stuff.
4193 * At some point "soon" we will have a new, more generalized
4194 * SDMA interface, and then we'll clean this up.
4195 */
4196
4197/* Must be called with sdma_lock held, or before init finished */
4198static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
4199{
4200 /* Commit writes to memory and advance the tail on the chip */
4201 wmb();
4202 ppd->sdma_descq_tail = tail;
4203 qib_write_kreg(ppd->dd, kr_senddmatail, tail);
4204}
4205
4206static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
4207{
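	/* Intentional no-op: the common SDMA code expects this hook, but the
	 * 7220 does not adjust a per-port descriptor count here. */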
4208}
4209
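/*
 * Hardware control settings for each software SDMA state: whether the
 * engine is enabled, whether its interrupts are enabled, and whether it
 * is halted, plus the s99_running transitions.
 */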
4210static struct sdma_set_state_action sdma_7220_action_table[] = {
4211 [qib_sdma_state_s00_hw_down] = {
4212 .op_enable = 0,
4213 .op_intenable = 0,
4214 .op_halt = 0,
4215 .go_s99_running_tofalse = 1,
4216 },
4217 [qib_sdma_state_s10_hw_start_up_wait] = {
4218 .op_enable = 1,
4219 .op_intenable = 1,
4220 .op_halt = 1,
4221 },
4222 [qib_sdma_state_s20_idle] = {
4223 .op_enable = 1,
4224 .op_intenable = 1,
4225 .op_halt = 1,
4226 },
4227 [qib_sdma_state_s30_sw_clean_up_wait] = {
4228 .op_enable = 0,
4229 .op_intenable = 1,
4230 .op_halt = 0,
4231 },
4232 [qib_sdma_state_s40_hw_clean_up_wait] = {
4233 .op_enable = 1,
4234 .op_intenable = 1,
4235 .op_halt = 1,
4236 },
4237 [qib_sdma_state_s50_hw_halt_wait] = {
4238 .op_enable = 1,
4239 .op_intenable = 1,
4240 .op_halt = 1,
4241 },
4242 [qib_sdma_state_s99_running] = {
4243 .op_enable = 1,
4244 .op_intenable = 1,
4245 .op_halt = 0,
4246 .go_s99_running_totrue = 1,
4247 },
4248};
4249
4250static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
4251{
4252 ppd->sdma_state.set_state_action = sdma_7220_action_table;
4253}
4254
4255static int init_sdma_7220_regs(struct qib_pportdata *ppd)
4256{
4257 struct qib_devdata *dd = ppd->dd;
4258 unsigned i, n;
4259 u64 senddmabufmask[3] = { 0 };
4260
4261 /* Set SendDmaBase */
4262 qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
4263 qib_sdma_7220_setlengen(ppd);
4264 qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
4265 /* Set SendDmaHeadAddr */
4266 qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
4267
4268 /*
4269 * Reserve all the former "kernel" piobufs, using high number range
4270 * so we get as many 4K buffers as possible
4271 */
4272 n = dd->piobcnt2k + dd->piobcnt4k;
4273 i = n - dd->cspec->sdmabufcnt;
4274
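	/*
	 * Each SendDmaBufMask register covers 64 buffers (word = i / 64,
	 * bit = i % 64); set a bit for every buffer reserved for SDMA.
	 */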
4275 for (; i < n; ++i) {
4276 unsigned word = i / 64;
4277 unsigned bit = i & 63;
4278
4279 BUG_ON(word >= 3);
4280 senddmabufmask[word] |= 1ULL << bit;
4281 }
4282 qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
4283 qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
4284 qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
4285
4286 ppd->sdma_state.first_sendbuf = i;
4287 ppd->sdma_state.last_sendbuf = n;
4288
4289 return 0;
4290}
4291
4292/* sdma_lock must be held */
4293static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
4294{
4295 struct qib_devdata *dd = ppd->dd;
4296 int sane;
4297 int use_dmahead;
4298 u16 swhead;
4299 u16 swtail;
4300 u16 cnt;
4301 u16 hwhead;
4302
4303 use_dmahead = __qib_sdma_running(ppd) &&
4304 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
4305retry:
4306 hwhead = use_dmahead ?
4307 (u16)le64_to_cpu(*ppd->sdma_head_dma) :
4308 (u16)qib_read_kreg32(dd, kr_senddmahead);
4309
4310 swhead = ppd->sdma_descq_head;
4311 swtail = ppd->sdma_descq_tail;
4312 cnt = ppd->sdma_descq_cnt;
4313
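	/*
	 * Check that the hardware head lies between the software head and
	 * tail, allowing for ring wrap. For example, with cnt = 256,
	 * swhead = 200 and swtail = 10, any hwhead in 200..255 or 0..10
	 * is considered sane.
	 */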
4314 if (swhead < swtail) {
4315 /* not wrapped */
4316		sane = (hwhead >= swhead) && (hwhead <= swtail);
4317 } else if (swhead > swtail) {
4318 /* wrapped around */
4319 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
4320 (hwhead <= swtail);
4321 } else {
4322 /* empty */
4323 sane = (hwhead == swhead);
4324 }
4325
4326 if (unlikely(!sane)) {
4327 if (use_dmahead) {
4328 /* try one more time, directly from the register */
4329 use_dmahead = 0;
4330 goto retry;
4331 }
4332 /* assume no progress */
4333 hwhead = swhead;
4334 }
4335
4336 return hwhead;
4337}
4338
4339static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
4340{
4341 u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
4342
4343 return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
4344 (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
4345 (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
4346 !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
4347}
4348
4349/*
4350 * Compute the amount of delay before sending the next packet if the
4351 * port's send rate differs from the static rate set for the QP.
4352 * Since the delay affects this packet but the amount of the delay is
4353 * based on the length of the previous packet, use the last delay computed
4354 * and save the delay count for this packet to be used next time
4355 * we get here.
4356 */
4357static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
4358 u8 srate, u8 vl)
4359{
4360 u8 snd_mult = ppd->delay_mult;
4361 u8 rcv_mult = ib_rate_to_delay[srate];
4362 u32 ret = ppd->cpspec->last_delay_mult;
4363
4364 ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
4365 (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
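	/*
	 * For example, if the QP's static rate is slower than the port
	 * (rcv_mult > snd_mult), the next packet is delayed by roughly
	 * plen * (rcv_mult - snd_mult) / 2; a full-rate QP adds no delay.
	 */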
4366
4367 /* Indicate VL15, if necessary */
4368 if (vl == 15)
4369 ret |= PBC_7220_VL15_SEND_CTRL;
4370 return ret;
4371}
4372
4373static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
4374{
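	/* Nothing to do on the 7220; VL15 buffers need no special setup here */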
4375}
4376
4377static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
4378{
4379 if (!rcd->ctxt) {
4380 rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
4381 rcd->rcvegr_tid_base = 0;
4382 } else {
4383 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
4384 rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
4385 (rcd->ctxt - 1) * rcd->rcvegrcnt;
4386 }
4387}
4388
4389static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
4390 u32 len, u32 which, struct qib_ctxtdata *rcd)
4391{
4392 int i;
4393 unsigned long flags;
4394
4395 switch (which) {
4396 case TXCHK_CHG_TYPE_KERN:
4397 /* see if we need to raise avail update threshold */
4398 spin_lock_irqsave(&dd->uctxt_lock, flags);
4399 for (i = dd->first_user_ctxt;
4400 dd->cspec->updthresh != dd->cspec->updthresh_dflt
4401 && i < dd->cfgctxts; i++)
4402 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
4403 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
4404 < dd->cspec->updthresh_dflt)
4405 break;
4406 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
4407 if (i == dd->cfgctxts) {
4408 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4409 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
4410 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
4411 dd->sendctrl |= (dd->cspec->updthresh &
4412 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
4413 SYM_LSB(SendCtrl, AvailUpdThld);
4414 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4415 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
4416 }
4417 break;
4418 case TXCHK_CHG_TYPE_USER:
4419 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4420 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
4421 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
4422 dd->cspec->updthresh = (rcd->piocnt /
4423 rcd->subctxt_cnt) - 1;
4424 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
4425 dd->sendctrl |= (dd->cspec->updthresh &
4426 SYM_RMASK(SendCtrl, AvailUpdThld))
4427 << SYM_LSB(SendCtrl, AvailUpdThld);
4428 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4429 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
4430 } else
4431 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4432 break;
4433 }
4434}
4435
4436static void writescratch(struct qib_devdata *dd, u32 val)
4437{
4438 qib_write_kreg(dd, kr_scratch, val);
4439}
4440
4441#define VALID_TS_RD_REG_MASK 0xBF
4442/**
4443 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
4444 * @dd: the qlogic_ib device
4445 * @regnum: register to read from
4446 *
4447 * returns reg contents (0..255) or < 0 for error
4448 */
4449static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
4450{
4451 int ret;
4452 u8 rdata;
4453
4454 if (regnum > 7) {
4455 ret = -EINVAL;
4456 goto bail;
4457 }
4458
4459 /* return a bogus value for (the one) register we do not have */
4460 if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
4461 ret = 0;
4462 goto bail;
4463 }
4464
4465 ret = mutex_lock_interruptible(&dd->eep_lock);
4466 if (ret)
4467 goto bail;
4468
4469 ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
4470 if (!ret)
4471 ret = rdata;
4472
4473 mutex_unlock(&dd->eep_lock);
4474
4475 /*
4476 * There are three possibilities here:
4477 * ret is actual value (0..255)
4478 * ret is -ENXIO or -EINVAL from twsi code or this file
4479 * ret is -EINTR from mutex_lock_interruptible.
4480 */
4481bail:
4482 return ret;
4483}
4484
4485/* Dummy function, as 7220 boards never disable EEPROM Write */
4486static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
4487{
4488 return 1;
4489}
4490
4491/**
4492 * qib_init_iba7220_funcs - set up the chip-specific function pointers
4493 * @pdev: the pci_dev for qlogic_ib device
4494 * @ent: pci_device_id struct for this dev
4495 *
4496 * This is global, and is called directly at init to set up the
4497 * chip-specific function pointers for later use.
4498 */
4499struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
4500 const struct pci_device_id *ent)
4501{
4502 struct qib_devdata *dd;
4503 int ret;
4504 u32 boardid, minwidth;
4505
4506 dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
4507 sizeof(struct qib_chippport_specific));
4508 if (IS_ERR(dd))
4509 goto bail;
4510
4511 dd->f_bringup_serdes = qib_7220_bringup_serdes;
4512 dd->f_cleanup = qib_setup_7220_cleanup;
4513 dd->f_clear_tids = qib_7220_clear_tids;
4514 dd->f_free_irq = qib_7220_free_irq;
4515 dd->f_get_base_info = qib_7220_get_base_info;
4516 dd->f_get_msgheader = qib_7220_get_msgheader;
4517 dd->f_getsendbuf = qib_7220_getsendbuf;
4518 dd->f_gpio_mod = gpio_7220_mod;
4519 dd->f_eeprom_wen = qib_7220_eeprom_wen;
4520 dd->f_hdrqempty = qib_7220_hdrqempty;
4521 dd->f_ib_updown = qib_7220_ib_updown;
4522 dd->f_init_ctxt = qib_7220_init_ctxt;
4523 dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
4524 dd->f_intr_fallback = qib_7220_intr_fallback;
4525 dd->f_late_initreg = qib_late_7220_initreg;
4526 dd->f_setpbc_control = qib_7220_setpbc_control;
4527 dd->f_portcntr = qib_portcntr_7220;
4528 dd->f_put_tid = qib_7220_put_tid;
4529 dd->f_quiet_serdes = qib_7220_quiet_serdes;
4530 dd->f_rcvctrl = rcvctrl_7220_mod;
4531 dd->f_read_cntrs = qib_read_7220cntrs;
4532 dd->f_read_portcntrs = qib_read_7220portcntrs;
4533 dd->f_reset = qib_setup_7220_reset;
4534 dd->f_init_sdma_regs = init_sdma_7220_regs;
4535 dd->f_sdma_busy = qib_sdma_7220_busy;
4536 dd->f_sdma_gethead = qib_sdma_7220_gethead;
4537 dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
4538 dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
4539 dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
4540 dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
4541 dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
4542 dd->f_sdma_init_early = qib_7220_sdma_init_early;
4543 dd->f_sendctrl = sendctrl_7220_mod;
4544 dd->f_set_armlaunch = qib_set_7220_armlaunch;
4545 dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
4546 dd->f_iblink_state = qib_7220_iblink_state;
4547 dd->f_ibphys_portstate = qib_7220_phys_portstate;
4548 dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
4549 dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
4550 dd->f_set_ib_loopback = qib_7220_set_loopback;
4551 dd->f_set_intr_state = qib_7220_set_intr_state;
4552 dd->f_setextled = qib_setup_7220_setextled;
4553 dd->f_txchk_change = qib_7220_txchk_change;
4554 dd->f_update_usrhead = qib_update_7220_usrhead;
4555 dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
4556 dd->f_xgxs_reset = qib_7220_xgxs_reset;
4557 dd->f_writescratch = writescratch;
4558 dd->f_tempsense_rd = qib_7220_tempsense_rd;
4559 /*
4560 * Do remaining pcie setup and save pcie values in dd.
4561 * Any error printing is already done by the init code.
4562 * On return, we have the chip mapped, but chip registers
4563 * are not set up until start of qib_init_7220_variables.
4564 */
4565 ret = qib_pcie_ddinit(dd, pdev, ent);
4566 if (ret < 0)
4567 goto bail_free;
4568
4569 /* initialize chip-specific variables */
4570 ret = qib_init_7220_variables(dd);
4571 if (ret)
4572 goto bail_cleanup;
4573
4574 if (qib_mini_init)
4575 goto bail;
4576
4577 boardid = SYM_FIELD(dd->revision, Revision,
4578 BoardID);
4579 switch (boardid) {
4580 case 0:
4581 case 2:
4582 case 10:
4583 case 12:
4584 minwidth = 16; /* x16 capable boards */
4585 break;
4586 default:
4587 minwidth = 8; /* x8 capable boards */
4588 break;
4589 }
4590 if (qib_pcie_params(dd, minwidth, NULL, NULL))
4591 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
4592 "continuing anyway\n");
4593
4594 /* save IRQ for possible later use */
4595 dd->cspec->irq = pdev->irq;
4596
4597 if (qib_read_kreg64(dd, kr_hwerrstatus) &
4598 QLOGIC_IB_HWE_SERDESPLLFAILED)
4599 qib_write_kreg(dd, kr_hwerrclear,
4600 QLOGIC_IB_HWE_SERDESPLLFAILED);
4601
4602 /* setup interrupt handler (interrupt type handled above) */
4603 qib_setup_7220_interrupt(dd);
4604 qib_7220_init_hwerrors(dd);
4605
4606 /* clear diagctrl register, in case diags were running and crashed */
4607 qib_write_kreg(dd, kr_hwdiagctrl, 0);
4608
4609 goto bail;
4610
4611bail_cleanup:
4612 qib_pcie_ddcleanup(dd);
4613bail_free:
4614 qib_free_devdata(dd);
4615 dd = ERR_PTR(ret);
4616bail:
4617 return dd;
4618}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 000000000000..503992d9c5ce
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,7645 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7322 chip
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/io.h>
42#include <linux/jiffies.h>
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_smi.h>
45
46#include "qib.h"
47#include "qib_7322_regs.h"
48#include "qib_qsfp.h"
49
50#include "qib_mad.h"
51
52static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
53static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
54static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
55static irqreturn_t qib_7322intr(int irq, void *data);
56static irqreturn_t qib_7322bufavail(int irq, void *data);
57static irqreturn_t sdma_intr(int irq, void *data);
58static irqreturn_t sdma_idle_intr(int irq, void *data);
59static irqreturn_t sdma_progress_intr(int irq, void *data);
60static irqreturn_t sdma_cleanup_intr(int irq, void *data);
61static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
62 struct qib_ctxtdata *rcd);
63static u8 qib_7322_phys_portstate(u64);
64static u32 qib_7322_iblink_state(u64);
65static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
66 u16 linitcmd);
67static void force_h1(struct qib_pportdata *);
68static void adj_tx_serdes(struct qib_pportdata *);
69static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
70static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
71
72static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
73static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
74
75#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
76
77/* LE2 serdes values for different cases */
78#define LE2_DEFAULT 5
79#define LE2_5m 4
80#define LE2_QME 0
81
82/* Below is special-purpose, so only really works for the IB SerDes blocks. */
83#define IBSD(hw_pidx) (hw_pidx + 2)
84
85/* these are variables for documentation and experimentation purposes */
86static const unsigned rcv_int_timeout = 375;
87static const unsigned rcv_int_count = 16;
88static const unsigned sdma_idle_cnt = 64;
89
90/* Time to stop altering Rx Equalization parameters, after link up. */
91#define RXEQ_DISABLE_MSECS 2500
92
93/*
94 * Number of VLs we are configured to use (to allow for more
95 * credits per vl, etc.)
96 */
97ushort qib_num_cfg_vls = 2;
98module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
99MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
100
101static ushort qib_chase = 1;
102module_param_named(chase, qib_chase, ushort, S_IRUGO);
103MODULE_PARM_DESC(chase, "Enable state chase handling");
104
105static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
106module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
107MODULE_PARM_DESC(long_attenuation, \
108 "attenuation cutoff (dB) for long copper cable setup");
109
110static ushort qib_singleport;
111module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
112MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
113
114#define MAX_ATTEN_LEN 64 /* plenty for any real system */
115/* for read back, default index is ~5m copper cable */
116static char txselect_list[MAX_ATTEN_LEN] = "10";
117static struct kparam_string kp_txselect = {
118 .string = txselect_list,
119 .maxlen = MAX_ATTEN_LEN
120};
121static int setup_txselect(const char *, struct kernel_param *);
122module_param_call(txselect, setup_txselect, param_get_string,
123 &kp_txselect, S_IWUSR | S_IRUGO);
124MODULE_PARM_DESC(txselect, \
125 "Tx serdes indices (for no QSFP or invalid QSFP data)");
126
127#define BOARD_QME7342 5
128#define BOARD_QMH7342 6
129#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
130 BOARD_QMH7342)
131#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
132 BOARD_QME7342)
133
134#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
135
136#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
137
138#define MASK_ACROSS(lsb, msb) \
139 (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
140
141#define SYM_RMASK(regname, fldname) ((u64) \
142 QIB_7322_##regname##_##fldname##_RMASK)
143
144#define SYM_MASK(regname, fldname) ((u64) \
145 QIB_7322_##regname##_##fldname##_RMASK << \
146 QIB_7322_##regname##_##fldname##_LSB)
147
148#define SYM_FIELD(value, regname, fldname) ((u64) \
149 (((value) >> SYM_LSB(regname, fldname)) & \
150 SYM_RMASK(regname, fldname)))
151
152/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
153#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
154 (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
155
156#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
157#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
158#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
159#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
160#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
161/* Below because most, but not all, fields of IntMask have that full suffix */
162#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
163
164
165#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
166
167/*
168 * the size bits give us 2^N, in KB units. 0 marks as invalid,
169 * and 7 is reserved. We currently use only 2KB and 4KB
170 */
171#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
172#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
173#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
174#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
175
176#define SendIBSLIDAssignMask \
177 QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
178#define SendIBSLMCMask \
179 QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
180
181#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
182#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
183#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
184#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
185#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
186#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
187
188#define _QIB_GPIO_SDA_NUM 1
189#define _QIB_GPIO_SCL_NUM 0
190#define QIB_EEPROM_WEN_NUM 14
191#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
192
193/* HW counter clock is at 4nsec */
194#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
195
196/* full speed IB port 1 only */
197#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
198#define PORT_SPD_CAP_SHIFT 3
199
200/* full speed featuremask, both ports */
201#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
202
203/*
204 * This file contains almost all the chip-specific register information and
205 * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
206 */
207
208/* Use defines to tie machine-generated names to lower-case names */
209#define kr_contextcnt KREG_IDX(ContextCnt)
210#define kr_control KREG_IDX(Control)
211#define kr_counterregbase KREG_IDX(CntrRegBase)
212#define kr_errclear KREG_IDX(ErrClear)
213#define kr_errmask KREG_IDX(ErrMask)
214#define kr_errstatus KREG_IDX(ErrStatus)
215#define kr_extctrl KREG_IDX(EXTCtrl)
216#define kr_extstatus KREG_IDX(EXTStatus)
217#define kr_gpio_clear KREG_IDX(GPIOClear)
218#define kr_gpio_mask KREG_IDX(GPIOMask)
219#define kr_gpio_out KREG_IDX(GPIOOut)
220#define kr_gpio_status KREG_IDX(GPIOStatus)
221#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
222#define kr_debugportval KREG_IDX(DebugPortValueReg)
223#define kr_fmask KREG_IDX(feature_mask)
224#define kr_act_fmask KREG_IDX(active_feature_mask)
225#define kr_hwerrclear KREG_IDX(HwErrClear)
226#define kr_hwerrmask KREG_IDX(HwErrMask)
227#define kr_hwerrstatus KREG_IDX(HwErrStatus)
228#define kr_intclear KREG_IDX(IntClear)
229#define kr_intmask KREG_IDX(IntMask)
230#define kr_intredirect KREG_IDX(IntRedirect0)
231#define kr_intstatus KREG_IDX(IntStatus)
232#define kr_pagealign KREG_IDX(PageAlign)
233#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
234#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
235#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
236#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
237#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
238#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
239#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
240#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
241#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
242#define kr_revision KREG_IDX(Revision)
243#define kr_scratch KREG_IDX(Scratch)
244#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
245#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
246#define kr_sendctrl KREG_IDX(SendCtrl)
247#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
248#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
249#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
250#define kr_sendpiobufbase KREG_IDX(SendBufBase)
251#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
252#define kr_sendpiosize KREG_IDX(SendBufSize)
253#define kr_sendregbase KREG_IDX(SendRegBase)
254#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
255#define kr_userregbase KREG_IDX(UserRegBase)
256#define kr_intgranted KREG_IDX(Int_Granted)
257#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
258#define kr_intblocked KREG_IDX(IntBlocked)
259#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
260
261/*
262 * per-port kernel registers. Access only with qib_read_kreg_port()
263 * or qib_write_kreg_port()
264 */
265#define krp_errclear KREG_IBPORT_IDX(ErrClear)
266#define krp_errmask KREG_IBPORT_IDX(ErrMask)
267#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
268#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
269#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
270#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
271#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
272#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
273#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
274#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
275#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
276#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
277#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
278#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
279#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
280#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
281#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
282#define krp_psstart KREG_IBPORT_IDX(PSStart)
283#define krp_psstat KREG_IBPORT_IDX(PSStat)
284#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
285#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
286#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
287#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
288#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
289#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
290#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
291#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
292#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
293#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
294#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
295#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
296#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
297#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
298#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
299#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
300#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
301#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
302#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
303#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
304#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
305#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
306#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
307#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
308#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
309#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
310#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
311#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
312#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
313#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
314#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
315
316/*
317 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
318 * or qib_write_kreg_ctxt()
319 */
320#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
321#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
322
323/*
324 * TID Flow table, per context. Reduces
325 * number of hdrq updates to one per flow (or on errors).
326 * context 0 and 1 share same memory, but have distinct
327 * addresses. Since for now, we never use expected sends
328 * on kernel contexts, we don't worry about that (we initialize
329 * those entries for ctxt 0/1 on driver load twice, for example).
330 */
331#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
332#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
333
334/* these are the error bits in the tid flows, and are W1C */
335#define TIDFLOW_ERRBITS ( \
336 (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
337 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
338 (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
339 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
340
341/* Most (not all) Counters are per-IBport.
342 * Requires LBIntCnt is at offset 0 in the group
343 */
344#define CREG_IDX(regname) \
345((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
346
347#define crp_badformat CREG_IDX(RxVersionErrCnt)
348#define crp_err_rlen CREG_IDX(RxLenErrCnt)
349#define crp_erricrc CREG_IDX(RxICRCErrCnt)
350#define crp_errlink CREG_IDX(RxLinkMalformCnt)
351#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
352#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
353#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
354#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
355#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
356#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
357#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
358#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
359#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
360#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
361#define crp_pktrcv CREG_IDX(RxDataPktCnt)
362#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
363#define crp_pktsend CREG_IDX(TxDataPktCnt)
364#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
365#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
366#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
367#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
368#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
369#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
370#define crp_rcvebp CREG_IDX(RxEBPCnt)
371#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
372#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
373#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
374#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
375#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
376#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
377#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
378#define crp_sendstall CREG_IDX(TxFlowStallCnt)
379#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
380#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
381#define crp_txlenerr CREG_IDX(TxLenErrCnt)
383#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
384#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
385#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
386#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
387#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
388#define crp_wordrcv CREG_IDX(RxDwordCnt)
389#define crp_wordsend CREG_IDX(TxDwordCnt)
390#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
391
392/* these are the (few) counters that are not port-specific */
393#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
394 QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
395#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
396#define cr_lbint CREG_DEVIDX(LBIntCnt)
397#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
398#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
399#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
400#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
401#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
402
403/* no chip register for # of IB ports supported, so define */
404#define NUM_IB_PORTS 2
405
406/* 1 VL15 buffer per hardware IB port, no register for this, so define */
407#define NUM_VL15_BUFS NUM_IB_PORTS
408
409/*
410 * context 0 and 1 are special, and there is no chip register that
411 * defines this value, so we have to define it here.
412 * These are all allocated to either 0 or 1 for single port
413 * hardware configuration, otherwise each gets half
414 */
415#define KCTXT0_EGRCNT 2048
416
417/* values for vl and port fields in PBC, 7322-specific */
418#define PBC_PORT_SEL_LSB 26
419#define PBC_PORT_SEL_RMASK 1
420#define PBC_VL_NUM_LSB 27
421#define PBC_VL_NUM_RMASK 7
422#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
423#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
424
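/*
 * Delay multipliers indexed by IB static rate, roughly inversely
 * proportional to link speed (16 for 2.5 Gbps down to 1 for 40 Gbps);
 * used when pacing sends for QPs slower than the port's active rate.
 */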
425static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
426 [IB_RATE_2_5_GBPS] = 16,
427 [IB_RATE_5_GBPS] = 8,
428 [IB_RATE_10_GBPS] = 4,
429 [IB_RATE_20_GBPS] = 2,
430 [IB_RATE_30_GBPS] = 2,
431 [IB_RATE_40_GBPS] = 1
432};
433
434#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
435#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
436
437/* link training states, from IBC */
438#define IB_7322_LT_STATE_DISABLED 0x00
439#define IB_7322_LT_STATE_LINKUP 0x01
440#define IB_7322_LT_STATE_POLLACTIVE 0x02
441#define IB_7322_LT_STATE_POLLQUIET 0x03
442#define IB_7322_LT_STATE_SLEEPDELAY 0x04
443#define IB_7322_LT_STATE_SLEEPQUIET 0x05
444#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
445#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
446#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
447#define IB_7322_LT_STATE_CFGIDLE 0x0b
448#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
449#define IB_7322_LT_STATE_TXREVLANES 0x0d
450#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
451#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
452#define IB_7322_LT_STATE_CFGENH 0x10
453#define IB_7322_LT_STATE_CFGTEST 0x11
454
455/* link state machine states from IBC */
456#define IB_7322_L_STATE_DOWN 0x0
457#define IB_7322_L_STATE_INIT 0x1
458#define IB_7322_L_STATE_ARM 0x2
459#define IB_7322_L_STATE_ACTIVE 0x3
460#define IB_7322_L_STATE_ACT_DEFER 0x4
461
462static const u8 qib_7322_physportstate[0x20] = {
463 [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
464 [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
465 [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
466 [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
467 [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
468 [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
469 [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
470 [IB_7322_LT_STATE_CFGRCVFCFG] =
471 IB_PHYSPORTSTATE_CFG_TRAIN,
472 [IB_7322_LT_STATE_CFGWAITRMT] =
473 IB_PHYSPORTSTATE_CFG_TRAIN,
474 [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
475 [IB_7322_LT_STATE_RECOVERRETRAIN] =
476 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
477 [IB_7322_LT_STATE_RECOVERWAITRMT] =
478 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
479 [IB_7322_LT_STATE_RECOVERIDLE] =
480 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
481 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
482 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
483 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
484 [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
485 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
486 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
487 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
488 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
489};
490
491struct qib_chip_specific {
492 u64 __iomem *cregbase;
493 u64 *cntrs;
494 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
495 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
496 u64 main_int_mask; /* clear bits which have dedicated handlers */
497 u64 int_enable_mask; /* for per port interrupts in single port mode */
498 u64 errormask;
499 u64 hwerrmask;
500 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
501 u64 gpio_mask; /* shadow the gpio mask register */
502 u64 extctrl; /* shadow the gpio output enable, etc... */
503 u32 ncntrs;
504 u32 nportcntrs;
505 u32 cntrnamelen;
506 u32 portcntrnamelen;
507 u32 numctxts;
508 u32 rcvegrcnt;
509 u32 updthresh; /* current AvailUpdThld */
510 u32 updthresh_dflt; /* default AvailUpdThld */
511 u32 r1;
512 int irq;
513 u32 num_msix_entries;
514 u32 sdmabufcnt;
515 u32 lastbuf_for_pio;
516 u32 stay_in_freeze;
517 u32 recovery_ports_initted;
518 struct msix_entry *msix_entries;
519 void **msix_arg;
520 unsigned long *sendchkenable;
521 unsigned long *sendgrhchk;
522 unsigned long *sendibchk;
523 u32 rcvavail_timeout[18];
524 char emsgbuf[128]; /* for device error interrupt msg buffer */
525};
526
527/* Table of entries in "human readable" form Tx Emphasis. */
528struct txdds_ent {
529 u8 amp;
530 u8 pre;
531 u8 main;
532 u8 post;
533};
534
535struct vendor_txdds_ent {
536 u8 oui[QSFP_VOUI_LEN];
537 u8 *partnum;
538 struct txdds_ent sdr;
539 struct txdds_ent ddr;
540 struct txdds_ent qdr;
541};
542
543static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
544
545#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
546#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
547#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
548
549#define H1_FORCE_VAL 8
550#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
551#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
552
553/* The static and dynamic registers are paired, and the pairs indexed by spd */
554#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
555 + ((spd) * 2))
556
557#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
558#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
559#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
560#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
561#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
562
563struct qib_chippport_specific {
564 u64 __iomem *kpregbase;
565 u64 __iomem *cpregbase;
566 u64 *portcntrs;
567 struct qib_pportdata *ppd;
568 wait_queue_head_t autoneg_wait;
569 struct delayed_work autoneg_work;
570 struct delayed_work ipg_work;
571 struct timer_list chase_timer;
572 /*
573 * these 5 fields are used to establish deltas for IB symbol
574 * errors and linkrecovery errors. They can be reported on
575 * some chips during link negotiation prior to INIT, and with
576 * DDR when faking DDR negotiations with non-IBTA switches.
577 * The chip counters are adjusted at driver unload if there is
578 * a non-zero delta.
579 */
580 u64 ibdeltainprog;
581 u64 ibsymdelta;
582 u64 ibsymsnap;
583 u64 iblnkerrdelta;
584 u64 iblnkerrsnap;
585 u64 iblnkdownsnap;
586 u64 iblnkdowndelta;
587 u64 ibmalfdelta;
588 u64 ibmalfsnap;
589 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
590 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
591 u64 qdr_dfe_time;
592 u64 chase_end;
593 u32 autoneg_tries;
594 u32 recovery_init;
595 u32 qdr_dfe_on;
596 u32 qdr_reforce;
597 /*
598 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
599 * entry zero is unused, to simplify indexing
600 */
601 u8 h1_val;
602 u8 no_eep; /* txselect table index to use if no qsfp info */
603 u8 ipg_tries;
604 u8 ibmalfusesnap;
605 struct qib_qsfp_data qsfp_data;
606 char epmsgbuf[192]; /* for port error interrupt msg buffer */
607};
608
609static struct {
610 const char *name;
611 irq_handler_t handler;
612 int lsb;
613 int port; /* 0 if not port-specific, else port # */
614} irq_table[] = {
615 { QIB_DRV_NAME, qib_7322intr, -1, 0 },
616 { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
617 SYM_LSB(IntStatus, SendBufAvail), 0 },
618 { QIB_DRV_NAME " (sdma 0)", sdma_intr,
619 SYM_LSB(IntStatus, SDmaInt_0), 1 },
620 { QIB_DRV_NAME " (sdma 1)", sdma_intr,
621 SYM_LSB(IntStatus, SDmaInt_1), 2 },
622 { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
623 SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
624 { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
625 SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
626 { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
627 SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
628 { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
629 SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
630 { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
631 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
632 { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
633 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
634};
635
636/* ibcctrl bits */
637#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
638/* cycle through TS1/TS2 till OK */
639#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
640/* wait for TS1, then go on */
641#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
642#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
643
644#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
645#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
646#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
647
648#define BLOB_7322_IBCHG 0x101
649
650static inline void qib_write_kreg(const struct qib_devdata *dd,
651 const u32 regno, u64 value);
652static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
653static void write_7322_initregs(struct qib_devdata *);
654static void write_7322_init_portregs(struct qib_pportdata *);
655static void setup_7322_link_recovery(struct qib_pportdata *, u32);
656static void check_7322_rxe_status(struct qib_pportdata *);
657static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
658
659/**
660 * qib_read_ureg32 - read 32-bit virtualized per-context register
661 * @dd: device
662 * @regno: register number
663 * @ctxt: context number
664 *
665 * Return the contents of a register that is virtualized to be per context.
666 * Returns 0 on errors (not distinguishable from valid contents at
667 * runtime; we may add a separate error variable at some point).
668 */
669static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
670 enum qib_ureg regno, int ctxt)
671{
672 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
673 return 0;
674 return readl(regno + (u64 __iomem *)(
675 (dd->ureg_align * ctxt) + (dd->userbase ?
676 (char __iomem *)dd->userbase :
677 (char __iomem *)dd->kregbase + dd->uregbase)));
678}
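/*
 * Address sketch (editorial, applies to qib_read_ureg() and qib_write_ureg()
 * below as well): each context owns a ureg_align-byte block, placed either in
 * the separately mapped userbase or at uregbase within the kernel register
 * BAR; regno then indexes 64-bit registers within that per-context block.
 */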
679
680/**
681 * qib_read_ureg - read virtualized per-context register
682 * @dd: device
683 * @regno: register number
684 * @ctxt: context number
685 *
686 * Return the contents of a register that is virtualized to be per context.
687 * Returns 0 on errors (not distinguishable from valid contents at
688 * runtime; we may add a separate error variable at some point).
689 */
690static inline u64 qib_read_ureg(const struct qib_devdata *dd,
691 enum qib_ureg regno, int ctxt)
692{
693
694 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
695 return 0;
696 return readq(regno + (u64 __iomem *)(
697 (dd->ureg_align * ctxt) + (dd->userbase ?
698 (char __iomem *)dd->userbase :
699 (char __iomem *)dd->kregbase + dd->uregbase)));
700}
701
702/**
703 * qib_write_ureg - write virtualized per-context register
704 * @dd: device
705 * @regno: register number
706 * @value: value
707 * @ctxt: context
708 *
709 * Write the contents of a register that is virtualized to be per context.
710 */
711static inline void qib_write_ureg(const struct qib_devdata *dd,
712 enum qib_ureg regno, u64 value, int ctxt)
713{
714 u64 __iomem *ubase;
715 if (dd->userbase)
716 ubase = (u64 __iomem *)
717 ((char __iomem *) dd->userbase +
718 dd->ureg_align * ctxt);
719 else
720 ubase = (u64 __iomem *)
721 (dd->uregbase +
722 (char __iomem *) dd->kregbase +
723 dd->ureg_align * ctxt);
724
725 if (dd->kregbase && (dd->flags & QIB_PRESENT))
726 writeq(value, &ubase[regno]);
727}
728
729static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
730 const u32 regno)
731{
732 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
733 return -1;
734 return readl((u32 __iomem *) &dd->kregbase[regno]);
735}
736
737static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
738 const u32 regno)
739{
740 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
741 return -1;
742 return readq(&dd->kregbase[regno]);
743}
744
745static inline void qib_write_kreg(const struct qib_devdata *dd,
746 const u32 regno, u64 value)
747{
748 if (dd->kregbase && (dd->flags & QIB_PRESENT))
749 writeq(value, &dd->kregbase[regno]);
750}
751
752/*
753 * not many sanity checks for the port-specific kernel register routines,
754 * since they are only used when it's known to be safe.
755 */
756static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
757 const u16 regno)
758{
759 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
760 return 0ULL;
761 return readq(&ppd->cpspec->kpregbase[regno]);
762}
763
764static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
765 const u16 regno, u64 value)
766{
767 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
768 (ppd->dd->flags & QIB_PRESENT))
769 writeq(value, &ppd->cpspec->kpregbase[regno]);
770}
771
772/**
773 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
774 * @dd: the qlogic_ib device
775 * @regno: the register number to write
776 * @ctxt: the context containing the register
777 * @value: the value to write
778 */
779static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
780 const u16 regno, unsigned ctxt,
781 u64 value)
782{
783 qib_write_kreg(dd, regno + ctxt, value);
784}
785
786static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
787{
788 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
789 return 0;
790 return readq(&dd->cspec->cregbase[regno]);
791
792
793}
794
795static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
796{
797 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
798 return 0;
799 return readl(&dd->cspec->cregbase[regno]);
800
801
802}
803
804static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
805 u16 regno, u64 value)
806{
807 if (ppd->cpspec && ppd->cpspec->cpregbase &&
808 (ppd->dd->flags & QIB_PRESENT))
809 writeq(value, &ppd->cpspec->cpregbase[regno]);
810}
811
812static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
813 u16 regno)
814{
815 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
816 !(ppd->dd->flags & QIB_PRESENT))
817 return 0;
818 return readq(&ppd->cpspec->cpregbase[regno]);
819}
820
821static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
822 u16 regno)
823{
824 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
825 !(ppd->dd->flags & QIB_PRESENT))
826 return 0;
827 return readl(&ppd->cpspec->cpregbase[regno]);
828}
829
830/* bits in Control register */
831#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
832#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
833
834/* bits in general interrupt regs */
835#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
836#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
837#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
838#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
839#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
840#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
841#define QIB_I_C_ERROR INT_MASK(Err)
842
843#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
844#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
845#define QIB_I_GPIO INT_MASK(AssertGPIO)
846#define QIB_I_P_SDMAINT(pidx) \
847 (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
848 INT_MASK_P(SDmaProgress, pidx) | \
849 INT_MASK_PM(SDmaCleanupDone, pidx))
850
851/* Interrupt bits that are "per port" */
852#define QIB_I_P_BITSEXTANT(pidx) \
853 (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
854 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
855 INT_MASK_P(SDmaProgress, pidx) | \
856 INT_MASK_PM(SDmaCleanupDone, pidx))
857
858/* Interrupt bits that are common to a device */
859/* currently unused: QIB_I_SPIOSENT */
860#define QIB_I_C_BITSEXTANT \
861 (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
862 QIB_I_SPIOSENT | \
863 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
864
865#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
866 QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
867
868/*
869 * Error bits that are "per port".
870 */
871#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
872#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
873#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
874#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
875#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
876#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
877#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
878#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
879#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
880#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
881#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
882#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
883#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
884#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
885#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
886#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
887#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
888#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
889#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
890#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
891#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
892#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
893#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
894#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
895#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
896#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
897#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
898#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
899
900#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
901#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
902#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
903#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
904#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
905#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
906#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
907#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
908#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
909#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
910#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
911
912/* Error bits that are common to a device */
913#define QIB_E_RESET ERR_MASK(ResetNegated)
914#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
915#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
916
917
918/*
919 * Per chip (rather than per-port) errors. Most either do
920 * nothing but trigger a print (because they self-recover, or
921 * always occur in tandem with other errors that handle the
922 * issue), or they indicate errors with no recovery, but
923 * we want to know that they happened.
924 */
925#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
926#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
927#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
928#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
929#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
930#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
931#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
932#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
933
934/* SDMA chip errors (not per port)
935 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
936 * the SDMAHALT error immediately, so we just print the dup error via the
937 * E_AUTO mechanism. This is true of most of the per-port fatal errors
938 * as well, but since this is port-independent, by definition, it's
939 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
940 * packet send errors, and so are handled in the same manner as other
941 * per-packet errors.
942 */
943#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
944#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
945#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
946
947/*
948 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
949 * it is used to print "common" packet errors.
950 */
951#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
952 QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
953 QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
954 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
955 QIB_E_P_REBP)
956
957/* Error bits that are packet-related (Receive, per-port) */
958#define QIB_E_P_RPKTERRS (\
959 QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
960 QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
961 QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
962 QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
963 QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
964 QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
965
966/*
967 * Error bits that are Send-related (per port)
968 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
969 * All of these potentially need to have a buffer disarmed
970 */
971#define QIB_E_P_SPKTERRS (\
972 QIB_E_P_SUNEXP_PKTNUM |\
973 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
974 QIB_E_P_SMAXPKTLEN |\
975 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
976 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
977 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
978
979#define QIB_E_SPKTERRS ( \
980 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
981 ERR_MASK_N(SendUnsupportedVLErr) | \
982 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
983
984#define QIB_E_P_SDMAERRS ( \
985 QIB_E_P_SDMAHALT | \
986 QIB_E_P_SDMADESCADDRMISALIGN | \
987 QIB_E_P_SDMAUNEXPDATA | \
988 QIB_E_P_SDMAMISSINGDW | \
989 QIB_E_P_SDMADWEN | \
990 QIB_E_P_SDMARPYTAG | \
991 QIB_E_P_SDMA1STDESC | \
992 QIB_E_P_SDMABASE | \
993 QIB_E_P_SDMATAILOUTOFBOUND | \
994 QIB_E_P_SDMAOUTOFBOUND | \
995 QIB_E_P_SDMAGENMISMATCH)
996
997/*
998 * This sets some bits more than once, but makes it more obvious which
999 * bits are not handled under other categories, and the repeat definition
1000 * is not a problem.
1001 */
1002#define QIB_E_P_BITSEXTANT ( \
1003 QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1004 QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1005 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1006 QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1007 )
1008
1009/*
1010 * These are errors that can occur when the link
1011 * changes state while a packet is being sent or received. This doesn't
1012 * cover things like EBP or VCRC that can be the result of the sender
1013 * having the link change state, so we receive a "known bad" packet.
1014 * All of these are "per port", so renamed:
1015 */
1016#define QIB_E_P_LINK_PKTERRS (\
1017 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1018 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1019 QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1020 QIB_E_P_RUNEXPCHAR)
1021
1022/*
1023 * This sets some bits more than once, but makes it more obvious which
1024 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1025 * and the repeat definition is not a problem.
1026 */
1027#define QIB_E_C_BITSEXTANT (\
1028 QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1029 QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1030 QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1031
1032/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1033#define E_SPKT_ERRS_IGNORE 0
1034
1035#define QIB_EXTS_MEMBIST_DISABLED \
1036 SYM_MASK(EXTStatus, MemBISTDisabled)
1037#define QIB_EXTS_MEMBIST_ENDTEST \
1038 SYM_MASK(EXTStatus, MemBISTEndTest)
1039
1040#define QIB_E_SPIOARMLAUNCH \
1041 ERR_MASK(SendArmLaunchErr)
1042
1043#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1044#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1045
1046/*
1047 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1048 * and also if forced QDR (only QDR enabled). It's enabled for the
1049 * forced QDR case so that scrambling will be enabled by the TS3
1050 * exchange, when supported by both sides of the link.
1051 */
1052#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1053#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1054#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1055#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1056#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1057#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1058 SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1059#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1060
1061#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1062#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1063
1064#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1065#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1066#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1067
1068#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1069#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1070#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1071 SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1072#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1073 SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1074#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1075
1076#define IBA7322_REDIRECT_VEC_PER_REG 12
1077
1078#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1079#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1080#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1081#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1082#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1083
1084#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1085
1086#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1087 .msg = #fldname }
1088#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1089 fldname##Mask##_##port), .msg = #fldname }
1090static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1091 HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1092 HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1093 HWE_AUTO(PCIESerdesPClkNotDetect),
1094 HWE_AUTO(PowerOnBISTFailed),
1095 HWE_AUTO(TempsenseTholdReached),
1096 HWE_AUTO(MemoryErr),
1097 HWE_AUTO(PCIeBusParityErr),
1098 HWE_AUTO(PcieCplTimeout),
1099 HWE_AUTO(PciePoisonedTLP),
1100 HWE_AUTO_P(SDmaMemReadErr, 1),
1101 HWE_AUTO_P(SDmaMemReadErr, 0),
1102 HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1103 HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1104 HWE_AUTO_P(statusValidNoEop, 1),
1105 HWE_AUTO_P(statusValidNoEop, 0),
1106 HWE_AUTO(LATriggered),
1107 { .mask = 0 }
1108};
1109
1110#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1111 .msg = #fldname }
1112#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1113 .msg = #fldname }
1114static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1115 E_AUTO(ResetNegated),
1116 E_AUTO(HardwareErr),
1117 E_AUTO(InvalidAddrErr),
1118 E_AUTO(SDmaVL15Err),
1119 E_AUTO(SBufVL15MisUseErr),
1120 E_AUTO(InvalidEEPCmd),
1121 E_AUTO(RcvContextShareErr),
1122 E_AUTO(SendVLMismatchErr),
1123 E_AUTO(SendArmLaunchErr),
1124 E_AUTO(SendSpecialTriggerErr),
1125 E_AUTO(SDmaWrongPortErr),
1126 E_AUTO(SDmaBufMaskDuplicateErr),
1127 E_AUTO(RcvHdrFullErr),
1128 E_AUTO(RcvEgrFullErr),
1129 { .mask = 0 }
1130};
1131
1132static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1133 E_P_AUTO(IBStatusChanged),
1134 E_P_AUTO(SHeadersErr),
1135 E_P_AUTO(VL15BufMisuseErr),
1136 /*
1137	 * SDmaHaltErr is not really an error, so give it a clearer name here.
1138 */
1139 {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
1140 E_P_AUTO(SDmaDescAddrMisalignErr),
1141 E_P_AUTO(SDmaUnexpDataErr),
1142 E_P_AUTO(SDmaMissingDwErr),
1143 E_P_AUTO(SDmaDwEnErr),
1144 E_P_AUTO(SDmaRpyTagErr),
1145 E_P_AUTO(SDma1stDescErr),
1146 E_P_AUTO(SDmaBaseErr),
1147 E_P_AUTO(SDmaTailOutOfBoundErr),
1148 E_P_AUTO(SDmaOutOfBoundErr),
1149 E_P_AUTO(SDmaGenMismatchErr),
1150 E_P_AUTO(SendBufMisuseErr),
1151 E_P_AUTO(SendUnsupportedVLErr),
1152 E_P_AUTO(SendUnexpectedPktNumErr),
1153 E_P_AUTO(SendDroppedDataPktErr),
1154 E_P_AUTO(SendDroppedSmpPktErr),
1155 E_P_AUTO(SendPktLenErr),
1156 E_P_AUTO(SendUnderRunErr),
1157 E_P_AUTO(SendMaxPktLenErr),
1158 E_P_AUTO(SendMinPktLenErr),
1159 E_P_AUTO(RcvIBLostLinkErr),
1160 E_P_AUTO(RcvHdrErr),
1161 E_P_AUTO(RcvHdrLenErr),
1162 E_P_AUTO(RcvBadTidErr),
1163 E_P_AUTO(RcvBadVersionErr),
1164 E_P_AUTO(RcvIBFlowErr),
1165 E_P_AUTO(RcvEBPErr),
1166 E_P_AUTO(RcvUnsupportedVLErr),
1167 E_P_AUTO(RcvUnexpectedCharErr),
1168 E_P_AUTO(RcvShortPktLenErr),
1169 E_P_AUTO(RcvLongPktLenErr),
1170 E_P_AUTO(RcvMaxPktLenErr),
1171 E_P_AUTO(RcvMinPktLenErr),
1172 E_P_AUTO(RcvICRCErr),
1173 E_P_AUTO(RcvVCRCErr),
1174 E_P_AUTO(RcvFormatErr),
1175 { .mask = 0 }
1176};
1177
1178/*
1179 * Below generates "auto-message" for interrupts not specific to any port or
1180 * context
1181 */
1182#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1183 .msg = #fldname }
1184/* Below generates "auto-message" for interrupts specific to a port */
1185#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1186 SYM_LSB(IntMask, fldname##Mask##_0), \
1187 SYM_LSB(IntMask, fldname##Mask##_1)), \
1188 .msg = #fldname "_P" }
1189/* For some reason, the SerDesTrimDone bits are reversed */
1190#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1191 SYM_LSB(IntMask, fldname##Mask##_1), \
1192 SYM_LSB(IntMask, fldname##Mask##_0)), \
1193 .msg = #fldname "_P" }
1194/*
1195 * Below generates "auto-message" for interrupts specific to a context,
1196 * with ctxt-number appended
1197 */
1198#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1199 SYM_LSB(IntMask, fldname##0IntMask), \
1200 SYM_LSB(IntMask, fldname##17IntMask)), \
1201 .msg = #fldname "_C"}
1202
1203static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1204 INTR_AUTO_P(SDmaInt),
1205 INTR_AUTO_P(SDmaProgressInt),
1206 INTR_AUTO_P(SDmaIdleInt),
1207 INTR_AUTO_P(SDmaCleanupDone),
1208 INTR_AUTO_C(RcvUrg),
1209 INTR_AUTO_P(ErrInt),
1210 INTR_AUTO(ErrInt), /* non-port-specific errs */
1211 INTR_AUTO(AssertGPIOInt),
1212 INTR_AUTO_P(SendDoneInt),
1213 INTR_AUTO(SendBufAvailInt),
1214 INTR_AUTO_C(RcvAvail),
1215 { .mask = 0 }
1216};
1217
1218#define TXSYMPTOM_AUTO_P(fldname) \
1219 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
1220static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1221 TXSYMPTOM_AUTO_P(NonKeyPacket),
1222 TXSYMPTOM_AUTO_P(GRHFail),
1223 TXSYMPTOM_AUTO_P(PkeyFail),
1224 TXSYMPTOM_AUTO_P(QPFail),
1225 TXSYMPTOM_AUTO_P(SLIDFail),
1226 TXSYMPTOM_AUTO_P(RawIPV6),
1227 TXSYMPTOM_AUTO_P(PacketTooSmall),
1228 { .mask = 0 }
1229};
1230
1231#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1232
1233/*
1234 * Called when we might have an error that is specific to a particular
1235 * PIO buffer, and may need to cancel that buffer so it can be re-used;
1236 * we don't need to force an update of pioavail here.
1237 */
1238static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1239{
1240 struct qib_devdata *dd = ppd->dd;
1241 u32 i;
1242 int any;
1243 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1244 u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1245 unsigned long sbuf[4];
1246
1247 /*
1248 * It's possible that sendbuffererror could have bits set; might
1249 * have already done this as a result of hardware error handling.
1250 */
1251 any = 0;
1252 for (i = 0; i < regcnt; ++i) {
1253 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1254 if (sbuf[i]) {
1255 any = 1;
1256 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1257 }
1258 }
1259
1260 if (any)
1261 qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1262}
1263
1264/* No txe_recover yet, if ever */
1265
1266/* No decode__errors yet */
1267static void err_decode(char *msg, size_t len, u64 errs,
1268 const struct qib_hwerror_msgs *msp)
1269{
1270 u64 these, lmask;
1271 int took, multi, n = 0;
1272
1273 while (msp && msp->mask) {
1274 multi = (msp->mask & (msp->mask - 1));
1275 while (errs & msp->mask) {
1276 these = (errs & msp->mask);
1277 lmask = (these & (these - 1)) ^ these;
1278 if (len) {
1279 if (n++) {
1280 /* separate the strings */
1281 *msg++ = ',';
1282 len--;
1283 }
1284 took = scnprintf(msg, len, "%s", msp->msg);
1285 len -= took;
1286 msg += took;
1287 }
1288 errs &= ~lmask;
1289 if (len && multi) {
1290 /* More than one bit this mask */
1291 int idx = -1;
1292
1293 while (lmask & msp->mask) {
1294 ++idx;
1295 lmask >>= 1;
1296 }
1297 took = scnprintf(msg, len, "_%d", idx);
1298 len -= took;
1299 msg += took;
1300 }
1301 }
1302 ++msp;
1303 }
1304 /* If some bits are left, show in hex. */
1305 if (len && errs)
1306 snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1307 (unsigned long long) errs);
1308}
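/*
 * Worked example (hypothetical error word, editorial): if errs has only the
 * RcvICRCErr and RcvVCRCErr bits set and msp is qib_7322p_error_msgs, msg
 * ends up as "RcvICRCErr,RcvVCRCErr".  For table entries whose mask covers
 * more than one bit, the bit's offset within that mask is appended as
 * "_<n>", and any bits with no table entry are reported as ",MORE:<hex>".
 */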
1309
1310/* only called if r1 set */
1311static void flush_fifo(struct qib_pportdata *ppd)
1312{
1313 struct qib_devdata *dd = ppd->dd;
1314 u32 __iomem *piobuf;
1315 u32 bufn;
1316 u32 *hdr;
1317 u64 pbc;
1318 const unsigned hdrwords = 7;
1319 static struct qib_ib_header ibhdr = {
1320 .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1321 .lrh[1] = IB_LID_PERMISSIVE,
1322 .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1323 .lrh[3] = IB_LID_PERMISSIVE,
1324 .u.oth.bth[0] = cpu_to_be32(
1325 (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1326 .u.oth.bth[1] = cpu_to_be32(0),
1327 .u.oth.bth[2] = cpu_to_be32(0),
1328 .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1329 .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1330 };
1331
1332 /*
1333 * Send a dummy VL15 packet to flush the launch FIFO.
1334 * This will not actually be sent since the TxeBypassIbc bit is set.
1335 */
1336 pbc = PBC_7322_VL15_SEND |
1337 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1338 (hdrwords + SIZE_OF_CRC);
1339 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1340 if (!piobuf)
1341 return;
1342 writeq(pbc, piobuf);
1343 hdr = (u32 *) &ibhdr;
1344 if (dd->flags & QIB_PIO_FLUSH_WC) {
1345 qib_flush_wc();
1346 qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1347 qib_flush_wc();
1348 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1349 qib_flush_wc();
1350 } else
1351 qib_pio_copy(piobuf + 2, hdr, hdrwords);
1352 qib_sendbuf_done(dd, bufn);
1353}
1354
1355/*
1356 * This is called with interrupts disabled and sdma_lock held.
1357 */
1358static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1359{
1360 struct qib_devdata *dd = ppd->dd;
1361 u64 set_sendctrl = 0;
1362 u64 clr_sendctrl = 0;
1363
1364 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1365 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1366 else
1367 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1368
1369 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1370 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1371 else
1372 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1373
1374 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1375 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1376 else
1377 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1378
1379 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1380 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1381 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1382 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1383 else
1384 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1385 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1386 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1387
1388 spin_lock(&dd->sendctrl_lock);
1389
1390 /* If we are draining everything, block sends first */
1391 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1392 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1393 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1394 qib_write_kreg(dd, kr_scratch, 0);
1395 }
1396
1397 ppd->p_sendctrl |= set_sendctrl;
1398 ppd->p_sendctrl &= ~clr_sendctrl;
1399
1400 if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1401 qib_write_kreg_port(ppd, krp_sendctrl,
1402 ppd->p_sendctrl |
1403 SYM_MASK(SendCtrl_0, SDmaCleanup));
1404 else
1405 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1406 qib_write_kreg(dd, kr_scratch, 0);
1407
1408 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1409 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1410 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1411 qib_write_kreg(dd, kr_scratch, 0);
1412 }
1413
1414 spin_unlock(&dd->sendctrl_lock);
1415
1416 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1417 flush_fifo(ppd);
1418}
1419
1420static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1421{
1422 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1423}
1424
1425static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1426{
1427 /*
1428	 * Set SendDmaLenGen, first clearing and then setting
1429	 * the MSB of the generation count, to enable generation checking
1430	 * and load the internal generation counter.
1431 */
1432 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1433 qib_write_kreg_port(ppd, krp_senddmalengen,
1434 ppd->sdma_descq_cnt |
1435 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1436}
1437
1438/*
1439 * Must be called with sdma_lock held, or before init finished.
1440 */
1441static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1442{
1443 /* Commit writes to memory and advance the tail on the chip */
1444 wmb();
1445 ppd->sdma_descq_tail = tail;
1446 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1447}
1448
1449/*
1450 * This is called with interrupts disabled and sdma_lock held.
1451 */
1452static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1453{
1454 /*
1455 * Drain all FIFOs.
1456 * The hardware doesn't require this but we do it so that verbs
1457 * and user applications don't wait for link active to send stale
1458 * data.
1459 */
1460 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1461
1462 qib_sdma_7322_setlengen(ppd);
1463 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1464 ppd->sdma_head_dma[0] = 0;
1465 qib_7322_sdma_sendctrl(ppd,
1466 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1467}
1468
1469#define DISABLES_SDMA ( \
1470 QIB_E_P_SDMAHALT | \
1471 QIB_E_P_SDMADESCADDRMISALIGN | \
1472 QIB_E_P_SDMAMISSINGDW | \
1473 QIB_E_P_SDMADWEN | \
1474 QIB_E_P_SDMARPYTAG | \
1475 QIB_E_P_SDMA1STDESC | \
1476 QIB_E_P_SDMABASE | \
1477 QIB_E_P_SDMATAILOUTOFBOUND | \
1478 QIB_E_P_SDMAOUTOFBOUND | \
1479 QIB_E_P_SDMAGENMISMATCH)
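/*
 * Editorial note: this is QIB_E_P_SDMAERRS minus QIB_E_P_SDMAUNEXPDATA;
 * sdma_7322_p_errors() below logs SDmaUnexpData separately, and uses
 * QIB_E_P_SDMAHALT from this set to advance the SDMA state machine.
 */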
1480
1481static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1482{
1483 unsigned long flags;
1484 struct qib_devdata *dd = ppd->dd;
1485
1486 errs &= QIB_E_P_SDMAERRS;
1487
1488 if (errs & QIB_E_P_SDMAUNEXPDATA)
1489 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1490 ppd->port);
1491
1492 spin_lock_irqsave(&ppd->sdma_lock, flags);
1493
1494 switch (ppd->sdma_state.current_state) {
1495 case qib_sdma_state_s00_hw_down:
1496 break;
1497
1498 case qib_sdma_state_s10_hw_start_up_wait:
1499 if (errs & QIB_E_P_SDMAHALT)
1500 __qib_sdma_process_event(ppd,
1501 qib_sdma_event_e20_hw_started);
1502 break;
1503
1504 case qib_sdma_state_s20_idle:
1505 break;
1506
1507 case qib_sdma_state_s30_sw_clean_up_wait:
1508 break;
1509
1510 case qib_sdma_state_s40_hw_clean_up_wait:
1511 if (errs & QIB_E_P_SDMAHALT)
1512 __qib_sdma_process_event(ppd,
1513 qib_sdma_event_e50_hw_cleaned);
1514 break;
1515
1516 case qib_sdma_state_s50_hw_halt_wait:
1517 if (errs & QIB_E_P_SDMAHALT)
1518 __qib_sdma_process_event(ppd,
1519 qib_sdma_event_e60_hw_halted);
1520 break;
1521
1522 case qib_sdma_state_s99_running:
1523 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1524 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1525 break;
1526 }
1527
1528 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1529}
1530
1531/*
1532 * handle per-device errors (not per-port errors)
1533 */
1534static noinline void handle_7322_errors(struct qib_devdata *dd)
1535{
1536 char *msg;
1537 u64 iserr = 0;
1538 u64 errs;
1539 u64 mask;
1540 int log_idx;
1541
1542 qib_stats.sps_errints++;
1543 errs = qib_read_kreg64(dd, kr_errstatus);
1544 if (!errs) {
1545 qib_devinfo(dd->pcidev, "device error interrupt, "
1546 "but no error bits set!\n");
1547 goto done;
1548 }
1549
1550 /* don't report errors that are masked */
1551 errs &= dd->cspec->errormask;
1552 msg = dd->cspec->emsgbuf;
1553
1554 /* do these first, they are most important */
1555 if (errs & QIB_E_HARDWARE) {
1556 *msg = '\0';
1557 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1558 } else
1559 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1560 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1561 qib_inc_eeprom_err(dd, log_idx, 1);
1562
1563 if (errs & QIB_E_SPKTERRS) {
1564 qib_disarm_7322_senderrbufs(dd->pport);
1565 qib_stats.sps_txerrs++;
1566 } else if (errs & QIB_E_INVALIDADDR)
1567 qib_stats.sps_txerrs++;
1568 else if (errs & QIB_E_ARMLAUNCH) {
1569 qib_stats.sps_txerrs++;
1570 qib_disarm_7322_senderrbufs(dd->pport);
1571 }
1572 qib_write_kreg(dd, kr_errclear, errs);
1573
1574 /*
1575 * The ones we mask off are handled specially below
1576 * or above. Also mask SDMADISABLED by default as it
1577 * is too chatty.
1578 */
1579 mask = QIB_E_HARDWARE;
1580 *msg = '\0';
1581
1582 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1583 qib_7322error_msgs);
1584
1585 /*
1586 * Getting reset is a tragedy for all ports. Mark the device
1587 * _and_ the ports as "offline" in a way meaningful to each.
1588 */
1589 if (errs & QIB_E_RESET) {
1590 int pidx;
1591
1592 qib_dev_err(dd, "Got reset, requires re-init "
1593 "(unload and reload driver)\n");
1594 dd->flags &= ~QIB_INITTED; /* needs re-init */
1595 /* mark as having had error */
1596 *dd->devstatusp |= QIB_STATUS_HWERROR;
1597 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1598 if (dd->pport[pidx].link_speed_supported)
1599 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1600 }
1601
1602 if (*msg && iserr)
1603 qib_dev_err(dd, "%s error\n", msg);
1604
1605 /*
1606 * If there were hdrq or egrfull errors, wake up any processes
1607 * waiting in poll. We used to try to check which contexts had
1608 * the overflow, but given the cost of that and the chip reads
1609 * to support it, it's better to just wake everybody up if we
1610 * get an overflow; waiters can poll again if it's not them.
1611 */
1612 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1613 qib_handle_urcv(dd, ~0U);
1614 if (errs & ERR_MASK(RcvEgrFullErr))
1615 qib_stats.sps_buffull++;
1616 else
1617 qib_stats.sps_hdrfull++;
1618 }
1619
1620done:
1621 return;
1622}
1623
1624static void reenable_chase(unsigned long opaque)
1625{
1626 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1627
1628 ppd->cpspec->chase_timer.expires = 0;
1629 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1630 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1631}
1632
1633static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
1634{
1635 ppd->cpspec->chase_end = 0;
1636
1637 if (!qib_chase)
1638 return;
1639
1640 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1641 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1642 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1643 add_timer(&ppd->cpspec->chase_timer);
1644}
1645
1646static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1647{
1648 u8 ibclt;
1649 u64 tnow;
1650
1651 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1652
1653 /*
1654 * Detect and handle the state chase issue, where we can
1655 * get stuck if we are unlucky on timing on both sides of
1656 * the link. If we are, we disable, set a timer, and
1657 * then re-enable.
1658 */
1659 switch (ibclt) {
1660 case IB_7322_LT_STATE_CFGRCVFCFG:
1661 case IB_7322_LT_STATE_CFGWAITRMT:
1662 case IB_7322_LT_STATE_TXREVLANES:
1663 case IB_7322_LT_STATE_CFGENH:
1664 tnow = get_jiffies_64();
1665 if (ppd->cpspec->chase_end &&
1666 time_after64(tnow, ppd->cpspec->chase_end))
1667 disable_chase(ppd, tnow, ibclt);
1668 else if (!ppd->cpspec->chase_end)
1669 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1670 break;
1671 default:
1672 ppd->cpspec->chase_end = 0;
1673 break;
1674 }
1675
1676 if (ibclt == IB_7322_LT_STATE_CFGTEST &&
1677 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1678 force_h1(ppd);
1679 ppd->cpspec->qdr_reforce = 1;
1680 } else if (ppd->cpspec->qdr_reforce &&
1681 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1682 (ibclt == IB_7322_LT_STATE_CFGENH ||
1683 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1684 ibclt == IB_7322_LT_STATE_LINKUP))
1685 force_h1(ppd);
1686
1687 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1688 ppd->link_speed_enabled == QIB_IB_QDR &&
1689 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1690 ibclt == IB_7322_LT_STATE_CFGENH ||
1691 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1692 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1693 adj_tx_serdes(ppd);
1694
1695 if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
1696 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1697 ppd->cpspec->qdr_dfe_on = 1;
1698 ppd->cpspec->qdr_dfe_time = 0;
1699 /* On link down, reenable QDR adaptation */
1700 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1701 ppd->dd->cspec->r1 ?
1702 QDR_STATIC_ADAPT_DOWN_R1 :
1703 QDR_STATIC_ADAPT_DOWN);
1704 }
1705}
1706
1707/*
1708 * This is per-pport error handling.
1709 * It will likely get its own MSIx interrupt (one for each port,
1710 * although just a single handler).
1711 */
1712static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1713{
1714 char *msg;
1715 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1716 struct qib_devdata *dd = ppd->dd;
1717
1718 /* do this as soon as possible */
1719 fmask = qib_read_kreg64(dd, kr_act_fmask);
1720 if (!fmask)
1721 check_7322_rxe_status(ppd);
1722
1723 errs = qib_read_kreg_port(ppd, krp_errstatus);
1724 if (!errs)
1725 qib_devinfo(dd->pcidev,
1726 "Port%d error interrupt, but no error bits set!\n",
1727 ppd->port);
1728 if (!fmask)
1729 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1730 if (!errs)
1731 goto done;
1732
1733 msg = ppd->cpspec->epmsgbuf;
1734 *msg = '\0';
1735
1736 if (errs & ~QIB_E_P_BITSEXTANT) {
1737 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1738 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1739 if (!*msg)
1740 snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1741 "no others");
1742 qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1743 " errors 0x%016Lx set (and %s)\n",
1744 (errs & ~QIB_E_P_BITSEXTANT), msg);
1745 *msg = '\0';
1746 }
1747
1748 if (errs & QIB_E_P_SHDR) {
1749 u64 symptom;
1750
1751 /* determine cause, then write to clear */
1752 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1753 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1754 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1755 hdrchk_msgs);
1756 *msg = '\0';
1757 /* senderrbuf cleared in SPKTERRS below */
1758 }
1759
1760 if (errs & QIB_E_P_SPKTERRS) {
1761 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1762 !(ppd->lflags & QIBL_LINKACTIVE)) {
1763 /*
1764 * This can happen when trying to bring the link
1765 * up, but the IB link changes state at the "wrong"
1766 * time. The IB logic then complains that the packet
1767 * isn't valid. We don't want to confuse people, so
1768 * we just don't print them, except at debug
1769 */
1770 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1771 (errs & QIB_E_P_LINK_PKTERRS),
1772 qib_7322p_error_msgs);
1773 *msg = '\0';
1774 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1775 }
1776 qib_disarm_7322_senderrbufs(ppd);
1777 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1778 !(ppd->lflags & QIBL_LINKACTIVE)) {
1779 /*
1780 * This can happen when SMA is trying to bring the link
1781 * up, but the IB link changes state at the "wrong" time.
1782 * The IB logic then complains that the packet isn't
1783 * valid. We don't want to confuse people, so we just
1784 * don't print them, except at debug
1785 */
1786 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1787 qib_7322p_error_msgs);
1788 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1789 *msg = '\0';
1790 }
1791
1792 qib_write_kreg_port(ppd, krp_errclear, errs);
1793
1794 errs &= ~ignore_this_time;
1795 if (!errs)
1796 goto done;
1797
1798 if (errs & QIB_E_P_RPKTERRS)
1799 qib_stats.sps_rcverrs++;
1800 if (errs & QIB_E_P_SPKTERRS)
1801 qib_stats.sps_txerrs++;
1802
1803 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1804
1805 if (errs & QIB_E_P_SDMAERRS)
1806 sdma_7322_p_errors(ppd, errs);
1807
1808 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1809 u64 ibcs;
1810 u8 ltstate;
1811
1812 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1813 ltstate = qib_7322_phys_portstate(ibcs);
1814
1815 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1816 handle_serdes_issues(ppd, ibcs);
1817 if (!(ppd->cpspec->ibcctrl_a &
1818 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1819 /*
1820 * We got our interrupt, so init code should be
1821 * happy and not try alternatives. Now squelch
1822 * other "chatter" from link-negotiation (pre Init)
1823 */
1824 ppd->cpspec->ibcctrl_a |=
1825 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1826 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1827 ppd->cpspec->ibcctrl_a);
1828 }
1829
1830 /* Update our picture of width and speed from chip */
1831 ppd->link_width_active =
1832 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1833 IB_WIDTH_4X : IB_WIDTH_1X;
1834 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1835 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1836 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1837 QIB_IB_DDR : QIB_IB_SDR;
1838
1839 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1840 IB_PHYSPORTSTATE_DISABLED)
1841 qib_set_ib_7322_lstate(ppd, 0,
1842 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1843 else
1844 /*
1845 * Since going into a recovery state causes the link
1846 * state to go down and since recovery is transitory,
1847 * it is better if we "miss" ever seeing the link
1848 * training state go into recovery (i.e., ignore this
1849 * transition for link state special handling purposes)
1850 * without updating lastibcstat.
1851 */
1852 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1853 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1854 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1855 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1856 qib_handle_e_ibstatuschanged(ppd, ibcs);
1857 }
1858 if (*msg && iserr)
1859 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1860
1861 if (ppd->state_wanted & ppd->lflags)
1862 wake_up_interruptible(&ppd->state_wait);
1863done:
1864 return;
1865}
1866
1867/* enable/disable chip from delivering interrupts */
1868static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1869{
1870 if (enable) {
1871 if (dd->flags & QIB_BADINTR)
1872 return;
1873 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1874 /* cause any pending enabled interrupts to be re-delivered */
1875 qib_write_kreg(dd, kr_intclear, 0ULL);
1876 if (dd->cspec->num_msix_entries) {
1877 /* and same for MSIx */
1878 u64 val = qib_read_kreg64(dd, kr_intgranted);
1879 if (val)
1880 qib_write_kreg(dd, kr_intgranted, val);
1881 }
1882 } else
1883 qib_write_kreg(dd, kr_intmask, 0ULL);
1884}
1885
1886/*
1887 * Try to cleanup as much as possible for anything that might have gone
1888 * wrong while in freeze mode, such as pio buffers being written by user
1889 * processes (causing armlaunch), send errors due to going into freeze mode,
1890 * etc., and try to avoid causing extra interrupts while doing so.
1891 * Forcibly update the in-memory pioavail register copies after cleanup
1892 * because the chip won't do it while in freeze mode (the register values
1893 * themselves are kept correct).
1894 * Make sure that we don't lose any important interrupts by using the chip
1895 * feature that says that writing 0 to a bit in *clear that is set in
1896 * *status will cause an interrupt to be generated again (if allowed by
1897 * the *mask value).
1898 * This is in chip-specific code because of all of the register accesses,
1899 * even though the details are similar on most chips.
1900 */
1901static void qib_7322_clear_freeze(struct qib_devdata *dd)
1902{
1903 int pidx;
1904
1905 /* disable error interrupts, to avoid confusion */
1906 qib_write_kreg(dd, kr_errmask, 0ULL);
1907
1908 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1909 if (dd->pport[pidx].link_speed_supported)
1910 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
1911 0ULL);
1912
1913	/* also disable interrupts; errormask is sometimes overwritten */
1914 qib_7322_set_intr_state(dd, 0);
1915
1916 /* clear the freeze, and be sure chip saw it */
1917 qib_write_kreg(dd, kr_control, dd->control);
1918 qib_read_kreg32(dd, kr_scratch);
1919
1920 /*
1921 * Force new interrupt if any hwerr, error or interrupt bits are
1922 * still set, and clear "safe" send packet errors related to freeze
1923 * and cancelling sends. Re-enable error interrupts before possible
1924 * force of re-interrupt on pending interrupts.
1925 */
1926 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1927 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1928 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1929 /* We need to purge per-port errs and reset mask, too */
1930 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1931 if (!dd->pport[pidx].link_speed_supported)
1932 continue;
1933 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
1934 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
1935 }
1936 qib_7322_set_intr_state(dd, 1);
1937}
1938
1939/* no error handling to speak of */
1940/**
1941 * qib_7322_handle_hwerrors - display hardware errors.
1942 * @dd: the qlogic_ib device
1943 * @msg: the output buffer
1944 * @msgl: the size of the output buffer
1945 *
1946 * Most hardware errors are catastrophic, but for right now,
1947 * we'll print them and continue.  We reuse the same message buffer
1948 * as qib_handle_errors() (the regular error path) to avoid
1949 * excessive stack usage.
1950 */
1951static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
1952 size_t msgl)
1953{
1954 u64 hwerrs;
1955 u32 ctrl;
1956 int isfatal = 0;
1957
1958 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
1959 if (!hwerrs)
1960 goto bail;
1961 if (hwerrs == ~0ULL) {
1962 qib_dev_err(dd, "Read of hardware error status failed "
1963 "(all bits set); ignoring\n");
1964 goto bail;
1965 }
1966 qib_stats.sps_hwerrs++;
1967
1968 /* Always clear the error status register, except BIST fail */
1969 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
1970 ~HWE_MASK(PowerOnBISTFailed));
1971
1972 hwerrs &= dd->cspec->hwerrmask;
1973
1974 /* no EEPROM logging, yet */
1975
1976 if (hwerrs)
1977 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
1978 "(cleared)\n", (unsigned long long) hwerrs);
1979
1980 ctrl = qib_read_kreg32(dd, kr_control);
1981 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
1982 /*
1983 * No recovery yet...
1984 */
1985 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
1986 dd->cspec->stay_in_freeze) {
1987 /*
1988			 * If any bits that we aren't ignoring are set, only make
1989			 * the complaint once, in case it's stuck or recurring,
1990			 * and we get here multiple times.
1991			 * Force the link down, so the switch knows, and the
1992			 * LEDs are turned off.
1993 */
1994 if (dd->flags & QIB_INITTED)
1995 isfatal = 1;
1996 } else
1997 qib_7322_clear_freeze(dd);
1998 }
1999
2000 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2001 isfatal = 1;
2002 strlcpy(msg, "[Memory BIST test failed, "
2003 "InfiniPath hardware unusable]", msgl);
2004 /* ignore from now on, so disable until driver reloaded */
2005 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2006 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2007 }
2008
2009 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2010
2011 /* Ignore esoteric PLL failures et al. */
2012
2013 qib_dev_err(dd, "%s hardware error\n", msg);
2014
2015 if (isfatal && !dd->diag_client) {
2016 qib_dev_err(dd, "Fatal Hardware Error, no longer"
2017 " usable, SN %.16s\n", dd->serial);
2018 /*
2019 * for /sys status file and user programs to print; if no
2020 * trailing brace is copied, we'll know it was truncated.
2021 */
2022 if (dd->freezemsg)
2023 snprintf(dd->freezemsg, dd->freezelen,
2024 "{%s}", msg);
2025 qib_disable_after_error(dd);
2026 }
2027bail:;
2028}
2029
2030/**
2031 * qib_7322_init_hwerrors - enable hardware errors
2032 * @dd: the qlogic_ib device
2033 *
2034 * now that we have finished initializing everything that might reasonably
2035 * cause a hardware error, and cleared those error bits as they occur,
2036 * we can enable hardware errors in the mask (potentially enabling
2037 * freeze mode), and enable hardware errors as errors (along with
2038 * everything else) in errormask
2039 */
2040static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2041{
2042 int pidx;
2043 u64 extsval;
2044
2045 extsval = qib_read_kreg64(dd, kr_extstatus);
2046 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2047 QIB_EXTS_MEMBIST_ENDTEST)))
2048 qib_dev_err(dd, "MemBIST did not complete!\n");
2049
2050 /* never clear BIST failure, so reported on each driver load */
2051 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2052 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2053
2054 /* clear all */
2055 qib_write_kreg(dd, kr_errclear, ~0ULL);
2056 /* enable errors that are masked, at least this first time. */
2057 qib_write_kreg(dd, kr_errmask, ~0ULL);
2058 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2059 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2060 if (dd->pport[pidx].link_speed_supported)
2061 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2062 ~0ULL);
2063}
2064
2065/*
2066 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
2067 * on chips that are count-based, rather than trigger-based. There is no
2068 * reference counting, but that's also fine, given the intended use.
2069 * Only chip-specific because it's all register accesses
2070 */
2071static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2072{
2073 if (enable) {
2074 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2075 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2076 } else
2077 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2078 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2079}
2080
2081/*
2082 * Formerly took parameter <which> in pre-shifted,
2083 * pre-merged form with LinkCmd and LinkInitCmd
2084 * together, and assuming the zero was NOP.
2085 */
2086static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2087 u16 linitcmd)
2088{
2089 u64 mod_wd;
2090 struct qib_devdata *dd = ppd->dd;
2091 unsigned long flags;
2092
2093 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2094 /*
2095 * If we are told to disable, note that so link-recovery
2096 * code does not attempt to bring us back up.
2097 * Also reset everything that we can, so we start
2098 * completely clean when re-enabled (before we
2099 * actually issue the disable to the IBC)
2100 */
2101 qib_7322_mini_pcs_reset(ppd);
2102 spin_lock_irqsave(&ppd->lflags_lock, flags);
2103 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2104 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2105 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2106 /*
2107 * Any other linkinitcmd will lead to LINKDOWN and then
2108 * to INIT (if all is well), so clear flag to let
2109 * link-recovery code attempt to bring us back up.
2110 */
2111 spin_lock_irqsave(&ppd->lflags_lock, flags);
2112 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2113 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2114 /*
2115 * Clear status change interrupt reduction so the
2116 * new state is seen.
2117 */
2118 ppd->cpspec->ibcctrl_a &=
2119 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2120 }
2121
2122 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2123 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2124
2125 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2126 mod_wd);
2127 /* write to chip to prevent back-to-back writes of ibc reg */
2128 qib_write_kreg(dd, kr_scratch, 0);
2129
2130}
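/*
 * Usage note (editorial): callers pass LINKCMD/LINKINITCMD values unshifted
 * and this routine merges them into ibcctrl_a; e.g. reenable_chase() above
 * restarts polling with
 * qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
 *			   QLOGIC_IB_IBCC_LINKINITCMD_POLL);
 */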
2131
2132/*
2133 * The total RCV buffer memory is 64KB, used for both ports, and is
2134 * in units of 64 bytes (same as IB flow control credit unit).
2135 * The consumedVL fields in the same registers are in 32-byte units!
2136 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2137 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2138 * in krp_rxcreditvl15, rather than 10.
2139 */
2140#define RCV_BUF_UNITSZ 64
2141#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
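/*
 * Worked numbers (editorial sketch, assuming both ports are in use):
 * NUM_RCV_BUF_UNITS is (64 * 1024) / (64 * 2) = 512 units per port;
 * set_vls() below reserves (2 * 288 + 63) / 64 = 9 units for VL15 and
 * splits the remaining 503 among the operational data VLs, giving any
 * rounding excess to VL0.
 */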
2142
2143static void set_vls(struct qib_pportdata *ppd)
2144{
2145 int i, numvls, totcred, cred_vl, vl0extra;
2146 struct qib_devdata *dd = ppd->dd;
2147 u64 val;
2148
2149 numvls = qib_num_vls(ppd->vls_operational);
2150
2151 /*
2152	 * Set up per-VL credits. Below is a kluge based on these assumptions:
2153	 * 1) port is disabled at the time early_init is called.
2154	 * 2) give VL15 9 credits, enough for two max-plausible packets.
2155 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2156 */
2157 /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2158 totcred = NUM_RCV_BUF_UNITS(dd);
2159 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2160 totcred -= cred_vl;
2161 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2162 cred_vl = totcred / numvls;
2163 vl0extra = totcred - cred_vl * numvls;
2164 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2165 for (i = 1; i < numvls; i++)
2166 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2167 for (; i < 8; i++) /* no buffer space for other VLs */
2168 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2169
2170 /* Notify IBC that credits need to be recalculated */
2171 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2172 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2173 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2174 qib_write_kreg(dd, kr_scratch, 0ULL);
2175 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2176 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2177
2178 for (i = 0; i < numvls; i++)
2179 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2180 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2181
2182 /* Change the number of operational VLs */
2183 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2184 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2185 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2186 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2187 qib_write_kreg(dd, kr_scratch, 0ULL);
2188}
2189
2190/*
2191 * The code that deals with actual SerDes is in serdes_7322_init().
2192 * Compared to the code for iba7220, it is minimal.
2193 */
2194static int serdes_7322_init(struct qib_pportdata *ppd);
2195
2196/**
2197 * qib_7322_bringup_serdes - bring up the serdes
2198 * @ppd: physical port on the qlogic_ib device
2199 */
2200static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2201{
2202 struct qib_devdata *dd = ppd->dd;
2203 u64 val, guid, ibc;
2204 unsigned long flags;
2205 int ret = 0;
2206
2207 /*
2208 * SerDes model not in Pd, but still need to
2209 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2210 * eventually.
2211 */
2212 /* Put IBC in reset, sends disabled (should be in reset already) */
2213 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2214 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2215 qib_write_kreg(dd, kr_scratch, 0ULL);
2216
2217 if (qib_compat_ddr_negotiate) {
2218 ppd->cpspec->ibdeltainprog = 1;
2219 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2220 crp_ibsymbolerr);
2221 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2222 crp_iblinkerrrecov);
2223 }
2224
2225 /* flowcontrolwatermark is in units of KBytes */
2226 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2227 /*
2228 * Flow control is sent this often, even if no changes in
2229 * buffer space occur. Units are 128ns for this chip.
2230 * Set to 3usec.
2231 */
2232 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2233 /* max error tolerance */
2234 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2235 /* IB credit flow control. */
2236 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2237 /*
2238 * set initial max size pkt IBC will send, including ICRC; it's the
2239 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2240 */
2241 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2242 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2243 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2244
2245 /* initially come up waiting for TS1, without sending anything. */
2246 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2247 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2248
2249 /*
2250 * Reset the PCS interface to the serdes (and also ibc, which is still
2251 * in reset from above). Writes new value of ibcctrl_a as last step.
2252 */
2253 qib_7322_mini_pcs_reset(ppd);
2254 qib_write_kreg(dd, kr_scratch, 0ULL);
2255
2256 if (!ppd->cpspec->ibcctrl_b) {
2257 unsigned lse = ppd->link_speed_enabled;
2258
2259 /*
2260		 * Not a re-init after reset: establish the shadow
2261		 * and force the initial config.
2262 */
2263 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2264 krp_ibcctrl_b);
2265 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2266 IBA7322_IBC_SPEED_DDR |
2267 IBA7322_IBC_SPEED_SDR |
2268 IBA7322_IBC_WIDTH_AUTONEG |
2269 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2270		if (lse & (lse - 1)) /* Multiple speeds enabled */
2271 ppd->cpspec->ibcctrl_b |=
2272 (lse << IBA7322_IBC_SPEED_LSB) |
2273 IBA7322_IBC_IBTA_1_2_MASK |
2274 IBA7322_IBC_MAX_SPEED_MASK;
2275 else
2276 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2277 IBA7322_IBC_SPEED_QDR |
2278 IBA7322_IBC_IBTA_1_2_MASK :
2279 (lse == QIB_IB_DDR) ?
2280 IBA7322_IBC_SPEED_DDR :
2281 IBA7322_IBC_SPEED_SDR;
2282 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2283 (IB_WIDTH_1X | IB_WIDTH_4X))
2284 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2285 else
2286 ppd->cpspec->ibcctrl_b |=
2287 ppd->link_width_enabled == IB_WIDTH_4X ?
2288 IBA7322_IBC_WIDTH_4X_ONLY :
2289 IBA7322_IBC_WIDTH_1X_ONLY;
2290
2291 /* always enable these on driver reload, not sticky */
2292 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2293 IBA7322_IBC_HRTBT_MASK);
2294 }
2295 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2296
2297 /* setup so we have more time at CFGTEST to change H1 */
2298 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2299 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2300 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2301 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2302
2303 serdes_7322_init(ppd);
2304
2305 guid = be64_to_cpu(ppd->guid);
2306 if (!guid) {
2307 if (dd->base_guid)
2308 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2309 ppd->guid = cpu_to_be64(guid);
2310 }
2311
2312 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2313 /* write to chip to prevent back-to-back writes of ibc reg */
2314 qib_write_kreg(dd, kr_scratch, 0);
2315
2316 /* Enable port */
2317 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2318 set_vls(ppd);
2319
2320 /* be paranoid against later code motion, etc. */
2321 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2322 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2323 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2324 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2325
2326 /* Also enable IBSTATUSCHG interrupt. */
2327 val = qib_read_kreg_port(ppd, krp_errmask);
2328 qib_write_kreg_port(ppd, krp_errmask,
2329 val | ERR_MASK_N(IBStatusChanged));
2330
2331 /* Always zero until we start messing with SerDes for real */
2332 return ret;
2333}
2334
2335/**
2336 * qib_7322_mini_quiet_serdes - set serdes to txidle
2337 * @ppd: physical port on the qlogic_ib device
2338 * Called when driver is being unloaded
2339 */
2340static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2341{
2342 u64 val;
2343 unsigned long flags;
2344
2345 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2346
2347 spin_lock_irqsave(&ppd->lflags_lock, flags);
2348 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2349 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2350 wake_up(&ppd->cpspec->autoneg_wait);
2351 cancel_delayed_work(&ppd->cpspec->autoneg_work);
2352 if (ppd->dd->cspec->r1)
2353 cancel_delayed_work(&ppd->cpspec->ipg_work);
2354 flush_scheduled_work();
2355
2356 ppd->cpspec->chase_end = 0;
2357 if (ppd->cpspec->chase_timer.data) /* if initted */
2358 del_timer_sync(&ppd->cpspec->chase_timer);
2359
2360 /*
2361 * Despite the name, actually disables IBC as well. Do it when
2362 * we are as sure as possible that no more packets can be
2363 * received, following the down and the PCS reset.
2364 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2365 * along with the PCS being reset.
2366 */
2367 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2368 qib_7322_mini_pcs_reset(ppd);
2369
2370 /*
2371 * Update the adjusted counters so the adjustment persists
2372 * across driver reload.
2373 */
2374 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2375 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2376 struct qib_devdata *dd = ppd->dd;
2377 u64 diagc;
2378
2379 /* enable counter writes */
2380 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2381 qib_write_kreg(dd, kr_hwdiagctrl,
2382 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2383
2384 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2385 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2386 if (ppd->cpspec->ibdeltainprog)
2387 val -= val - ppd->cpspec->ibsymsnap;
2388 val -= ppd->cpspec->ibsymdelta;
2389 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2390 }
2391 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2392 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2393 if (ppd->cpspec->ibdeltainprog)
2394 val -= val - ppd->cpspec->iblnkerrsnap;
2395 val -= ppd->cpspec->iblnkerrdelta;
2396 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2397 }
2398 if (ppd->cpspec->iblnkdowndelta) {
2399 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2400 val += ppd->cpspec->iblnkdowndelta;
2401 write_7322_creg_port(ppd, crp_iblinkdown, val);
2402 }
2403 /*
2404 * No need to save ibmalfdelta since IB perfcounters
2405 * are cleared on driver reload.
2406 */
2407
2408 /* and disable counter writes */
2409 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2410 }
2411}
2412
2413/**
2414 * qib_setup_7322_setextled - set the state of the two external LEDs
2415 * @ppd: physical port on the qlogic_ib device
2416 * @on: whether the link is up or not
2417 *
2418 * The exact combo of LEDs when @on is true is determined by looking
2419 * at the ibcstatus.
2420 *
2421 * These LEDs indicate the physical and logical state of IB link.
2422 * For this chip (at least with recommended board pinouts), LED1
2423 * is Yellow (logical state) and LED2 is Green (physical state),
2424 * is Yellow (logical state) and LED2 is Green (physical state).
2425 * Note: We try to match the Mellanox HCA LED behavior as best
2426 * we can. Green indicates physical link state is OK (something is
2427 * plugged in, and we can train).
2428 * Amber indicates the link is logically up (ACTIVE).
2429 * Mellanox further blinks the amber LED to indicate data packet
2430 * activity, but we have no hardware support for that, so it would
2431 * require waking up every 10-20 msecs and checking the counters
2432 * on the chip, and then turning the LED off if appropriate. That's
2433 * visible overhead, so not something we will do.
2434 */
2435static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2436{
2437 struct qib_devdata *dd = ppd->dd;
2438 u64 extctl, ledblink = 0, val;
2439 unsigned long flags;
2440 int yel, grn;
2441
2442 /*
2443 * The diags use the LED to indicate diag info, so we leave
2444 * the external LED alone when the diags are running.
2445 */
2446 if (dd->diag_client)
2447 return;
2448
2449 /* Allow override of LED display for, e.g. Locating system in rack */
2450 if (ppd->led_override) {
2451 grn = (ppd->led_override & QIB_LED_PHYS);
2452 yel = (ppd->led_override & QIB_LED_LOG);
2453 } else if (on) {
2454 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2455 grn = qib_7322_phys_portstate(val) ==
2456 IB_PHYSPORTSTATE_LINKUP;
2457 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2458 } else {
2459 grn = 0;
2460 yel = 0;
2461 }
2462
2463 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2464 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2465 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2466 if (grn) {
2467 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2468 /*
2469 * Counts are in chip clock (4ns) periods.
2470 * This is 1/16 sec (66.6ms) on,
2471 * 3/16 sec (187.5 ms) off, with packets rcvd.
2472 */
2473 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2474 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
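		/*
		 * Illustrative check of the constants above:
		 * 66600 * 1000 / 4 = 16,650,000 chip clocks = 66.6 ms at
		 * 4 ns/clock; 187500 * 1000 / 4 = 46,875,000 clocks = 187.5 ms.
		 */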
2475 }
2476 if (yel)
2477 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2478 dd->cspec->extctrl = extctl;
2479 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2480 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2481
2482 if (ledblink) /* blink the LED on packet receive */
2483 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2484}
2485
2486/*
2487 * Disable MSIx interrupt if enabled, call generic MSIx code
2488 * to cleanup, and clear pending MSIx interrupts.
2489 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2490 */
2491static void qib_7322_nomsix(struct qib_devdata *dd)
2492{
2493 u64 intgranted;
2494 int n;
2495
2496 dd->cspec->main_int_mask = ~0ULL;
2497 n = dd->cspec->num_msix_entries;
2498 if (n) {
2499 int i;
2500
2501 dd->cspec->num_msix_entries = 0;
2502 for (i = 0; i < n; i++)
2503 free_irq(dd->cspec->msix_entries[i].vector,
2504 dd->cspec->msix_arg[i]);
2505 qib_nomsix(dd);
2506 }
2507 /* make sure no MSIx interrupts are left pending */
2508 intgranted = qib_read_kreg64(dd, kr_intgranted);
2509 if (intgranted)
2510 qib_write_kreg(dd, kr_intgranted, intgranted);
2511}
2512
2513static void qib_7322_free_irq(struct qib_devdata *dd)
2514{
2515 if (dd->cspec->irq) {
2516 free_irq(dd->cspec->irq, dd);
2517 dd->cspec->irq = 0;
2518 }
2519 qib_7322_nomsix(dd);
2520}
2521
2522static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2523{
2524 int i;
2525
2526 qib_7322_free_irq(dd);
2527 kfree(dd->cspec->cntrs);
2528 kfree(dd->cspec->sendchkenable);
2529 kfree(dd->cspec->sendgrhchk);
2530 kfree(dd->cspec->sendibchk);
2531 kfree(dd->cspec->msix_entries);
2532 kfree(dd->cspec->msix_arg);
2533 for (i = 0; i < dd->num_pports; i++) {
2534 unsigned long flags;
2535 u32 mask = QSFP_GPIO_MOD_PRS_N |
2536 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2537
2538 kfree(dd->pport[i].cpspec->portcntrs);
2539 if (dd->flags & QIB_HAS_QSFP) {
2540 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2541 dd->cspec->gpio_mask &= ~mask;
2542 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2543 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2544 qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2545 }
2546 if (dd->pport[i].ibport_data.smi_ah)
2547 ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2548 }
2549}
2550
2551/* handle SDMA interrupts */
2552static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2553{
2554 struct qib_pportdata *ppd0 = &dd->pport[0];
2555 struct qib_pportdata *ppd1 = &dd->pport[1];
2556 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2557 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2558 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2559 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2560
2561 if (intr0)
2562 qib_sdma_intr(ppd0);
2563 if (intr1)
2564 qib_sdma_intr(ppd1);
2565
2566 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2567 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2568 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2569 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2570}
2571
2572/*
2573 * Set or clear the Send buffer available interrupt enable bit.
2574 */
2575static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2576{
2577 unsigned long flags;
2578
2579 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2580 if (needint)
2581 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2582 else
2583 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2584 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2585 qib_write_kreg(dd, kr_scratch, 0ULL);
2586 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2587}
2588
2589/*
2590 * Somehow got an interrupt with reserved bits set in interrupt status.
2591 * Print a message so we know it happened, then clear them.
2592 * keep mainline interrupt handler cache-friendly
2593 */
2594static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2595{
2596	u64 kills;
2597
2598	kills = istat & ~QIB_I_BITSEXTANT;
2599	qib_dev_err(dd,
2600		    "Clearing reserved interrupt(s) 0x%016llx\n",
2601		    (unsigned long long) kills);
2602	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2603}
2604
2605/* keep mainline interrupt handler cache-friendly */
2606static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2607{
2608 u32 gpiostatus;
2609 int handled = 0;
2610 int pidx;
2611
2612 /*
2613 * Boards for this chip currently don't use GPIO interrupts,
2614 * so clear by writing GPIOstatus to GPIOclear, and complain
2615 * to developer. To avoid endless repeats, clear
2616 * the bits in the mask, since there is some kind of
2617 * programming error or chip problem.
2618 */
2619 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2620 /*
2621 * In theory, writing GPIOstatus to GPIOclear could
2622 * have a bad side-effect on some diagnostic that wanted
2623 * to poll for a status-change, but the various shadows
2624 * make that problematic at best. Diags will just suppress
2625 * all GPIO interrupts during such tests.
2626 */
2627 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2628 /*
2629 * Check for QSFP MOD_PRS changes
2630 * only works for single port if IB1 != pidx1
2631 */
2632 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2633 ++pidx) {
2634 struct qib_pportdata *ppd;
2635 struct qib_qsfp_data *qd;
2636 u32 mask;
2637 if (!dd->pport[pidx].link_speed_supported)
2638 continue;
2639 mask = QSFP_GPIO_MOD_PRS_N;
2640 ppd = dd->pport + pidx;
2641 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2642 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2643 u64 pins;
2644 qd = &ppd->cpspec->qsfp_data;
2645 gpiostatus &= ~mask;
2646 pins = qib_read_kreg64(dd, kr_extstatus);
2647 pins >>= SYM_LSB(EXTStatus, GPIOIn);
2648 if (!(pins & mask)) {
2649 ++handled;
2650 qd->t_insert = get_jiffies_64();
2651 schedule_work(&qd->work);
2652 }
2653 }
2654 }
2655
2656 if (gpiostatus && !handled) {
2657 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2658 u32 gpio_irq = mask & gpiostatus;
2659
2660 /*
2661 * Clear any troublemakers, and update chip from shadow
2662 */
2663 dd->cspec->gpio_mask &= ~gpio_irq;
2664 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2665 }
2666}
2667
2668/*
2669 * Handle errors and unusual events first, separate function
2670 * to improve cache hits for fast path interrupt handling.
2671 */
2672static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2673{
2674 if (istat & ~QIB_I_BITSEXTANT)
2675 unknown_7322_ibits(dd, istat);
2676 if (istat & QIB_I_GPIO)
2677 unknown_7322_gpio_intr(dd);
2678 if (istat & QIB_I_C_ERROR)
2679 handle_7322_errors(dd);
2680 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2681 handle_7322_p_errors(dd->rcd[0]->ppd);
2682 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2683 handle_7322_p_errors(dd->rcd[1]->ppd);
2684}
2685
2686/*
2687 * Dynamically adjust the rcv int timeout for a context based on incoming
2688 * packet rate.
2689 */
2690static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2691{
2692 struct qib_devdata *dd = rcd->dd;
2693 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2694
2695 /*
2696 * Dynamically adjust idle timeout on chip
2697 * based on number of packets processed.
2698 */
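	/*
	 * Sketch of the behavior below: a context receiving fewer than
	 * rcv_int_count packets per interrupt has its timeout halved (only
	 * while it is above 2), while a busy context has it doubled, capped
	 * at rcv_int_timeout, trading interrupt rate against latency.
	 */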
2699 if (npkts < rcv_int_count && timeout > 2)
2700 timeout >>= 1;
2701 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2702 timeout = min(timeout << 1, rcv_int_timeout);
2703 else
2704 return;
2705
2706 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2707 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2708}
2709
2710/*
2711 * This is the main interrupt handler.
2712 * It will normally only be used for low frequency interrupts but may
2713 * have to handle all interrupts if INTx is enabled or fewer than normal
2714 * MSIx interrupts were allocated.
2715 * This routine should ignore the interrupt bits for any of the
2716 * dedicated MSIx handlers.
2717 */
2718static irqreturn_t qib_7322intr(int irq, void *data)
2719{
2720 struct qib_devdata *dd = data;
2721 irqreturn_t ret;
2722 u64 istat;
2723 u64 ctxtrbits;
2724 u64 rmask;
2725 unsigned i;
2726 u32 npkts;
2727
2728 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2729 /*
2730 * This return value is not great, but we do not want the
2731 * interrupt core code to remove our interrupt handler
2732 * because we don't appear to be handling an interrupt
2733 * during a chip reset.
2734 */
2735 ret = IRQ_HANDLED;
2736 goto bail;
2737 }
2738
2739 istat = qib_read_kreg64(dd, kr_intstatus);
2740
2741 if (unlikely(istat == ~0ULL)) {
2742 qib_bad_intrstatus(dd);
2743 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2744 /* don't know if it was our interrupt or not */
2745 ret = IRQ_NONE;
2746 goto bail;
2747 }
2748
2749 istat &= dd->cspec->main_int_mask;
2750 if (unlikely(!istat)) {
2751 /* already handled, or shared and not us */
2752 ret = IRQ_NONE;
2753 goto bail;
2754 }
2755
2756 qib_stats.sps_ints++;
2757 if (dd->int_counter != (u32) -1)
2758 dd->int_counter++;
2759
2760 /* handle "errors" of various kinds first, device ahead of port */
2761 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2762 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2763 INT_MASK_P(Err, 1))))
2764 unlikely_7322_intr(dd, istat);
2765
2766 /*
2767 * Clear the interrupt bits we found set, relatively early, so we
2768	 * "know" the chip will have seen this by the time we process
2769 * the queue, and will re-interrupt if necessary. The processor
2770 * itself won't take the interrupt again until we return.
2771 */
2772 qib_write_kreg(dd, kr_intclear, istat);
2773
2774 /*
2775 * Handle kernel receive queues before checking for pio buffers
2776 * available since receives can overflow; piobuf waiters can afford
2777 * a few extra cycles, since they were waiting anyway.
2778 */
2779 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2780 if (ctxtrbits) {
2781 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2782 (1ULL << QIB_I_RCVURG_LSB);
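		/*
		 * rmask covers the RcvAvail and RcvUrg bits for one context;
		 * shifting it left once per iteration walks the kernel
		 * contexts, and anything left over is urgent/user context
		 * work handled just below.
		 */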
2783 for (i = 0; i < dd->first_user_ctxt; i++) {
2784 if (ctxtrbits & rmask) {
2785 ctxtrbits &= ~rmask;
2786 if (dd->rcd[i]) {
2787 qib_kreceive(dd->rcd[i], NULL, &npkts);
2788 adjust_rcv_timeout(dd->rcd[i], npkts);
2789 }
2790 }
2791 rmask <<= 1;
2792 }
2793 if (ctxtrbits) {
2794 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2795 (ctxtrbits >> QIB_I_RCVURG_LSB);
2796 qib_handle_urcv(dd, ctxtrbits);
2797 }
2798 }
2799
2800 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2801 sdma_7322_intr(dd, istat);
2802
2803 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2804 qib_ib_piobufavail(dd);
2805
2806 ret = IRQ_HANDLED;
2807bail:
2808 return ret;
2809}
2810
2811/*
2812 * Dedicated receive packet available interrupt handler.
2813 */
2814static irqreturn_t qib_7322pintr(int irq, void *data)
2815{
2816 struct qib_ctxtdata *rcd = data;
2817 struct qib_devdata *dd = rcd->dd;
2818 u32 npkts;
2819
2820 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2821 /*
2822 * This return value is not great, but we do not want the
2823 * interrupt core code to remove our interrupt handler
2824 * because we don't appear to be handling an interrupt
2825 * during a chip reset.
2826 */
2827 return IRQ_HANDLED;
2828
2829 qib_stats.sps_ints++;
2830 if (dd->int_counter != (u32) -1)
2831 dd->int_counter++;
2832
2833 /* Clear the interrupt bit we expect to be set. */
2834 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
2835 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2836
2837 qib_kreceive(rcd, NULL, &npkts);
2838 adjust_rcv_timeout(rcd, npkts);
2839
2840 return IRQ_HANDLED;
2841}
2842
2843/*
2844 * Dedicated Send buffer available interrupt handler.
2845 */
2846static irqreturn_t qib_7322bufavail(int irq, void *data)
2847{
2848 struct qib_devdata *dd = data;
2849
2850 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2851 /*
2852 * This return value is not great, but we do not want the
2853 * interrupt core code to remove our interrupt handler
2854 * because we don't appear to be handling an interrupt
2855 * during a chip reset.
2856 */
2857 return IRQ_HANDLED;
2858
2859 qib_stats.sps_ints++;
2860 if (dd->int_counter != (u32) -1)
2861 dd->int_counter++;
2862
2863 /* Clear the interrupt bit we expect to be set. */
2864 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
2865
2866 /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
2867 if (dd->flags & QIB_INITTED)
2868 qib_ib_piobufavail(dd);
2869 else
2870 qib_wantpiobuf_7322_intr(dd, 0);
2871
2872 return IRQ_HANDLED;
2873}
2874
2875/*
2876 * Dedicated Send DMA interrupt handler.
2877 */
2878static irqreturn_t sdma_intr(int irq, void *data)
2879{
2880 struct qib_pportdata *ppd = data;
2881 struct qib_devdata *dd = ppd->dd;
2882
2883 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2884 /*
2885 * This return value is not great, but we do not want the
2886 * interrupt core code to remove our interrupt handler
2887 * because we don't appear to be handling an interrupt
2888 * during a chip reset.
2889 */
2890 return IRQ_HANDLED;
2891
2892 qib_stats.sps_ints++;
2893 if (dd->int_counter != (u32) -1)
2894 dd->int_counter++;
2895
2896 /* Clear the interrupt bit we expect to be set. */
2897 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2898 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
2899 qib_sdma_intr(ppd);
2900
2901 return IRQ_HANDLED;
2902}
2903
2904/*
2905 * Dedicated Send DMA idle interrupt handler.
2906 */
2907static irqreturn_t sdma_idle_intr(int irq, void *data)
2908{
2909 struct qib_pportdata *ppd = data;
2910 struct qib_devdata *dd = ppd->dd;
2911
2912 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2913 /*
2914 * This return value is not great, but we do not want the
2915 * interrupt core code to remove our interrupt handler
2916 * because we don't appear to be handling an interrupt
2917 * during a chip reset.
2918 */
2919 return IRQ_HANDLED;
2920
2921 qib_stats.sps_ints++;
2922 if (dd->int_counter != (u32) -1)
2923 dd->int_counter++;
2924
2925 /* Clear the interrupt bit we expect to be set. */
2926 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2927 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
2928 qib_sdma_intr(ppd);
2929
2930 return IRQ_HANDLED;
2931}
2932
2933/*
2934 * Dedicated Send DMA progress interrupt handler.
2935 */
2936static irqreturn_t sdma_progress_intr(int irq, void *data)
2937{
2938 struct qib_pportdata *ppd = data;
2939 struct qib_devdata *dd = ppd->dd;
2940
2941 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2942 /*
2943 * This return value is not great, but we do not want the
2944 * interrupt core code to remove our interrupt handler
2945 * because we don't appear to be handling an interrupt
2946 * during a chip reset.
2947 */
2948 return IRQ_HANDLED;
2949
2950 qib_stats.sps_ints++;
2951 if (dd->int_counter != (u32) -1)
2952 dd->int_counter++;
2953
2954 /* Clear the interrupt bit we expect to be set. */
2955 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2956 INT_MASK_P(SDmaProgress, 1) :
2957 INT_MASK_P(SDmaProgress, 0));
2958 qib_sdma_intr(ppd);
2959
2960 return IRQ_HANDLED;
2961}
2962
2963/*
2964 * Dedicated Send DMA cleanup interrupt handler.
2965 */
2966static irqreturn_t sdma_cleanup_intr(int irq, void *data)
2967{
2968 struct qib_pportdata *ppd = data;
2969 struct qib_devdata *dd = ppd->dd;
2970
2971 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2972 /*
2973 * This return value is not great, but we do not want the
2974 * interrupt core code to remove our interrupt handler
2975 * because we don't appear to be handling an interrupt
2976 * during a chip reset.
2977 */
2978 return IRQ_HANDLED;
2979
2980 qib_stats.sps_ints++;
2981 if (dd->int_counter != (u32) -1)
2982 dd->int_counter++;
2983
2984 /* Clear the interrupt bit we expect to be set. */
2985 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2986 INT_MASK_PM(SDmaCleanupDone, 1) :
2987 INT_MASK_PM(SDmaCleanupDone, 0));
2988 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
2989
2990 return IRQ_HANDLED;
2991}
2992
2993/*
2994 * Set up our chip-specific interrupt handler.
2995 * The interrupt type has already been setup, so
2996 * we just need to do the registration and error checking.
2997 * If we are using MSIx interrupts, we may fall back to
2998 * INTx later, if the interrupt handler doesn't get called
2999 * within 1/2 second (see verify_interrupt()).
3000 */
3001static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3002{
3003 int ret, i, msixnum;
3004 u64 redirect[6];
3005 u64 mask;
3006
3007 if (!dd->num_pports)
3008 return;
3009
3010 if (clearpend) {
3011 /*
3012 * if not switching interrupt types, be sure interrupts are
3013 * disabled, and then clear anything pending at this point,
3014 * because we are starting clean.
3015 */
3016 qib_7322_set_intr_state(dd, 0);
3017
3018 /* clear the reset error, init error/hwerror mask */
3019 qib_7322_init_hwerrors(dd);
3020
3021 /* clear any interrupt bits that might be set */
3022 qib_write_kreg(dd, kr_intclear, ~0ULL);
3023
3024 /* make sure no pending MSIx intr, and clear diag reg */
3025 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3026 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3027 }
3028
3029 if (!dd->cspec->num_msix_entries) {
3030 /* Try to get INTx interrupt */
3031try_intx:
3032 if (!dd->pcidev->irq) {
3033 qib_dev_err(dd, "irq is 0, BIOS error? "
3034 "Interrupts won't work\n");
3035 goto bail;
3036 }
3037 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3038 IRQF_SHARED, QIB_DRV_NAME, dd);
3039 if (ret) {
3040 qib_dev_err(dd, "Couldn't setup INTx "
3041 "interrupt (irq=%d): %d\n",
3042 dd->pcidev->irq, ret);
3043 goto bail;
3044 }
3045 dd->cspec->irq = dd->pcidev->irq;
3046 dd->cspec->main_int_mask = ~0ULL;
3047 goto bail;
3048 }
3049
3050 /* Try to get MSIx interrupts */
3051 memset(redirect, 0, sizeof redirect);
3052 mask = ~0ULL;
3053 msixnum = 0;
3054 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3055 irq_handler_t handler;
3056 const char *name;
3057 void *arg;
3058 u64 val;
3059 int lsb, reg, sh;
3060
3061 if (i < ARRAY_SIZE(irq_table)) {
3062 if (irq_table[i].port) {
3063 /* skip if for a non-configured port */
3064 if (irq_table[i].port > dd->num_pports)
3065 continue;
3066 arg = dd->pport + irq_table[i].port - 1;
3067 } else
3068 arg = dd;
3069 lsb = irq_table[i].lsb;
3070 handler = irq_table[i].handler;
3071 name = irq_table[i].name;
3072 } else {
3073 unsigned ctxt;
3074
3075 ctxt = i - ARRAY_SIZE(irq_table);
3076 /* per krcvq context receive interrupt */
3077 arg = dd->rcd[ctxt];
3078 if (!arg)
3079 continue;
3080 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3081 handler = qib_7322pintr;
3082 name = QIB_DRV_NAME " (kctx)";
3083 }
3084 ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
3085 handler, 0, name, arg);
3086 if (ret) {
3087 /*
3088 * Shouldn't happen since the enable said we could
3089 * have as many as we are trying to setup here.
3090 */
3091 qib_dev_err(dd, "Couldn't setup MSIx "
3092 "interrupt (vec=%d, irq=%d): %d\n", msixnum,
3093 dd->cspec->msix_entries[msixnum].vector,
3094 ret);
3095 qib_7322_nomsix(dd);
3096 goto try_intx;
3097 }
3098 dd->cspec->msix_arg[msixnum] = arg;
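		/*
		 * Sketch of the mapping below: lsb identifies the interrupt
		 * source bit; lsb / IBA7322_REDIRECT_VEC_PER_REG selects one
		 * of the IntRedirect registers, and the remainder selects
		 * the field within it that receives this MSIx vector number.
		 */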
3099 if (lsb >= 0) {
3100 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3101 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3102 SYM_LSB(IntRedirect0, vec1);
3103 mask &= ~(1ULL << lsb);
3104 redirect[reg] |= ((u64) msixnum) << sh;
3105 }
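		/*
		 * The read below fetches this vector's MSIx table data; the
		 * value is otherwise unused, so this is presumably just a
		 * flush/ordering read (an assumption, not stated in the
		 * original comments).
		 */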
3106 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3107 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3108 msixnum++;
3109 }
3110 /* Initialize the vector mapping */
3111 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3112 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3113 dd->cspec->main_int_mask = mask;
3114bail:;
3115}
3116
3117/**
3118 * qib_7322_boardname - fill in the board name and note features
3119 * @dd: the qlogic_ib device
3120 *
3121 * info will be based on the board revision register
3122 */
3123static unsigned qib_7322_boardname(struct qib_devdata *dd)
3124{
3125 /* Will need enumeration of board-types here */
3126 char *n;
3127 u32 boardid, namelen;
3128 unsigned features = DUAL_PORT_CAP;
3129
3130 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3131
3132 switch (boardid) {
3133 case 0:
3134 n = "InfiniPath_QLE7342_Emulation";
3135 break;
3136 case 1:
3137 n = "InfiniPath_QLE7340";
3138 dd->flags |= QIB_HAS_QSFP;
3139 features = PORT_SPD_CAP;
3140 break;
3141 case 2:
3142 n = "InfiniPath_QLE7342";
3143 dd->flags |= QIB_HAS_QSFP;
3144 break;
3145 case 3:
3146 n = "InfiniPath_QMI7342";
3147 break;
3148 case 4:
3149 n = "InfiniPath_Unsupported7342";
3150 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3151 features = 0;
3152 break;
3153 case BOARD_QMH7342:
3154 n = "InfiniPath_QMH7342";
3155 features = 0x24;
3156 break;
3157 case BOARD_QME7342:
3158 n = "InfiniPath_QME7342";
3159 break;
3160 case 15:
3161 n = "InfiniPath_QLE7342_TEST";
3162 dd->flags |= QIB_HAS_QSFP;
3163 break;
3164 default:
3165 n = "InfiniPath_QLE73xy_UNKNOWN";
3166 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3167 break;
3168 }
3169 dd->board_atten = 1; /* index into txdds_Xdr */
3170
3171 namelen = strlen(n) + 1;
3172 dd->boardname = kmalloc(namelen, GFP_KERNEL);
3173 if (!dd->boardname)
3174 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3175 else
3176 snprintf(dd->boardname, namelen, "%s", n);
3177
3178 snprintf(dd->boardversion, sizeof(dd->boardversion),
3179 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3180 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3181 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3182 dd->majrev, dd->minrev,
3183 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3184
3185 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3186 qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3187 " by module parameter\n", dd->unit);
3188 features &= PORT_SPD_CAP;
3189 }
3190
3191 return features;
3192}
3193
3194/*
3195 * This routine sleeps, so it can only be called from user context, not
3196 * from interrupt context.
3197 */
3198static int qib_do_7322_reset(struct qib_devdata *dd)
3199{
3200 u64 val;
3201 u64 *msix_vecsave;
3202 int i, msix_entries, ret = 1;
3203 u16 cmdval;
3204 u8 int_line, clinesz;
3205 unsigned long flags;
3206
3207 /* Use dev_err so it shows up in logs, etc. */
3208 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3209
3210 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3211
3212 msix_entries = dd->cspec->num_msix_entries;
3213
3214 /* no interrupts till re-initted */
3215 qib_7322_set_intr_state(dd, 0);
3216
3217 if (msix_entries) {
3218 qib_7322_nomsix(dd);
3219 /* can be up to 512 bytes, too big for stack */
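		/* e.g. 32 vectors * 2 u64 (addr + data) * 8 bytes = 512 */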
3220 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3221 sizeof(u64), GFP_KERNEL);
3222 if (!msix_vecsave)
3223 qib_dev_err(dd, "No mem to save MSIx data\n");
3224 } else
3225 msix_vecsave = NULL;
3226
3227 /*
3228 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3229 * info that is set up by the BIOS, so we have to save and restore
3230 * it ourselves. There is some risk something could change it,
3231 * after we save it, but since we have disabled the MSIx, it
3232 * shouldn't be touched...
3233 */
3234 for (i = 0; i < msix_entries; i++) {
3235 u64 vecaddr, vecdata;
3236 vecaddr = qib_read_kreg64(dd, 2 * i +
3237 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3238 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3239 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3240 if (msix_vecsave) {
3241 msix_vecsave[2 * i] = vecaddr;
3242 /* save it without the masked bit set */
3243 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3244 }
3245 }
3246
3247 dd->pport->cpspec->ibdeltainprog = 0;
3248 dd->pport->cpspec->ibsymdelta = 0;
3249 dd->pport->cpspec->iblnkerrdelta = 0;
3250 dd->pport->cpspec->ibmalfdelta = 0;
3251 dd->int_counter = 0; /* so we check interrupts work again */
3252
3253 /*
3254 * Keep chip from being accessed until we are ready. Use
3255 * writeq() directly, to allow the write even though QIB_PRESENT
3256	 * isn't set.
3257 */
3258 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3259 dd->flags |= QIB_DOING_RESET;
3260 val = dd->control | QLOGIC_IB_C_RESET;
3261 writeq(val, &dd->kregbase[kr_control]);
3262
3263 for (i = 1; i <= 5; i++) {
3264 /*
3265 * Allow MBIST, etc. to complete; longer on each retry.
3266 * We sometimes get machine checks from bus timeout if no
3267 * response, so for now, make it *really* long.
3268 */
3269 msleep(1000 + (1 + i) * 3000);
3270
3271 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3272
3273 /*
3274 * Use readq directly, so we don't need to mark it as PRESENT
3275 * until we get a successful indication that all is well.
3276 */
3277 val = readq(&dd->kregbase[kr_revision]);
3278 if (val == dd->revision)
3279 break;
3280 if (i == 5) {
3281 qib_dev_err(dd, "Failed to initialize after reset, "
3282 "unusable\n");
3283 ret = 0;
3284 goto bail;
3285 }
3286 }
3287
3288 dd->flags |= QIB_PRESENT; /* it's back */
3289
3290 if (msix_entries) {
3291 /* restore the MSIx vector address and data if saved above */
3292 for (i = 0; i < msix_entries; i++) {
3293 dd->cspec->msix_entries[i].entry = i;
3294 if (!msix_vecsave || !msix_vecsave[2 * i])
3295 continue;
3296 qib_write_kreg(dd, 2 * i +
3297 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3298 msix_vecsave[2 * i]);
3299 qib_write_kreg(dd, 1 + 2 * i +
3300 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3301 msix_vecsave[1 + 2 * i]);
3302 }
3303 }
3304
3305 /* initialize the remaining registers. */
3306 for (i = 0; i < dd->num_pports; ++i)
3307 write_7322_init_portregs(&dd->pport[i]);
3308 write_7322_initregs(dd);
3309
3310 if (qib_pcie_params(dd, dd->lbus_width,
3311 &dd->cspec->num_msix_entries,
3312 dd->cspec->msix_entries))
3313 qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
3314 "continuing anyway\n");
3315
3316 qib_setup_7322_interrupt(dd, 1);
3317
3318 for (i = 0; i < dd->num_pports; ++i) {
3319 struct qib_pportdata *ppd = &dd->pport[i];
3320
3321 spin_lock_irqsave(&ppd->lflags_lock, flags);
3322 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3323 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3324 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3325 }
3326
3327bail:
3328 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3329 kfree(msix_vecsave);
3330 return ret;
3331}
3332
3333/**
3334 * qib_7322_put_tid - write a TID to the chip
3335 * @dd: the qlogic_ib device
3336 * @tidptr: pointer to the expected TID (in chip) to update
3337 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
3338 * @pa: physical address of in memory buffer; tidinvalid if freeing
3339 */
3340static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3341 u32 type, unsigned long pa)
3342{
3343 if (!(dd->flags & QIB_PRESENT))
3344 return;
3345 if (pa != dd->tidinvalid) {
3346 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3347
3348 /* paranoia checks */
3349 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3350 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3351 pa);
3352 return;
3353 }
3354 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3355 qib_dev_err(dd, "Physical page address 0x%lx "
3356 "larger than supported\n", pa);
3357 return;
3358 }
3359
3360 if (type == RCVHQ_RCV_TYPE_EAGER)
3361 chippa |= dd->tidtemplate;
3362 else /* for now, always full 4KB page */
3363 chippa |= IBA7322_TID_SZ_4K;
3364 pa = chippa;
3365 }
3366 writeq(pa, tidptr);
3367 mmiowb();
3368}
3369
3370/**
3371 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3372 * @dd: the qlogic_ib device
3373 * @rcd: the receive context (ctxt) data
3374 *
3375 * clear all TID entries for a ctxt, expected and eager.
3376 * Used from qib_close().
3377 */
3378static void qib_7322_clear_tids(struct qib_devdata *dd,
3379 struct qib_ctxtdata *rcd)
3380{
3381 u64 __iomem *tidbase;
3382 unsigned long tidinv;
3383 u32 ctxt;
3384 int i;
3385
3386 if (!dd->kregbase || !rcd)
3387 return;
3388
3389 ctxt = rcd->ctxt;
3390
3391 tidinv = dd->tidinvalid;
3392 tidbase = (u64 __iomem *)
3393 ((char __iomem *) dd->kregbase +
3394 dd->rcvtidbase +
3395 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3396
3397 for (i = 0; i < dd->rcvtidcnt; i++)
3398 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3399 tidinv);
3400
3401 tidbase = (u64 __iomem *)
3402 ((char __iomem *) dd->kregbase +
3403 dd->rcvegrbase +
3404 rcd->rcvegr_tid_base * sizeof(*tidbase));
3405
3406 for (i = 0; i < rcd->rcvegrcnt; i++)
3407 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3408 tidinv);
3409}
3410
3411/**
3412 * qib_7322_tidtemplate - setup constants for TID updates
3413 * @dd: the qlogic_ib device
3414 *
3415 * We setup stuff that we use a lot, to avoid calculating each time
3416 */
3417static void qib_7322_tidtemplate(struct qib_devdata *dd)
3418{
3419 /*
3420 * For now, we always allocate 4KB buffers (at init) so we can
3421 * receive max size packets. We may want a module parameter to
3422 * specify 2KB or 4KB and/or make it per port instead of per device
3423 * for those who want to reduce memory footprint. Note that the
3424 * rcvhdrentsize size must be large enough to hold the largest
3425 * IB header (currently 96 bytes) that we expect to handle (plus of
3426 * course the 2 dwords of RHF).
3427 */
3428 if (dd->rcvegrbufsize == 2048)
3429 dd->tidtemplate = IBA7322_TID_SZ_2K;
3430 else if (dd->rcvegrbufsize == 4096)
3431 dd->tidtemplate = IBA7322_TID_SZ_4K;
3432 dd->tidinvalid = 0;
3433}
3434
3435/**
3436 * qib_init_7322_get_base_info - set chip-specific flags for user code
3437 * @rcd: the qlogic_ib ctxt
3438 * @kinfo: qib_base_info pointer
3439 *
3440 * We set the PCIE flag because the lower bandwidth on PCIe vs
3441 * HyperTransport can affect some user packet algorithms.
3442 */
3443
3444static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3445 struct qib_base_info *kinfo)
3446{
3447 kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3448 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3449 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3450 if (rcd->dd->cspec->r1)
3451 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3452 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3453 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3454
3455 return 0;
3456}
3457
3458static struct qib_message_header *
3459qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3460{
3461 u32 offset = qib_hdrget_offset(rhf_addr);
3462
3463 return (struct qib_message_header *)
3464 (rhf_addr - dd->rhf_offset + offset);
3465}
3466
3467/*
3468 * Configure number of contexts.
3469 */
3470static void qib_7322_config_ctxts(struct qib_devdata *dd)
3471{
3472 unsigned long flags;
3473 u32 nchipctxts;
3474
3475 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3476 dd->cspec->numctxts = nchipctxts;
3477 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3478 /*
3479 * Set the mask for which bits from the QPN are used
3480 * to select a context number.
3481 */
3482 dd->qpn_mask = 0x3f;
3483 dd->first_user_ctxt = NUM_IB_PORTS +
3484 (qib_n_krcv_queues - 1) * dd->num_pports;
3485 if (dd->first_user_ctxt > nchipctxts)
3486 dd->first_user_ctxt = nchipctxts;
3487 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3488 } else {
3489 dd->first_user_ctxt = NUM_IB_PORTS;
3490 dd->n_krcv_queues = 1;
3491 }
3492
3493 if (!qib_cfgctxts) {
3494 int nctxts = dd->first_user_ctxt + num_online_cpus();
3495
3496 if (nctxts <= 6)
3497 dd->ctxtcnt = 6;
3498 else if (nctxts <= 10)
3499 dd->ctxtcnt = 10;
3500 else if (nctxts <= nchipctxts)
3501 dd->ctxtcnt = nchipctxts;
3502 } else if (qib_cfgctxts < dd->num_pports)
3503 dd->ctxtcnt = dd->num_pports;
3504 else if (qib_cfgctxts <= nchipctxts)
3505 dd->ctxtcnt = qib_cfgctxts;
3506 if (!dd->ctxtcnt) /* none of the above, set to max */
3507 dd->ctxtcnt = nchipctxts;
3508
3509 /*
3510 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3511 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3512 * Lock to be paranoid about later motion, etc.
3513 */
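	/*
	 * ContextCfg encoding implied by the writes below:
	 * 0 = 6 contexts (default), 1 = 10 contexts, 2 = 18 contexts.
	 */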
3514 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3515 if (dd->ctxtcnt > 10)
3516 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3517 else if (dd->ctxtcnt > 6)
3518 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3519 /* else configure for default 6 receive ctxts */
3520
3521 /* The XRC opcode is 5. */
3522 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3523
3524 /*
3525 * RcvCtrl *must* be written here so that the
3526 * chip understands how to change rcvegrcnt below.
3527 */
3528 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3529 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3530
3531 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3532 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3533 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3534 dd->num_pports > 1 ? 1024U : 2048U);
3535}
3536
3537static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3538{
3539
3540 int lsb, ret = 0;
3541 u64 maskr; /* right-justified mask */
3542
3543 switch (which) {
3544
3545 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3546 ret = ppd->link_width_enabled;
3547 goto done;
3548
3549 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3550 ret = ppd->link_width_active;
3551 goto done;
3552
3553 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3554 ret = ppd->link_speed_enabled;
3555 goto done;
3556
3557 case QIB_IB_CFG_SPD: /* Get current Link spd */
3558 ret = ppd->link_speed_active;
3559 goto done;
3560
3561 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3562 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3563 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3564 break;
3565
3566 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3567 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3568 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3569 break;
3570
3571 case QIB_IB_CFG_LINKLATENCY:
3572 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3573 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3574 goto done;
3575
3576 case QIB_IB_CFG_OP_VLS:
3577 ret = ppd->vls_operational;
3578 goto done;
3579
3580 case QIB_IB_CFG_VL_HIGH_CAP:
3581 ret = 16;
3582 goto done;
3583
3584 case QIB_IB_CFG_VL_LOW_CAP:
3585 ret = 16;
3586 goto done;
3587
3588 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3589 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3590 OverrunThreshold);
3591 goto done;
3592
3593 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3594 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3595 PhyerrThreshold);
3596 goto done;
3597
3598 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3599 /* will only take effect when the link state changes */
3600 ret = (ppd->cpspec->ibcctrl_a &
3601 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3602 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3603 goto done;
3604
3605 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3606 lsb = IBA7322_IBC_HRTBT_LSB;
3607 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3608 break;
3609
3610 case QIB_IB_CFG_PMA_TICKS:
3611 /*
3612 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
3613 * Since the clock is always 250MHz, the value is 3, 1 or 0.
3614 */
3615 if (ppd->link_speed_active == QIB_IB_QDR)
3616 ret = 3;
3617 else if (ppd->link_speed_active == QIB_IB_DDR)
3618 ret = 1;
3619 else
3620 ret = 0;
3621 goto done;
3622
3623 default:
3624 ret = -EINVAL;
3625 goto done;
3626 }
3627 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3628done:
3629 return ret;
3630}
3631
3632/*
3633 * Below again cribbed liberally from older version. Do not lean
3634 * heavily on it.
3635 */
3636#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3637#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3638 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3639
3640static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3641{
3642 struct qib_devdata *dd = ppd->dd;
3643 u64 maskr; /* right-justified mask */
3644 int lsb, ret = 0;
3645 u16 lcmd, licmd;
3646 unsigned long flags;
3647
3648 switch (which) {
3649 case QIB_IB_CFG_LIDLMC:
3650 /*
3651		 * Set LID and LMC. Combined to avoid possible hazard;
3652		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val.
3653 */
3654 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3655 maskr = IBA7322_IBC_DLIDLMC_MASK;
3656 /*
3657 * For header-checking, the SLID in the packet will
3658 * be masked with SendIBSLMCMask, and compared
3659 * with SendIBSLIDAssignMask. Make sure we do not
3660 * set any bits not covered by the mask, or we get
3661 * false-positives.
3662 */
3663 qib_write_kreg_port(ppd, krp_sendslid,
3664 val & (val >> 16) & SendIBSLIDAssignMask);
3665 qib_write_kreg_port(ppd, krp_sendslidmask,
3666 (val >> 16) & SendIBSLMCMask);
3667 break;
3668
3669 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3670 ppd->link_width_enabled = val;
3671 /* convert IB value to chip register value */
3672 if (val == IB_WIDTH_1X)
3673 val = 0;
3674 else if (val == IB_WIDTH_4X)
3675 val = 1;
3676 else
3677 val = 3;
3678 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3679 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3680 break;
3681
3682 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3683 /*
3684 * As with width, only write the actual register if the
3685 * link is currently down, otherwise takes effect on next
3686		 * link change. Since setting is being explicitly requested
3687 * (via MAD or sysfs), clear autoneg failure status if speed
3688 * autoneg is enabled.
3689 */
3690 ppd->link_speed_enabled = val;
3691 val <<= IBA7322_IBC_SPEED_LSB;
3692 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3693 IBA7322_IBC_MAX_SPEED_MASK;
3694 if (val & (val - 1)) {
3695			/* Multiple speeds enabled */
3696 val |= IBA7322_IBC_IBTA_1_2_MASK |
3697 IBA7322_IBC_MAX_SPEED_MASK;
3698 spin_lock_irqsave(&ppd->lflags_lock, flags);
3699 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3700 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3701 } else if (val & IBA7322_IBC_SPEED_QDR)
3702 val |= IBA7322_IBC_IBTA_1_2_MASK;
3703 /* IBTA 1.2 mode + min/max + speed bits are contiguous */
3704 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3705 break;
3706
3707 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3708 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3709 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3710 break;
3711
3712 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3713 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3714 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3715 break;
3716
3717 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3718 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3719 OverrunThreshold);
3720 if (maskr != val) {
3721 ppd->cpspec->ibcctrl_a &=
3722 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3723 ppd->cpspec->ibcctrl_a |= (u64) val <<
3724 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3725 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3726 ppd->cpspec->ibcctrl_a);
3727 qib_write_kreg(dd, kr_scratch, 0ULL);
3728 }
3729 goto bail;
3730
3731 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3732 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3733 PhyerrThreshold);
3734 if (maskr != val) {
3735 ppd->cpspec->ibcctrl_a &=
3736 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3737 ppd->cpspec->ibcctrl_a |= (u64) val <<
3738 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3739 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3740 ppd->cpspec->ibcctrl_a);
3741 qib_write_kreg(dd, kr_scratch, 0ULL);
3742 }
3743 goto bail;
3744
3745 case QIB_IB_CFG_PKEYS: /* update pkeys */
3746 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3747 ((u64) ppd->pkeys[2] << 32) |
3748 ((u64) ppd->pkeys[3] << 48);
3749 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3750 goto bail;
3751
3752 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3753 /* will only take effect when the link state changes */
3754 if (val == IB_LINKINITCMD_POLL)
3755 ppd->cpspec->ibcctrl_a &=
3756 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3757 else /* SLEEP */
3758 ppd->cpspec->ibcctrl_a |=
3759 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3760 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3761 qib_write_kreg(dd, kr_scratch, 0ULL);
3762 goto bail;
3763
3764 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3765 /*
3766 * Update our housekeeping variables, and set IBC max
3767 * size, same as init code; max IBC is max we allow in
3768 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
3769 * Set even if it's unchanged, print debug message only
3770 * on changes.
3771 */
3772 val = (ppd->ibmaxlen >> 2) + 1;
3773 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3774 ppd->cpspec->ibcctrl_a |= (u64)val <<
3775 SYM_LSB(IBCCtrlA_0, MaxPktLen);
3776 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3777 ppd->cpspec->ibcctrl_a);
3778 qib_write_kreg(dd, kr_scratch, 0ULL);
3779 goto bail;
3780
3781 case QIB_IB_CFG_LSTATE: /* set the IB link state */
3782 switch (val & 0xffff0000) {
3783 case IB_LINKCMD_DOWN:
3784 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3785 ppd->cpspec->ibmalfusesnap = 1;
3786 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3787 crp_errlink);
3788 if (!ppd->cpspec->ibdeltainprog &&
3789 qib_compat_ddr_negotiate) {
3790 ppd->cpspec->ibdeltainprog = 1;
3791 ppd->cpspec->ibsymsnap =
3792 read_7322_creg32_port(ppd,
3793 crp_ibsymbolerr);
3794 ppd->cpspec->iblnkerrsnap =
3795 read_7322_creg32_port(ppd,
3796 crp_iblinkerrrecov);
3797 }
3798 break;
3799
3800 case IB_LINKCMD_ARMED:
3801 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
3802 if (ppd->cpspec->ibmalfusesnap) {
3803 ppd->cpspec->ibmalfusesnap = 0;
3804 ppd->cpspec->ibmalfdelta +=
3805 read_7322_creg32_port(ppd,
3806 crp_errlink) -
3807 ppd->cpspec->ibmalfsnap;
3808 }
3809 break;
3810
3811 case IB_LINKCMD_ACTIVE:
3812 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
3813 break;
3814
3815 default:
3816 ret = -EINVAL;
3817 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
3818 goto bail;
3819 }
3820 switch (val & 0xffff) {
3821 case IB_LINKINITCMD_NOP:
3822 licmd = 0;
3823 break;
3824
3825 case IB_LINKINITCMD_POLL:
3826 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
3827 break;
3828
3829 case IB_LINKINITCMD_SLEEP:
3830 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
3831 break;
3832
3833 case IB_LINKINITCMD_DISABLE:
3834 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
3835 ppd->cpspec->chase_end = 0;
3836 /*
3837 * stop state chase counter and timer, if running.
3838			 * wait for pending timer, but don't clear .data (ppd)!
3839 */
3840 if (ppd->cpspec->chase_timer.expires) {
3841 del_timer_sync(&ppd->cpspec->chase_timer);
3842 ppd->cpspec->chase_timer.expires = 0;
3843 }
3844 break;
3845
3846 default:
3847 ret = -EINVAL;
3848 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
3849 val & 0xffff);
3850 goto bail;
3851 }
3852 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
3853 goto bail;
3854
3855 case QIB_IB_CFG_OP_VLS:
3856 if (ppd->vls_operational != val) {
3857 ppd->vls_operational = val;
3858 set_vls(ppd);
3859 }
3860 goto bail;
3861
3862 case QIB_IB_CFG_VL_HIGH_LIMIT:
3863 qib_write_kreg_port(ppd, krp_highprio_limit, val);
3864 goto bail;
3865
3866 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
3867 if (val > 3) {
3868 ret = -EINVAL;
3869 goto bail;
3870 }
3871 lsb = IBA7322_IBC_HRTBT_LSB;
3872 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3873 break;
3874
3875 case QIB_IB_CFG_PORT:
3876 /* val is the port number of the switch we are connected to. */
3877 if (ppd->dd->cspec->r1) {
3878 cancel_delayed_work(&ppd->cpspec->ipg_work);
3879 ppd->cpspec->ipg_tries = 0;
3880 }
3881 goto bail;
3882
3883 default:
3884 ret = -EINVAL;
3885 goto bail;
3886 }
3887 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
3888 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
3889 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
3890 qib_write_kreg(dd, kr_scratch, 0);
3891bail:
3892 return ret;
3893}
3894
3895static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
3896{
3897 int ret = 0;
3898 u64 val, ctrlb;
3899
3900 /* only IBC loopback, may add serdes and xgxs loopbacks later */
3901 if (!strncmp(what, "ibc", 3)) {
3902 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
3903 Loopback);
3904 val = 0; /* disable heart beat, so link will come up */
3905 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
3906 ppd->dd->unit, ppd->port);
3907 } else if (!strncmp(what, "off", 3)) {
3908 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
3909 Loopback);
3910 /* enable heart beat again */
3911 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
3912 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
3913 "(normal)\n", ppd->dd->unit, ppd->port);
3914 } else
3915 ret = -EINVAL;
3916 if (!ret) {
3917 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3918 ppd->cpspec->ibcctrl_a);
3919 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
3920 << IBA7322_IBC_HRTBT_LSB);
3921 ppd->cpspec->ibcctrl_b = ctrlb | val;
3922 qib_write_kreg_port(ppd, krp_ibcctrl_b,
3923 ppd->cpspec->ibcctrl_b);
3924 qib_write_kreg(ppd->dd, kr_scratch, 0);
3925 }
3926 return ret;
3927}
3928
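/*
 * Helpers for the VL arbitration tables: each of the 16 per-port registers
 * read or written below carries one {VL, weight} pair, packed per the
 * LowPriority0_0 field definitions (the same layout is used for the
 * high-priority table).
 */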
3929static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
3930 struct ib_vl_weight_elem *vl)
3931{
3932 unsigned i;
3933
3934 for (i = 0; i < 16; i++, regno++, vl++) {
3935 u32 val = qib_read_kreg_port(ppd, regno);
3936
3937 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
3938 SYM_RMASK(LowPriority0_0, VirtualLane);
3939 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
3940 SYM_RMASK(LowPriority0_0, Weight);
3941 }
3942}
3943
3944static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
3945 struct ib_vl_weight_elem *vl)
3946{
3947 unsigned i;
3948
3949 for (i = 0; i < 16; i++, regno++, vl++) {
3950 u64 val;
3951
3952 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
3953 SYM_LSB(LowPriority0_0, VirtualLane)) |
3954 ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
3955 SYM_LSB(LowPriority0_0, Weight));
3956 qib_write_kreg_port(ppd, regno, val);
3957 }
3958 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
3959 struct qib_devdata *dd = ppd->dd;
3960 unsigned long flags;
3961
3962 spin_lock_irqsave(&dd->sendctrl_lock, flags);
3963 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
3964 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
3965 qib_write_kreg(dd, kr_scratch, 0);
3966 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
3967 }
3968}
3969
3970static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
3971{
3972 switch (which) {
3973 case QIB_IB_TBL_VL_HIGH_ARB:
3974 get_vl_weights(ppd, krp_highprio_0, t);
3975 break;
3976
3977 case QIB_IB_TBL_VL_LOW_ARB:
3978 get_vl_weights(ppd, krp_lowprio_0, t);
3979 break;
3980
3981 default:
3982 return -EINVAL;
3983 }
3984 return 0;
3985}
3986
3987static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
3988{
3989 switch (which) {
3990 case QIB_IB_TBL_VL_HIGH_ARB:
3991 set_vl_weights(ppd, krp_highprio_0, t);
3992 break;
3993
3994 case QIB_IB_TBL_VL_LOW_ARB:
3995 set_vl_weights(ppd, krp_lowprio_0, t);
3996 break;
3997
3998 default:
3999 return -EINVAL;
4000 }
4001 return 0;
4002}
4003
4004static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4005 u32 updegr, u32 egrhd)
4006{
4007 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4008 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4009 if (updegr)
4010 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4011}
4012
4013static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4014{
4015 u32 head, tail;
4016
4017 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4018 if (rcd->rcvhdrtail_kvaddr)
4019 tail = qib_get_rcvhdrtail(rcd);
4020 else
4021 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4022 return head == tail;
4023}
4024
4025#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4026 QIB_RCVCTRL_CTXT_DIS | \
4027 QIB_RCVCTRL_TIDFLOW_ENB | \
4028 QIB_RCVCTRL_TIDFLOW_DIS | \
4029 QIB_RCVCTRL_TAILUPD_ENB | \
4030 QIB_RCVCTRL_TAILUPD_DIS | \
4031 QIB_RCVCTRL_INTRAVAIL_ENB | \
4032 QIB_RCVCTRL_INTRAVAIL_DIS | \
4033 QIB_RCVCTRL_BP_ENB | \
4034 QIB_RCVCTRL_BP_DIS)
4035
4036#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4037 QIB_RCVCTRL_CTXT_DIS | \
4038 QIB_RCVCTRL_PKEY_DIS | \
4039 QIB_RCVCTRL_PKEY_ENB)
4040
4041/*
4042 * Modify the RCVCTRL register in chip-specific way. This
4043 * is a function because bit positions and (future) register
4044 * location is chip-specific, but the needed operations are
4045 * generic. <op> is a bit-mask because we often want to
4046 * do multiple modifications.
4047 */
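/*
 * Illustrative call (a sketch, not taken from this file): enable a kernel
 * context and its receive-available interrupt in one update:
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			      QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
 */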
4048static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4049 int ctxt)
4050{
4051 struct qib_devdata *dd = ppd->dd;
4052 struct qib_ctxtdata *rcd;
4053 u64 mask, val;
4054 unsigned long flags;
4055
4056 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4057
4058 if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4059 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4060 if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4061 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4062 if (op & QIB_RCVCTRL_TAILUPD_ENB)
4063 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4064 if (op & QIB_RCVCTRL_TAILUPD_DIS)
4065 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4066 if (op & QIB_RCVCTRL_PKEY_ENB)
4067 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4068 if (op & QIB_RCVCTRL_PKEY_DIS)
4069 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4070 if (ctxt < 0) {
4071 mask = (1ULL << dd->ctxtcnt) - 1;
4072 rcd = NULL;
4073 } else {
4074 mask = (1ULL << ctxt);
4075 rcd = dd->rcd[ctxt];
4076 }
4077 if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4078 ppd->p_rcvctrl |=
4079 (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4080 if (!(dd->flags & QIB_NODMA_RTAIL)) {
4081 op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4082 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4083 }
4084 /* Write these registers before the context is enabled. */
4085 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4086 rcd->rcvhdrqtailaddr_phys);
4087 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4088 rcd->rcvhdrq_phys);
4089 rcd->seq_cnt = 1;
4090 }
4091 if (op & QIB_RCVCTRL_CTXT_DIS)
4092 ppd->p_rcvctrl &=
4093 ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4094 if (op & QIB_RCVCTRL_BP_ENB)
4095 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4096 if (op & QIB_RCVCTRL_BP_DIS)
4097 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4098 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4099 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4100 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4101 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4102 /*
4103 * Decide which registers to write depending on the ops enabled.
4104 * Special case is "flush" (no bits set at all)
4105 * which needs to write both.
4106 */
4107 if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4108 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4109 if (op == 0 || (op & RCVCTRL_PORT_MODS))
4110 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4111 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4112 /*
4113 * Init the context registers also; if the context was
4114 * previously disabled, tail and head should both be zero
4115 * already from the enable, but since we don't
4116 * know, we have to do it explicitly.
4117 */
4118 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4119 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4120
4121 /* be sure enabling write seen; hd/tl should be 0 */
4122 (void) qib_read_kreg32(dd, kr_scratch);
4123 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4124 dd->rcd[ctxt]->head = val;
4125 /* If kctxt, interrupt on next receive. */
4126 if (ctxt < dd->first_user_ctxt)
4127 val |= dd->rhdrhead_intr_off;
4128 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4129 } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4130 dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4131 /* arm rcv interrupt */
4132 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4133 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4134 }
4135 if (op & QIB_RCVCTRL_CTXT_DIS) {
4136 unsigned f;
4137
4138 /* Now that the context is disabled, clear these registers. */
4139 if (ctxt >= 0) {
4140 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4141 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4142 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4143 qib_write_ureg(dd, ur_rcvflowtable + f,
4144 TIDFLOW_ERRBITS, ctxt);
4145 } else {
4146 unsigned i;
4147
4148 for (i = 0; i < dd->cfgctxts; i++) {
4149 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4150 i, 0);
4151 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4152 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4153 qib_write_ureg(dd, ur_rcvflowtable + f,
4154 TIDFLOW_ERRBITS, i);
4155 }
4156 }
4157 }
4158 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4159}
4160
4161/*
4162 * Modify the SENDCTRL register in a chip-specific way. This
4163 * is a function because there are multiple such registers with
4164 * slightly different layouts.
4165 * The chip doesn't allow back-to-back sendctrl writes, so write
4166 * the scratch register after writing sendctrl.
4167 *
4168 * Which register is written depends on the operation.
4169 * Most operate on the common register, while
4170 * SEND_ENB and SEND_DIS operate on the per-port ones.
4171 * SEND_ENB is included in common because it can change SPCL_TRIG
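 * As a rough usage sketch (illustrative only): blipping the buffer-available
 * updates is sendctrl_7322_mod(ppd, QIB_SENDCTRL_AVAIL_BLIP), while enabling
 * sends on a port is sendctrl_7322_mod(ppd, QIB_SENDCTRL_SEND_ENB).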
4172 */
4173#define SENDCTRL_COMMON_MODS (\
4174 QIB_SENDCTRL_CLEAR | \
4175 QIB_SENDCTRL_AVAIL_DIS | \
4176 QIB_SENDCTRL_AVAIL_ENB | \
4177 QIB_SENDCTRL_AVAIL_BLIP | \
4178 QIB_SENDCTRL_DISARM | \
4179 QIB_SENDCTRL_DISARM_ALL | \
4180 QIB_SENDCTRL_SEND_ENB)
4181
4182#define SENDCTRL_PORT_MODS (\
4183 QIB_SENDCTRL_CLEAR | \
4184 QIB_SENDCTRL_SEND_ENB | \
4185 QIB_SENDCTRL_SEND_DIS | \
4186 QIB_SENDCTRL_FLUSH)
4187
4188static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4189{
4190 struct qib_devdata *dd = ppd->dd;
4191 u64 tmp_dd_sendctrl;
4192 unsigned long flags;
4193
4194 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4195
4196 /* First the dd ones that are "sticky", saved in shadow */
4197 if (op & QIB_SENDCTRL_CLEAR)
4198 dd->sendctrl = 0;
4199 if (op & QIB_SENDCTRL_AVAIL_DIS)
4200 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4201 else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4202 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4203 if (dd->flags & QIB_USE_SPCL_TRIG)
4204 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4205 }
4206
4207 /* Then the ppd ones that are "sticky", saved in shadow */
4208 if (op & QIB_SENDCTRL_SEND_DIS)
4209 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4210 else if (op & QIB_SENDCTRL_SEND_ENB)
4211 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4212
4213 if (op & QIB_SENDCTRL_DISARM_ALL) {
4214 u32 i, last;
4215
4216 tmp_dd_sendctrl = dd->sendctrl;
4217 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4218 /*
4219 * Disarm any buffers that are not yet launched,
4220 * disabling updates until done.
4221 */
4222 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4223 for (i = 0; i < last; i++) {
4224 qib_write_kreg(dd, kr_sendctrl,
4225 tmp_dd_sendctrl |
4226 SYM_MASK(SendCtrl, Disarm) | i);
4227 qib_write_kreg(dd, kr_scratch, 0);
4228 }
4229 }
4230
4231 if (op & QIB_SENDCTRL_FLUSH) {
4232 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4233
4234 /*
4235 * Now drain all the fifos. The Abort bit should never be
4236 * needed, so for now, at least, we don't use it.
4237 */
4238 tmp_ppd_sendctrl |=
4239 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4240 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4241 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4242 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4243 qib_write_kreg(dd, kr_scratch, 0);
4244 }
4245
4246 tmp_dd_sendctrl = dd->sendctrl;
4247
4248 if (op & QIB_SENDCTRL_DISARM)
4249 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4250 ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4251 SYM_LSB(SendCtrl, DisarmSendBuf));
4252 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4253 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4254 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4255
4256 if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4257 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4258 qib_write_kreg(dd, kr_scratch, 0);
4259 }
4260
4261 if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4262 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4263 qib_write_kreg(dd, kr_scratch, 0);
4264 }
4265
4266 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4267 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4268 qib_write_kreg(dd, kr_scratch, 0);
4269 }
4270
4271 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4272
4273 if (op & QIB_SENDCTRL_FLUSH) {
4274 u32 v;
4275 /*
4276 * ensure writes have hit chip, then do a few
4277 * more reads, to allow DMA of pioavail registers
4278 * to occur, so in-memory copy is in sync with
4279 * the chip. Not always safe to sleep.
4280 */
4281 v = qib_read_kreg32(dd, kr_scratch);
4282 qib_write_kreg(dd, kr_scratch, v);
4283 v = qib_read_kreg32(dd, kr_scratch);
4284 qib_write_kreg(dd, kr_scratch, v);
4285 qib_read_kreg32(dd, kr_scratch);
4286 }
4287}
4288
4289#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4290#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4291#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4292
4293/**
4294 * qib_portcntr_7322 - read a per-port chip counter
4295 * @ppd: the qlogic_ib pport
4296 * @reg: the counter to read (not a chip offset)
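 *
 * Illustrative use (as in the faststats code below):
 *   traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
 *                 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);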
4297 */
4298static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4299{
4300 struct qib_devdata *dd = ppd->dd;
4301 u64 ret = 0ULL;
4302 u16 creg;
4303 /* 0xffff for unimplemented or synthesized counters */
4304 static const u32 xlator[] = {
4305 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4306 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4307 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4308 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4309 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4310 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4311 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4312 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4313 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4314 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4315 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4316 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4317 [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4318 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4319 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4320 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4321 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4322 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4323 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4324 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4325 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4326 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4327 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4328 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4329 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4330 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4331 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4332 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4333 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4334 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4335 /*
4336 * the next 3 aren't really counters, but were implemented
4337 * as counters in older chips, so they are still accessed as
4338 * though they were counters by this code.
4339 */
4340 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4341 [QIBPORTCNTR_PSSTART] = krp_psstart,
4342 [QIBPORTCNTR_PSSTAT] = krp_psstat,
4343 /* pseudo-counter, summed for all ports */
4344 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4345 };
4346
4347 if (reg >= ARRAY_SIZE(xlator)) {
4348 qib_devinfo(ppd->dd->pcidev,
4349 "Unimplemented portcounter %u\n", reg);
4350 goto done;
4351 }
4352 creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4353
4354 /* handle non-counters and special cases first */
4355 if (reg == QIBPORTCNTR_KHDROVFL) {
4356 int i;
4357
4358 /* sum over all kernel contexts (skip if mini_init) */
4359 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4360 struct qib_ctxtdata *rcd = dd->rcd[i];
4361
4362 if (!rcd || rcd->ppd != ppd)
4363 continue;
4364 ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4365 }
4366 goto done;
4367 } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4368 /*
4369 * Used as part of the synthesis of port_rcv_errors
4370 * in the verbs code for IBTA counters. Not needed for 7322,
4371 * because all the errors are already counted by other cntrs.
4372 */
4373 goto done;
4374 } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4375 reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4376 /* were counters in older chips, now per-port kernel regs */
4377 ret = qib_read_kreg_port(ppd, creg);
4378 goto done;
4379 }
4380
4381 /*
4382 * Only fast increment counters are 64 bits; use 32 bit reads to
4383 * avoid two independent reads when on Opteron.
4384 */
4385 if (xlator[reg] & _PORT_64BIT_FLAG)
4386 ret = read_7322_creg_port(ppd, creg);
4387 else
4388 ret = read_7322_creg32_port(ppd, creg);
4389 if (creg == crp_ibsymbolerr) {
4390 if (ppd->cpspec->ibdeltainprog)
4391 ret -= ret - ppd->cpspec->ibsymsnap;
4392 ret -= ppd->cpspec->ibsymdelta;
4393 } else if (creg == crp_iblinkerrrecov) {
4394 if (ppd->cpspec->ibdeltainprog)
4395 ret -= ret - ppd->cpspec->iblnkerrsnap;
4396 ret -= ppd->cpspec->iblnkerrdelta;
4397 } else if (creg == crp_errlink)
4398 ret -= ppd->cpspec->ibmalfdelta;
4399 else if (creg == crp_iblinkdown)
4400 ret += ppd->cpspec->iblnkdowndelta;
4401done:
4402 return ret;
4403}
4404
4405/*
4406 * Device counter names (not port-specific), one line per stat,
4407 * single string. Used by utilities like ipathstats to print the stats
4408 * in a way which works for different versions of drivers, without changing
4409 * the utility. Names need to be 12 chars or less (w/o newline), for proper
4410 * display by the utility.
4411 * Non-error counters are first.
4412 * Start of "error" counters is indicated by a leading "E " on the first
4413 * "error" counter, and doesn't count in label length.
4414 * The EgrOvfl list needs to be last so we truncate them at the configured
4415 * context count for the device.
4416 * cntr7322indices contains the corresponding register indices.
4417 */
4418static const char cntr7322names[] =
4419 "Interrupts\n"
4420 "HostBusStall\n"
4421 "E RxTIDFull\n"
4422 "RxTIDInvalid\n"
4423 "RxTIDFloDrop\n" /* 7322 only */
4424 "Ctxt0EgrOvfl\n"
4425 "Ctxt1EgrOvfl\n"
4426 "Ctxt2EgrOvfl\n"
4427 "Ctxt3EgrOvfl\n"
4428 "Ctxt4EgrOvfl\n"
4429 "Ctxt5EgrOvfl\n"
4430 "Ctxt6EgrOvfl\n"
4431 "Ctxt7EgrOvfl\n"
4432 "Ctxt8EgrOvfl\n"
4433 "Ctxt9EgrOvfl\n"
4434 "Ctx10EgrOvfl\n"
4435 "Ctx11EgrOvfl\n"
4436 "Ctx12EgrOvfl\n"
4437 "Ctx13EgrOvfl\n"
4438 "Ctx14EgrOvfl\n"
4439 "Ctx15EgrOvfl\n"
4440 "Ctx16EgrOvfl\n"
4441 "Ctx17EgrOvfl\n"
4442 ;
4443
4444static const u32 cntr7322indices[] = {
4445 cr_lbint | _PORT_64BIT_FLAG,
4446 cr_lbstall | _PORT_64BIT_FLAG,
4447 cr_tidfull,
4448 cr_tidinvalid,
4449 cr_rxtidflowdrop,
4450 cr_base_egrovfl + 0,
4451 cr_base_egrovfl + 1,
4452 cr_base_egrovfl + 2,
4453 cr_base_egrovfl + 3,
4454 cr_base_egrovfl + 4,
4455 cr_base_egrovfl + 5,
4456 cr_base_egrovfl + 6,
4457 cr_base_egrovfl + 7,
4458 cr_base_egrovfl + 8,
4459 cr_base_egrovfl + 9,
4460 cr_base_egrovfl + 10,
4461 cr_base_egrovfl + 11,
4462 cr_base_egrovfl + 12,
4463 cr_base_egrovfl + 13,
4464 cr_base_egrovfl + 14,
4465 cr_base_egrovfl + 15,
4466 cr_base_egrovfl + 16,
4467 cr_base_egrovfl + 17,
4468};
4469
4470/*
4471 * Same as cntr7322names and cntr7322indices, but for port-specific counters.
4472 * portcntr7322indices is somewhat complicated by some registers needing
4473 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG.
4474 */
4475static const char portcntr7322names[] =
4476 "TxPkt\n"
4477 "TxFlowPkt\n"
4478 "TxWords\n"
4479 "RxPkt\n"
4480 "RxFlowPkt\n"
4481 "RxWords\n"
4482 "TxFlowStall\n"
4483 "TxDmaDesc\n" /* 7220 and 7322-only */
4484 "E RxDlidFltr\n" /* 7220 and 7322-only */
4485 "IBStatusChng\n"
4486 "IBLinkDown\n"
4487 "IBLnkRecov\n"
4488 "IBRxLinkErr\n"
4489 "IBSymbolErr\n"
4490 "RxLLIErr\n"
4491 "RxBadFormat\n"
4492 "RxBadLen\n"
4493 "RxBufOvrfl\n"
4494 "RxEBP\n"
4495 "RxFlowCtlErr\n"
4496 "RxICRCerr\n"
4497 "RxLPCRCerr\n"
4498 "RxVCRCerr\n"
4499 "RxInvalLen\n"
4500 "RxInvalPKey\n"
4501 "RxPktDropped\n"
4502 "TxBadLength\n"
4503 "TxDropped\n"
4504 "TxInvalLen\n"
4505 "TxUnderrun\n"
4506 "TxUnsupVL\n"
4507 "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4508 "RxVL15Drop\n"
4509 "RxVlErr\n"
4510 "XcessBufOvfl\n"
4511 "RxQPBadCtxt\n" /* 7322-only from here down */
4512 "TXBadHeader\n"
4513 ;
4514
4515static const u32 portcntr7322indices[] = {
4516 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4517 crp_pktsendflow,
4518 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4519 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4520 crp_pktrcvflowctrl,
4521 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4522 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4523 crp_txsdmadesc | _PORT_64BIT_FLAG,
4524 crp_rxdlidfltr,
4525 crp_ibstatuschange,
4526 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4527 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4528 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4529 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4530 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4531 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4532 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4533 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4534 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4535 crp_rcvflowctrlviol,
4536 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4537 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4538 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4539 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4540 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4541 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4542 crp_txminmaxlenerr,
4543 crp_txdroppedpkt,
4544 crp_txlenerr,
4545 crp_txunderrun,
4546 crp_txunsupvl,
4547 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4548 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4549 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4550 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4551 crp_rxqpinvalidctxt,
4552 crp_txhdrerr,
4553};
4554
4555/* do all the setup to make the counter reads efficient later */
4556static void init_7322_cntrnames(struct qib_devdata *dd)
4557{
4558 int i, j = 0;
4559 char *s;
4560
4561 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4562 i++) {
4563 /* we always have at least one counter before the egrovfl */
4564 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4565 j = 1;
4566 s = strchr(s + 1, '\n');
4567 if (s && j)
4568 j++;
4569 }
4570 dd->cspec->ncntrs = i;
4571 if (!s)
4572 /* full list; size is without terminating null */
4573 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4574 else
4575 dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4576 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4577 * sizeof(u64), GFP_KERNEL);
4578 if (!dd->cspec->cntrs)
4579 qib_dev_err(dd, "Failed allocation for counters\n");
4580
4581 for (i = 0, s = (char *)portcntr7322names; s; i++)
4582 s = strchr(s + 1, '\n');
4583 dd->cspec->nportcntrs = i - 1;
4584 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4585 for (i = 0; i < dd->num_pports; ++i) {
4586 dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4587 * sizeof(u64), GFP_KERNEL);
4588 if (!dd->pport[i].cpspec->portcntrs)
4589 qib_dev_err(dd, "Failed allocation for"
4590 " portcounters\n");
4591 }
4592}
4593
4594static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4595 u64 **cntrp)
4596{
4597 u32 ret;
4598
4599 if (namep) {
4600 ret = dd->cspec->cntrnamelen;
4601 if (pos >= ret)
4602 ret = 0; /* final read after getting everything */
4603 else
4604 *namep = (char *) cntr7322names;
4605 } else {
4606 u64 *cntr = dd->cspec->cntrs;
4607 int i;
4608
4609 ret = dd->cspec->ncntrs * sizeof(u64);
4610 if (!cntr || pos >= ret) {
4611 /* everything read, or couldn't get memory */
4612 ret = 0;
4613 goto done;
4614 }
4615 *cntrp = cntr;
4616 for (i = 0; i < dd->cspec->ncntrs; i++)
4617 if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4618 *cntr++ = read_7322_creg(dd,
4619 cntr7322indices[i] &
4620 _PORT_CNTR_IDXMASK);
4621 else
4622 *cntr++ = read_7322_creg32(dd,
4623 cntr7322indices[i]);
4624 }
4625done:
4626 return ret;
4627}
4628
4629static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4630 char **namep, u64 **cntrp)
4631{
4632 u32 ret;
4633
4634 if (namep) {
4635 ret = dd->cspec->portcntrnamelen;
4636 if (pos >= ret)
4637 ret = 0; /* final read after getting everything */
4638 else
4639 *namep = (char *)portcntr7322names;
4640 } else {
4641 struct qib_pportdata *ppd = &dd->pport[port];
4642 u64 *cntr = ppd->cpspec->portcntrs;
4643 int i;
4644
4645 ret = dd->cspec->nportcntrs * sizeof(u64);
4646 if (!cntr || pos >= ret) {
4647 /* everything read, or couldn't get memory */
4648 ret = 0;
4649 goto done;
4650 }
4651 *cntrp = cntr;
4652 for (i = 0; i < dd->cspec->nportcntrs; i++) {
4653 if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4654 *cntr++ = qib_portcntr_7322(ppd,
4655 portcntr7322indices[i] &
4656 _PORT_CNTR_IDXMASK);
4657 else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4658 *cntr++ = read_7322_creg_port(ppd,
4659 portcntr7322indices[i] &
4660 _PORT_CNTR_IDXMASK);
4661 else
4662 *cntr++ = read_7322_creg32_port(ppd,
4663 portcntr7322indices[i]);
4664 }
4665 }
4666done:
4667 return ret;
4668}
4669
4670/**
4671 * qib_get_7322_faststats - get word counters from chip before they overflow
4672 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
4673 *
4674 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
4675 * real purpose of this function is to maintain the notion of
4676 * "active time", which in turn is only logged into the eeprom,
4677 * which we don't have, yet, for 7322-based boards.
4678 *
4679 * called from add_timer
4680 */
4681static void qib_get_7322_faststats(unsigned long opaque)
4682{
4683 struct qib_devdata *dd = (struct qib_devdata *) opaque;
4684 struct qib_pportdata *ppd;
4685 unsigned long flags;
4686 u64 traffic_wds;
4687 int pidx;
4688
4689 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4690 ppd = dd->pport + pidx;
4691
4692 /*
4693 * If the port isn't enabled or isn't operational, or
4694 * diags are running (which can cause memory diags to fail),
4695 * skip this port this time.
4696 */
4697 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4698 || dd->diag_client)
4699 continue;
4700
4701 /*
4702 * Maintain an activity timer, based on traffic
4703 * exceeding a threshold, so we need to check the word-counts
4704 * even if they are 64-bit.
4705 */
4706 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4707 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4708 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4709 traffic_wds -= ppd->dd->traffic_wds;
4710 ppd->dd->traffic_wds += traffic_wds;
4711 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4712 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4713 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4714 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4715 QIB_IB_QDR) &&
4716 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4717 QIBL_LINKACTIVE)) &&
4718 ppd->cpspec->qdr_dfe_time &&
4719 time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
4720 ppd->cpspec->qdr_dfe_on = 0;
4721
4722 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4723 ppd->dd->cspec->r1 ?
4724 QDR_STATIC_ADAPT_INIT_R1 :
4725 QDR_STATIC_ADAPT_INIT);
4726 force_h1(ppd);
4727 }
4728 }
4729 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4730}
4731
4732/*
4733 * If we were using MSIx, try to fall back to INTx.
4734 */
4735static int qib_7322_intr_fallback(struct qib_devdata *dd)
4736{
4737 if (!dd->cspec->num_msix_entries)
4738 return 0; /* already using INTx */
4739
4740 qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
4741 " trying INTx interrupts\n");
4742 qib_7322_nomsix(dd);
4743 qib_enable_intx(dd->pcidev);
4744 qib_setup_7322_interrupt(dd, 0);
4745 return 1;
4746}
4747
4748/*
4749 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
4750 * than resetting the IBC or external link state, and useful in some
4751 * cases to cause some retraining. To do this right, we reset the IBC
4752 * as well, then return to the previous state (which may still be in reset).
4753 * NOTE: some callers of this "know" this writes the current value
4754 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
4755 * check all callers.
4756 */
4757static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4758{
4759 u64 val;
4760 struct qib_devdata *dd = ppd->dd;
4761 const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4762 SYM_MASK(IBPCSConfig_0, xcv_treset) |
4763 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4764
4765 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4766 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4767 ppd->cpspec->ibcctrl_a &
4768 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4769
4770 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4771 qib_read_kreg32(dd, kr_scratch);
4772 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4773 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4774 qib_write_kreg(dd, kr_scratch, 0ULL);
4775}
4776
4777/*
4778 * This code for non-IBTA-compliant IB speed negotiation is only known to
4779 * work for the SDR to DDR transition, and only between an HCA and a switch
4780 * with recent firmware. It is based on observed heuristics, rather than
4781 * actual knowledge of the non-compliant speed negotiation.
4782 * It has a number of hard-coded fields, since the hope is to rewrite this
4783 * when a spec is available on how the negotiation is intended to work.
4784 */
4785static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
4786 u32 dcnt, u32 *data)
4787{
4788 int i;
4789 u64 pbc;
4790 u32 __iomem *piobuf;
4791 u32 pnum, control, len;
4792 struct qib_devdata *dd = ppd->dd;
4793
4794 i = 0;
4795 len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
4796 control = qib_7322_setpbc_control(ppd, len, 0, 15);
4797 pbc = ((u64) control << 32) | len;
4798 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
4799 if (i++ > 15)
4800 return;
4801 udelay(2);
4802 }
4803 /* disable header check on this packet, since it can't be valid */
4804 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
4805 writeq(pbc, piobuf);
4806 qib_flush_wc();
4807 qib_pio_copy(piobuf + 2, hdr, 7);
4808 qib_pio_copy(piobuf + 9, data, dcnt);
4809 if (dd->flags & QIB_USE_SPCL_TRIG) {
4810 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
4811
4812 qib_flush_wc();
4813 __raw_writel(0xaebecede, piobuf + spcl_off);
4814 }
4815 qib_flush_wc();
4816 qib_sendbuf_done(dd, pnum);
4817 /* and re-enable hdr check */
4818 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
4819}
4820
4821/*
4822 * _start packet gets sent twice at start, _done gets sent twice at end
4823 */
4824static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
4825{
4826 struct qib_devdata *dd = ppd->dd;
4827 static u32 swapped;
4828 u32 dw, i, hcnt, dcnt, *data;
4829 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
4830 static u32 madpayload_start[0x40] = {
4831 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4832 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4833 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
4834 };
4835 static u32 madpayload_done[0x40] = {
4836 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4837 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4838 0x40000001, 0x1388, 0x15e, /* rest 0's */
4839 };
4840
4841 dcnt = ARRAY_SIZE(madpayload_start);
4842 hcnt = ARRAY_SIZE(hdr);
4843 if (!swapped) {
4844 /* for maintainability, do it at runtime */
4845 for (i = 0; i < hcnt; i++) {
4846 dw = (__force u32) cpu_to_be32(hdr[i]);
4847 hdr[i] = dw;
4848 }
4849 for (i = 0; i < dcnt; i++) {
4850 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
4851 madpayload_start[i] = dw;
4852 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
4853 madpayload_done[i] = dw;
4854 }
4855 swapped = 1;
4856 }
4857
4858 data = which ? madpayload_done : madpayload_start;
4859
4860 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4861 qib_read_kreg64(dd, kr_scratch);
4862 udelay(2);
4863 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4864 qib_read_kreg64(dd, kr_scratch);
4865 udelay(2);
4866}
4867
4868/*
4869 * Do the absolute minimum to cause an IB speed change, and make it
4870 * ready, but don't actually trigger the change. The caller will
4871 * do that when ready (if link is in Polling training state, it will
4872 * happen immediately, otherwise when link next goes down)
4873 *
4874 * This routine should only be used as part of the DDR autonegotiation
4875 * code for devices that are not compliant with IB 1.2 (or code that
4876 * fixes things up for same).
4877 *
4878 * When the link has gone down and autoneg is enabled, or autoneg has
4879 * failed and we give up until next time, we set both speeds, and
4880 * then we want IBTA enabled as well as "use max enabled speed".
4881 */
4882static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
4883{
4884 u64 newctrlb;
4885 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
4886 IBA7322_IBC_IBTA_1_2_MASK |
4887 IBA7322_IBC_MAX_SPEED_MASK);
4888
4889 if (speed & (speed - 1)) /* multiple speeds */
4890 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
4891 IBA7322_IBC_IBTA_1_2_MASK |
4892 IBA7322_IBC_MAX_SPEED_MASK;
4893 else
4894 newctrlb |= speed == QIB_IB_QDR ?
4895 IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
4896 ((speed == QIB_IB_DDR ?
4897 IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
4898
4899 if (newctrlb == ppd->cpspec->ibcctrl_b)
4900 return;
4901
4902 ppd->cpspec->ibcctrl_b = newctrlb;
4903 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4904 qib_write_kreg(ppd->dd, kr_scratch, 0);
4905}
4906
4907/*
4908 * This routine is only used when we are not talking to another
4909 * IB 1.2-compliant device that we think can do DDR.
4910 * (This includes all existing switch chips as of Oct 2007.)
4911 * 1.2-compliant devices go directly to DDR prior to reaching INIT
4912 */
4913static void try_7322_autoneg(struct qib_pportdata *ppd)
4914{
4915 unsigned long flags;
4916
4917 spin_lock_irqsave(&ppd->lflags_lock, flags);
4918 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
4919 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4920 qib_autoneg_7322_send(ppd, 0);
4921 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
4922 qib_7322_mini_pcs_reset(ppd);
4923 /* 2 msec is minimum length of a poll cycle */
4924 schedule_delayed_work(&ppd->cpspec->autoneg_work,
4925 msecs_to_jiffies(2));
4926}
4927
4928/*
4929 * Handle the empirically determined mechanism for auto-negotiation
4930 * of DDR speed with switches.
4931 */
4932static void autoneg_7322_work(struct work_struct *work)
4933{
4934 struct qib_pportdata *ppd;
4935 struct qib_devdata *dd;
4936 u64 startms;
4937 u32 i;
4938 unsigned long flags;
4939
4940 ppd = container_of(work, struct qib_chippport_specific,
4941 autoneg_work.work)->ppd;
4942 dd = ppd->dd;
4943
4944 startms = jiffies_to_msecs(jiffies);
4945
4946 /*
4947 * Busy wait for this first part; it should be at most a
4948 * few hundred usec, since we scheduled ourselves for 2 msec.
4949 */
4950 for (i = 0; i < 25; i++) {
4951 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
4952 == IB_7322_LT_STATE_POLLQUIET) {
4953 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
4954 break;
4955 }
4956 udelay(100);
4957 }
4958
4959 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
4960 goto done; /* we got there early or told to stop */
4961
4962 /* we expect this to timeout */
4963 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
4964 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
4965 msecs_to_jiffies(90)))
4966 goto done;
4967 qib_7322_mini_pcs_reset(ppd);
4968
4969 /* we expect this to timeout */
4970 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
4971 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
4972 msecs_to_jiffies(1700)))
4973 goto done;
4974 qib_7322_mini_pcs_reset(ppd);
4975
4976 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
4977
4978 /*
4979 * Wait up to 250 msec for link to train and get to INIT at DDR;
4980 * this should terminate early.
4981 */
4982 wait_event_timeout(ppd->cpspec->autoneg_wait,
4983 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
4984 msecs_to_jiffies(250));
4985done:
4986 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
4987 spin_lock_irqsave(&ppd->lflags_lock, flags);
4988 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
4989 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
4990 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
4991 ppd->cpspec->autoneg_tries = 0;
4992 }
4993 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4994 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
4995 }
4996}
4997
4998/*
4999 * This routine is used to request that IPG be set in the QLogic switch.
5000 * Only called if r1.
5001 */
5002static void try_7322_ipg(struct qib_pportdata *ppd)
5003{
5004 struct qib_ibport *ibp = &ppd->ibport_data;
5005 struct ib_mad_send_buf *send_buf;
5006 struct ib_mad_agent *agent;
5007 struct ib_smp *smp;
5008 unsigned delay;
5009 int ret;
5010
5011 agent = ibp->send_agent;
5012 if (!agent)
5013 goto retry;
5014
5015 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5016 IB_MGMT_MAD_DATA, GFP_ATOMIC);
5017 if (IS_ERR(send_buf))
5018 goto retry;
5019
5020 if (!ibp->smi_ah) {
5021 struct ib_ah_attr attr;
5022 struct ib_ah *ah;
5023
5024 memset(&attr, 0, sizeof attr);
5025 attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
5026 attr.port_num = ppd->port;
5027 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
5028 if (IS_ERR(ah))
5029 ret = -EINVAL;
5030 else {
5031 send_buf->ah = ah;
5032 ibp->smi_ah = to_iah(ah);
5033 ret = 0;
5034 }
5035 } else {
5036 send_buf->ah = &ibp->smi_ah->ibah;
5037 ret = 0;
5038 }
5039
5040 smp = send_buf->mad;
5041 smp->base_version = IB_MGMT_BASE_VERSION;
5042 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5043 smp->class_version = 1;
5044 smp->method = IB_MGMT_METHOD_SEND;
5045 smp->hop_cnt = 1;
5046 smp->attr_id = QIB_VENDOR_IPG;
5047 smp->attr_mod = 0;
5048
5049 if (!ret)
5050 ret = ib_post_send_mad(send_buf, NULL);
5051 if (ret)
5052 ib_free_send_mad(send_buf);
5053retry:
5054 delay = 2 << ppd->cpspec->ipg_tries;
5055 schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
5056}
5057
5058/*
5059 * Timeout handler for setting IPG.
5060 * Only called if r1.
5061 */
5062static void ipg_7322_work(struct work_struct *work)
5063{
5064 struct qib_pportdata *ppd;
5065
5066 ppd = container_of(work, struct qib_chippport_specific,
5067 ipg_work.work)->ppd;
5068 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5069 && ++ppd->cpspec->ipg_tries <= 10)
5070 try_7322_ipg(ppd);
5071}
5072
5073static u32 qib_7322_iblink_state(u64 ibcs)
5074{
5075 u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5076
5077 switch (state) {
5078 case IB_7322_L_STATE_INIT:
5079 state = IB_PORT_INIT;
5080 break;
5081 case IB_7322_L_STATE_ARM:
5082 state = IB_PORT_ARMED;
5083 break;
5084 case IB_7322_L_STATE_ACTIVE:
5085 /* fall through */
5086 case IB_7322_L_STATE_ACT_DEFER:
5087 state = IB_PORT_ACTIVE;
5088 break;
5089 default: /* fall through */
5090 case IB_7322_L_STATE_DOWN:
5091 state = IB_PORT_DOWN;
5092 break;
5093 }
5094 return state;
5095}
5096
5097/* returns the IBTA port state, rather than the IBC link training state */
5098static u8 qib_7322_phys_portstate(u64 ibcs)
5099{
5100 u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5101 return qib_7322_physportstate[state];
5102}
5103
5104static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5105{
5106 int ret = 0, symadj = 0;
5107 unsigned long flags;
5108 int mult;
5109
5110 spin_lock_irqsave(&ppd->lflags_lock, flags);
5111 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5112 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5113
5114 /* Update our picture of width and speed from chip */
5115 if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5116 ppd->link_speed_active = QIB_IB_QDR;
5117 mult = 4;
5118 } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5119 ppd->link_speed_active = QIB_IB_DDR;
5120 mult = 2;
5121 } else {
5122 ppd->link_speed_active = QIB_IB_SDR;
5123 mult = 1;
5124 }
5125 if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5126 ppd->link_width_active = IB_WIDTH_4X;
5127 mult *= 4;
5128 } else
5129 ppd->link_width_active = IB_WIDTH_1X;
5130 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5131
5132 if (!ibup) {
5133 u64 clr;
5134
5135 /* Link went down. */
5136 /* do IPG MAD again after linkdown, even if last time failed */
5137 ppd->cpspec->ipg_tries = 0;
5138 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5139 (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5140 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5141 if (clr)
5142 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5143 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5144 QIBL_IB_AUTONEG_INPROG)))
5145 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5146 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5147 /* unlock the Tx settings, speed may change */
5148 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5149 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5150 reset_tx_deemphasis_override));
5151 qib_cancel_sends(ppd);
5152 /* on link down, ensure sane pcs state */
5153 qib_7322_mini_pcs_reset(ppd);
5154 spin_lock_irqsave(&ppd->sdma_lock, flags);
5155 if (__qib_sdma_running(ppd))
5156 __qib_sdma_process_event(ppd,
5157 qib_sdma_event_e70_go_idle);
5158 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5159 }
5160 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5161 if (clr == ppd->cpspec->iblnkdownsnap)
5162 ppd->cpspec->iblnkdowndelta++;
5163 } else {
5164 if (qib_compat_ddr_negotiate &&
5165 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5166 QIBL_IB_AUTONEG_INPROG)) &&
5167 ppd->link_speed_active == QIB_IB_SDR &&
5168 (ppd->link_speed_enabled & QIB_IB_DDR)
5169 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5170 /* we are SDR, and auto-negotiation enabled */
5171 ++ppd->cpspec->autoneg_tries;
5172 if (!ppd->cpspec->ibdeltainprog) {
5173 ppd->cpspec->ibdeltainprog = 1;
5174 ppd->cpspec->ibsymdelta +=
5175 read_7322_creg32_port(ppd,
5176 crp_ibsymbolerr) -
5177 ppd->cpspec->ibsymsnap;
5178 ppd->cpspec->iblnkerrdelta +=
5179 read_7322_creg32_port(ppd,
5180 crp_iblinkerrrecov) -
5181 ppd->cpspec->iblnkerrsnap;
5182 }
5183 try_7322_autoneg(ppd);
5184 ret = 1; /* no other IB status change processing */
5185 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5186 ppd->link_speed_active == QIB_IB_SDR) {
5187 qib_autoneg_7322_send(ppd, 1);
5188 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5189 qib_7322_mini_pcs_reset(ppd);
5190 udelay(2);
5191 ret = 1; /* no other IB status change processing */
5192 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5193 (ppd->link_speed_active & QIB_IB_DDR)) {
5194 spin_lock_irqsave(&ppd->lflags_lock, flags);
5195 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5196 QIBL_IB_AUTONEG_FAILED);
5197 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5198 ppd->cpspec->autoneg_tries = 0;
5199 /* re-enable SDR, for next link down */
5200 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5201 wake_up(&ppd->cpspec->autoneg_wait);
5202 symadj = 1;
5203 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5204 /*
5205 * Clear autoneg failure flag, and do setup
5206 * so we'll try next time link goes down and
5207 * back to INIT (possibly connected to a
5208 * different device).
5209 */
5210 spin_lock_irqsave(&ppd->lflags_lock, flags);
5211 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5212 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5213 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5214 symadj = 1;
5215 }
5216 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5217 symadj = 1;
5218 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5219 try_7322_ipg(ppd);
5220 if (!ppd->cpspec->recovery_init)
5221 setup_7322_link_recovery(ppd, 0);
5222 ppd->cpspec->qdr_dfe_time = jiffies +
5223 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5224 }
5225 ppd->cpspec->ibmalfusesnap = 0;
5226 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5227 crp_errlink);
5228 }
5229 if (symadj) {
5230 ppd->cpspec->iblnkdownsnap =
5231 read_7322_creg32_port(ppd, crp_iblinkdown);
5232 if (ppd->cpspec->ibdeltainprog) {
5233 ppd->cpspec->ibdeltainprog = 0;
5234 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5235 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5236 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5237 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5238 }
5239 } else if (!ibup && qib_compat_ddr_negotiate &&
5240 !ppd->cpspec->ibdeltainprog &&
5241 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5242 ppd->cpspec->ibdeltainprog = 1;
5243 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5244 crp_ibsymbolerr);
5245 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5246 crp_iblinkerrrecov);
5247 }
5248
5249 if (!ret)
5250 qib_setup_7322_setextled(ppd, ibup);
5251 return ret;
5252}
5253
5254/*
5255 * Does read/modify/write to appropriate registers to
5256 * set output and direction bits selected by mask.
5257 * These are in their canonical positions (e.g. the lsb of
5258 * dir will end up in D48 of extctrl on existing chips).
5259 * Returns contents of GP Inputs.
5260 */
5261static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5262{
5263 u64 read_val, new_out;
5264 unsigned long flags;
5265
5266 if (mask) {
5267 /* some bits being written, lock access to GPIO */
5268 dir &= mask;
5269 out &= mask;
5270 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5271 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5272 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5273 new_out = (dd->cspec->gpio_out & ~mask) | out;
5274
5275 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5276 qib_write_kreg(dd, kr_gpio_out, new_out);
5277 dd->cspec->gpio_out = new_out;
5278 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5279 }
5280 /*
5281 * It is unlikely that a read at this time would get valid
5282 * data on a pin whose direction line was set in the same
5283 * call to this function. We include the read here because
5284 * that allows us to potentially combine a change on one pin with
5285 * a read on another, and because the old code did something like
5286 * this.
5287 */
5288 read_val = qib_read_kreg64(dd, kr_extstatus);
5289 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5290}
5291
5292/* Enable writes to config EEPROM, if possible. Returns previous state */
5293static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5294{
5295 int prev_wen;
5296 u32 mask;
5297
5298 mask = 1 << QIB_EEPROM_WEN_NUM;
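	/*
	 * The write-enable GPIO appears to be active-low: "wen" drives the
	 * pin to 0 to enable writes, and the previous state is recovered by
	 * inverting the GPIO readback below.
	 */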
5299 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5300 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5301
5302 return prev_wen & 1;
5303}
5304
5305/*
5306 * Read fundamental info we need to use the chip. These are
5307 * the registers that describe chip capabilities, and are
5308 * saved in shadow registers.
5309 */
5310static void get_7322_chip_params(struct qib_devdata *dd)
5311{
5312 u64 val;
5313 u32 piobufs;
5314 int mtu;
5315
5316 dd->palign = qib_read_kreg32(dd, kr_pagealign);
5317
5318 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5319
5320 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5321 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5322 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5323 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5324 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5325
5326 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5327 dd->piobcnt2k = val & ~0U;
5328 dd->piobcnt4k = val >> 32;
5329 val = qib_read_kreg64(dd, kr_sendpiosize);
5330 dd->piosize2k = val & ~0U;
5331 dd->piosize4k = val >> 32;
5332
5333 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5334 if (mtu == -1)
5335 mtu = QIB_DEFAULT_MTU;
5336 dd->pport[0].ibmtu = (u32)mtu;
5337 dd->pport[1].ibmtu = (u32)mtu;
5338
5339 /* these may be adjusted in init_chip_wc_pat() */
5340 dd->pio2kbase = (u32 __iomem *)
5341 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5342 dd->pio4kbase = (u32 __iomem *)
5343 ((char __iomem *) dd->kregbase +
5344 (dd->piobufbase >> 32));
5345 /*
5346 * 4K buffers take 2 pages; we use roundup just to be
5347 * paranoid; we calculate it once here, rather than on
5348 * every buffer allocation.
5349 */
5350 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5351
5352 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5353
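	/*
	 * The divisor works out to 32: each 64-bit pioavail register carries
	 * 2 bits of state per send buffer, so this is just a round-up
	 * division of piobufs by buffers-per-register.
	 */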
5354 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5355 (sizeof(u64) * BITS_PER_BYTE / 2);
5356}
5357
5358/*
5359 * The chip base addresses in cspec and cpspec have to be set
5360 * after possible init_chip_wc_pat(), rather than in
5361 * get_7322_chip_params(), so this is split out as a separate function.
5362 */
5363static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5364{
5365 u32 cregbase;
5366 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5367
5368 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5369 (char __iomem *)dd->kregbase);
5370
5371 dd->egrtidbase = (u64 __iomem *)
5372 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5373
5374 /* port registers are defined as relative to base of chip */
5375 dd->pport[0].cpspec->kpregbase =
5376 (u64 __iomem *)((char __iomem *)dd->kregbase);
5377 dd->pport[1].cpspec->kpregbase =
5378 (u64 __iomem *)(dd->palign +
5379 (char __iomem *)dd->kregbase);
5380 dd->pport[0].cpspec->cpregbase =
5381 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5382 kr_counterregbase) + (char __iomem *)dd->kregbase);
5383 dd->pport[1].cpspec->cpregbase =
5384 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5385 kr_counterregbase) + (char __iomem *)dd->kregbase);
5386}
5387
5388/*
5389 * This is a fairly special-purpose observer, so we only support
5390 * the port-specific parts of SendCtrl
5391 */
5392
5393#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5394 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5395 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5396 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5397 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5398 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5399 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5400
5401static int sendctrl_hook(struct qib_devdata *dd,
5402 const struct diag_observer *op, u32 offs,
5403 u64 *data, u64 mask, int only_32)
5404{
5405 unsigned long flags;
5406 unsigned idx;
5407 unsigned pidx;
5408 struct qib_pportdata *ppd = NULL;
5409 u64 local_data, all_bits;
5410
5411 /*
5412 * The fixed correspondence between Physical ports and pports is
5413 * severed. We need to hunt for the ppd that corresponds
5414 * to the offset we got. And we have to do that without admitting
5415 * we know the stride, apparently.
5416 */
5417 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5418 u64 __iomem *psptr;
5419 u32 psoffs;
5420
5421 ppd = dd->pport + pidx;
5422 if (!ppd->cpspec->kpregbase)
5423 continue;
5424
5425 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5426 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5427 if (psoffs == offs)
5428 break;
5429 }
5430
5431 /* If pport is not being managed by driver, just avoid shadows. */
5432 if (pidx >= dd->num_pports)
5433 ppd = NULL;
5434
5435 /* In any case, "idx" is flat index in kreg space */
5436 idx = offs / sizeof(u64);
5437
5438 all_bits = ~0ULL;
5439 if (only_32)
5440 all_bits >>= 32;
5441
5442 spin_lock_irqsave(&dd->sendctrl_lock, flags);
5443 if (!ppd || (mask & all_bits) != all_bits) {
5444 /*
5445 * At least some mask bits are zero, so we need
5446 * to read. The judgement call is whether from
5447 * reg or shadow. First-cut: read reg, and complain
5448 * if any bits which should be shadowed are different
5449 * from their shadowed value.
5450 */
5451 if (only_32)
5452 local_data = (u64)qib_read_kreg32(dd, idx);
5453 else
5454 local_data = qib_read_kreg64(dd, idx);
5455 *data = (local_data & ~mask) | (*data & mask);
5456 }
5457 if (mask) {
5458 /*
5459 * At least some mask bits are one, so we need
5460 * to write, but only shadow some bits.
5461 */
5462 u64 sval, tval; /* Shadowed, transient */
5463
5464 /*
5465 * New shadow val is bits we don't want to touch,
5466 * ORed with bits we do, that are intended for shadow.
5467 */
5468 if (ppd) {
5469 sval = ppd->p_sendctrl & ~mask;
5470 sval |= *data & SENDCTRL_SHADOWED & mask;
5471 ppd->p_sendctrl = sval;
5472 } else
5473 sval = *data & SENDCTRL_SHADOWED & mask;
5474 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5475 qib_write_kreg(dd, idx, tval);
5476 qib_write_kreg(dd, kr_scratch, 0ULL);
5477 }
5478 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5479 return only_32 ? 4 : 8;
5480}
5481
5482static const struct diag_observer sendctrl_0_observer = {
5483 sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5484 KREG_IDX(SendCtrl_0) * sizeof(u64)
5485};
5486
5487static const struct diag_observer sendctrl_1_observer = {
5488 sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5489 KREG_IDX(SendCtrl_1) * sizeof(u64)
5490};
5491
5492static ushort sdma_fetch_prio = 8;
5493module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5494MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5495
5496/* Besides logging QSFP events, we set appropriate TxDDS values */
5497static void init_txdds_table(struct qib_pportdata *ppd, int override);
5498
5499static void qsfp_7322_event(struct work_struct *work)
5500{
5501 struct qib_qsfp_data *qd;
5502 struct qib_pportdata *ppd;
5503 u64 pwrup;
5504 int ret;
5505 u32 le2;
5506
5507 qd = container_of(work, struct qib_qsfp_data, work);
5508 ppd = qd->ppd;
5509 pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
5510
5511 /*
5512 * Some QSFPs not only do not respond until the full power-up
5513 * time, but may behave badly if we try. So hold off responding
5514 * to insertion.
5515 */
5516 while (1) {
5517 u64 now = get_jiffies_64();
5518 if (time_after64(now, pwrup))
5519 break;
5520 msleep(1);
5521 }
5522 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5523 /*
5524 * Need to change LE2 back to defaults if we couldn't
5525 * read the cable type (to handle cable swaps), so do this
5526 * even on failure to read cable information. We don't
5527 * get here for QME, so IS_QME check not needed here.
5528 */
5529 le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
5530 !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
5531 LE2_5m : LE2_DEFAULT;
5532 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5533 init_txdds_table(ppd, 0);
5534}
5535
5536/*
5537 * There is little we can do but complain to the user if QSFP
5538 * initialization fails.
5539 */
5540static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5541{
5542 unsigned long flags;
5543 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5544 struct qib_devdata *dd = ppd->dd;
5545 u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5546
5547 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5548 qd->ppd = ppd;
5549 qib_qsfp_init(qd, qsfp_7322_event);
5550 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5551 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5552 dd->cspec->gpio_mask |= mod_prs_bit;
5553 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5554 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5555 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5556}
5557
5558/*
5559 * called at device initialization time, and also if the txselect
5560 * module parameter is changed. This is used for cables that don't
5561 * have valid QSFP EEPROMs (not present, or attenuation is zero).
5562 * We initialize to the default, then if there is a specific
5563 * unit,port match, we use that (and set it immediately, for the
5564 * current speed, if the link is at INIT or better).
5565 * String format is "default# unit#,port#=# ... u,p=#", separators must
5566 * be a SPACE character. A newline terminates. The u,p=# tuples may
5567 * optionally have "u,p=#,#", where the final # is the H1 value.
5568 * The last specific match is used (actually, all are used, but last
5569 * one is the one that winds up set); if none at all, fall back on default.
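 * A hypothetical example: "2 1,1=5 1,2=7,9" sets the default index to 2,
 * uses table entry 5 for unit 1 port 1, and entry 7 with H1 value 9 for
 * unit 1 port 2.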
5570 */
5571static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5572{
5573 char *nxt, *str;
5574 u32 pidx, unit, port, deflt, h1;
5575 unsigned long val;
5576 int any = 0, seth1;
5577
5578 str = txselect_list;
5579
5580 /* default number is validated in setup_txselect() */
5581 deflt = simple_strtoul(str, &nxt, 0);
5582 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5583 dd->pport[pidx].cpspec->no_eep = deflt;
5584
5585 while (*nxt && nxt[1]) {
5586 str = ++nxt;
5587 unit = simple_strtoul(str, &nxt, 0);
5588 if (nxt == str || !*nxt || *nxt != ',') {
5589 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5590 ;
5591 continue;
5592 }
5593 str = ++nxt;
5594 port = simple_strtoul(str, &nxt, 0);
5595 if (nxt == str || *nxt != '=') {
5596 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5597 ;
5598 continue;
5599 }
5600 str = ++nxt;
5601 val = simple_strtoul(str, &nxt, 0);
5602 if (nxt == str) {
5603 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5604 ;
5605 continue;
5606 }
5607 if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
5608 continue;
5609 seth1 = 0;
5610 h1 = 0; /* gcc thinks it might be used uninitted */
5611 if (*nxt == ',' && nxt[1]) {
5612 str = ++nxt;
5613 h1 = (u32)simple_strtoul(str, &nxt, 0);
5614 if (nxt == str)
5615 while (*nxt && *nxt++ != ' ') /* skip */
5616 ;
5617 else
5618 seth1 = 1;
5619 }
5620 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5621 ++pidx) {
5622 struct qib_pportdata *ppd = &dd->pport[pidx];
5623
5624 if (ppd->port != port || !ppd->link_speed_supported)
5625 continue;
5626 ppd->cpspec->no_eep = val;
5627 /* now change the IBC and serdes, overriding generic */
5628 init_txdds_table(ppd, 1);
5629 any++;
5630 }
5631 if (*nxt == '\n')
5632 break; /* done */
5633 }
5634 if (change && !any) {
5635 /* no specific setting, use the default.
5636 * Change the IBC and serdes, but since it's
5637 * general, don't override specific settings.
5638 */
5639 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5640 if (dd->pport[pidx].link_speed_supported)
5641 init_txdds_table(&dd->pport[pidx], 0);
5642 }
5643}
5644
5645/* handle the txselect parameter changing */
5646static int setup_txselect(const char *str, struct kernel_param *kp)
5647{
5648 struct qib_devdata *dd;
5649 unsigned long val;
5650 char *n;
5651 if (strlen(str) >= MAX_ATTEN_LEN) {
5652 printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
5653 "too long\n");
5654 return -ENOSPC;
5655 }
5656 val = simple_strtoul(str, &n, 0);
5657 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
5658 printk(KERN_INFO QIB_DRV_NAME
5659 "txselect_values must start with a number < %d\n",
5660 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
5661 return -EINVAL;
5662 }
5663 strcpy(txselect_list, str);
5664
5665 list_for_each_entry(dd, &qib_dev_list, list)
5666 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
5667 set_no_qsfp_atten(dd, 1);
5668 return 0;
5669}
5670
5671/*
5672 * Write the final few registers that depend on some of the
5673 * init setup. Done late in init, just before bringing up
5674 * the serdes.
5675 */
5676static int qib_late_7322_initreg(struct qib_devdata *dd)
5677{
5678 int ret = 0, n;
5679 u64 val;
5680
5681 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5682 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5683 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5684 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5685 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5686 if (val != dd->pioavailregs_phys) {
5687 qib_dev_err(dd, "Catastrophic software error, "
5688 "SendPIOAvailAddr written as %lx, "
5689 "read back as %llx\n",
5690 (unsigned long) dd->pioavailregs_phys,
5691 (unsigned long long) val);
5692 ret = -EINVAL;
5693 }
5694
5695 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5696 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5697 /* driver sends get pkey, lid, etc. checking also, to catch bugs */
5698 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5699
5700 qib_register_observer(dd, &sendctrl_0_observer);
5701 qib_register_observer(dd, &sendctrl_1_observer);
5702
5703 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5704 qib_write_kreg(dd, kr_control, dd->control);
5705 /*
5706 * Set SendDmaFetchPriority and init Tx params, including
5707 * QSFP handler on boards that have QSFP.
5708 * First set our default attenuation entry for cables that
5709 * don't have valid attenuation.
5710 */
5711 set_no_qsfp_atten(dd, 0);
5712 for (n = 0; n < dd->num_pports; ++n) {
5713 struct qib_pportdata *ppd = dd->pport + n;
5714
5715 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5716 sdma_fetch_prio & 0xf);
5717 /* Initialize qsfp if present on board. */
5718 if (dd->flags & QIB_HAS_QSFP)
5719 qib_init_7322_qsfp(ppd);
5720 }
5721 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5722 qib_write_kreg(dd, kr_control, dd->control);
5723
5724 return ret;
5725}
5726
5727/* per IB port errors. */
5728#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5729 MASK_ACROSS(8, 15))
5730#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5731#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5732 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5733 MASK_ACROSS(0, 11))
5734
5735/*
5736 * Write the initialization per-port registers that need to be done at
5737 * driver load and after reset completes (i.e., that aren't done as part
5738 * of other init procedures called from qib_init.c).
5739 * Some of these should be redundant on reset, but play safe.
5740 */
5741static void write_7322_init_portregs(struct qib_pportdata *ppd)
5742{
5743 u64 val;
5744 int i;
5745
5746 if (!ppd->link_speed_supported) {
5747 /* no buffer credits for this port */
5748 for (i = 1; i < 8; i++)
5749 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5750 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5751 qib_write_kreg(ppd->dd, kr_scratch, 0);
5752 return;
5753 }
5754
5755 /*
5756 * Set the number of supported virtual lanes in IBC,
5757 * for flow control packet handling on unsupported VLs
5758 */
5759 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5760 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5761 val |= (u64)(ppd->vls_supported - 1) <<
5762 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5763 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5764
5765 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5766
5767 /* enable tx header checking */
5768 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5769 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5770 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5771
5772 qib_write_kreg_port(ppd, krp_ncmodectrl,
5773 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5774
5775 /*
5776 * Unconditionally clear the bufmask bits. If SDMA is
5777 * enabled, we'll set them appropriately later.
5778 */
5779 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5780 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5781 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5782 if (ppd->dd->cspec->r1)
5783 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5784}
5785
5786/*
5787 * Write the initialization per-device registers that need to be done at
5788 * driver load and after reset completes (i.e., that aren't done as part
5789 * of other init procedures called from qib_init.c). Also write per-port
5790 * registers that are affected by overall device config, such as QP mapping.
5791 * Some of these should be redundant on reset, but play safe.
5792 */
5793static void write_7322_initregs(struct qib_devdata *dd)
5794{
5795 struct qib_pportdata *ppd;
5796 int i, pidx;
5797 u64 val;
5798
5799 /* Set Multicast QPs received by port 2 to map to context one. */
5800 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
5801
5802 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5803 unsigned n, regno;
5804 unsigned long flags;
5805
5806 if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
5807 continue;
5808
5809 ppd = &dd->pport[pidx];
5810
5811 /* be paranoid against later code motion, etc. */
5812 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
5813 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
5814 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
5815
5816 /* Initialize QP to context mapping */
5817 regno = krp_rcvqpmaptable;
5818 val = 0;
5819 if (dd->num_pports > 1)
5820 n = dd->first_user_ctxt / dd->num_pports;
5821 else
5822 n = dd->first_user_ctxt - 1;
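		/* pack six 5-bit context numbers into each RcvQPMapTable register (32 entries total) */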
5823 for (i = 0; i < 32; ) {
5824 unsigned ctxt;
5825
5826 if (dd->num_pports > 1)
5827 ctxt = (i % n) * dd->num_pports + pidx;
5828 else if (i % n)
5829 ctxt = (i % n) + 1;
5830 else
5831 ctxt = ppd->hw_pidx;
5832 val |= ctxt << (5 * (i % 6));
5833 i++;
5834 if (i % 6 == 0) {
5835 qib_write_kreg_port(ppd, regno, val);
5836 val = 0;
5837 regno++;
5838 }
5839 }
5840 qib_write_kreg_port(ppd, regno, val);
5841 }
5842
5843 /*
5844	 * Set up interrupt mitigation for kernel contexts, but
5845 * not user contexts (user contexts use interrupts when
5846 * stalled waiting for any packet, so want those interrupts
5847 * right away).
5848 */
5849 for (i = 0; i < dd->first_user_ctxt; i++) {
5850 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
5851 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
5852 }
5853
5854 /*
5855 * Initialize as (disabled) rcvflow tables. Application code
5856	 * will set up each flow as it uses the flow.
5857 * Doesn't clear any of the error bits that might be set.
5858 */
5859 val = TIDFLOW_ERRBITS; /* these are W1C */
5860 for (i = 0; i < dd->ctxtcnt; i++) {
5861 int flow;
5862 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
5863 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
5864 }
5865
5866 /*
5867	 * Dual-port cards init to dual-port link recovery; single-port cards
5868	 * to the one port. Dual-port cards may later adjust to one port,
5869	 * and then back to dual port if both ports are connected.
5870	 */
5871 if (dd->num_pports)
5872 setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
5873}
5874
5875static int qib_init_7322_variables(struct qib_devdata *dd)
5876{
5877 struct qib_pportdata *ppd;
5878 unsigned features, pidx, sbufcnt;
5879 int ret, mtu;
5880 u32 sbufs, updthresh;
5881
5882 /* pport structs are contiguous, allocated after devdata */
5883 ppd = (struct qib_pportdata *)(dd + 1);
5884 dd->pport = ppd;
5885 ppd[0].dd = dd;
5886 ppd[1].dd = dd;
5887
5888 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
5889
5890 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
5891 ppd[1].cpspec = &ppd[0].cpspec[1];
5892 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
5893 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
5894
5895 spin_lock_init(&dd->cspec->rcvmod_lock);
5896 spin_lock_init(&dd->cspec->gpio_lock);
5897
5898 /* we haven't yet set QIB_PRESENT, so use read directly */
5899 dd->revision = readq(&dd->kregbase[kr_revision]);
5900
5901 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
5902 qib_dev_err(dd, "Revision register read failure, "
5903 "giving up initialization\n");
5904 ret = -ENODEV;
5905 goto bail;
5906 }
5907 dd->flags |= QIB_PRESENT; /* now register routines work */
5908
5909 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
5910 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
5911 dd->cspec->r1 = dd->minrev == 1;
5912
5913 get_7322_chip_params(dd);
5914 features = qib_7322_boardname(dd);
5915
5916 /* now that piobcnt2k and 4k set, we can allocate these */
5917 sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
5918 NUM_VL15_BUFS + BITS_PER_LONG - 1;
5919 sbufcnt /= BITS_PER_LONG;
5920 dd->cspec->sendchkenable = kmalloc(sbufcnt *
5921 sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
5922 dd->cspec->sendgrhchk = kmalloc(sbufcnt *
5923 sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
5924 dd->cspec->sendibchk = kmalloc(sbufcnt *
5925 sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
5926 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
5927 !dd->cspec->sendibchk) {
5928 qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
5929 ret = -ENOMEM;
5930 goto bail;
5931 }
5932
5933 ppd = dd->pport;
5934
5935 /*
5936 * GPIO bits for TWSI data and clock,
5937 * used for serial EEPROM.
5938 */
5939 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
5940 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
5941 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
5942
5943 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
5944 QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
5945 QIB_HAS_THRESH_UPDATE |
5946 (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
5947 dd->flags |= qib_special_trigger ?
5948 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
5949
5950 /*
5951	 * Set up initial values. These may change when PAT is enabled, but
5952 * we need these to do initial chip register accesses.
5953 */
5954 qib_7322_set_baseaddrs(dd);
5955
5956 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5957 if (mtu == -1)
5958 mtu = QIB_DEFAULT_MTU;
5959
5960 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
5961 /* all hwerrors become interrupts, unless special purposed */
5962 dd->cspec->hwerrmask = ~0ULL;
5963 /* link_recovery setup causes these errors, so ignore them,
5964 * other than clearing them when they occur */
5965 dd->cspec->hwerrmask &=
5966 ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
5967 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
5968 HWE_MASK(LATriggered));
5969
5970 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
5971 struct qib_chippport_specific *cp = ppd->cpspec;
5972 ppd->link_speed_supported = features & PORT_SPD_CAP;
5973 features >>= PORT_SPD_CAP_SHIFT;
5974 if (!ppd->link_speed_supported) {
5975 /* single port mode (7340, or configured) */
5976 dd->skip_kctxt_mask |= 1 << pidx;
5977 if (pidx == 0) {
5978 /* Make sure port is disabled. */
5979 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
5980 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
5981 ppd[0] = ppd[1];
5982 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
5983 IBSerdesPClkNotDetectMask_0)
5984 | SYM_MASK(HwErrMask,
5985 SDmaMemReadErrMask_0));
5986 dd->cspec->int_enable_mask &= ~(
5987 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
5988 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
5989 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
5990 SYM_MASK(IntMask, SDmaIntMask_0) |
5991 SYM_MASK(IntMask, ErrIntMask_0) |
5992 SYM_MASK(IntMask, SendDoneIntMask_0));
5993 } else {
5994 /* Make sure port is disabled. */
5995 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
5996 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
5997 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
5998 IBSerdesPClkNotDetectMask_1)
5999 | SYM_MASK(HwErrMask,
6000 SDmaMemReadErrMask_1));
6001 dd->cspec->int_enable_mask &= ~(
6002 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6003 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6004 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6005 SYM_MASK(IntMask, SDmaIntMask_1) |
6006 SYM_MASK(IntMask, ErrIntMask_1) |
6007 SYM_MASK(IntMask, SendDoneIntMask_1));
6008 }
6009 continue;
6010 }
6011
6012 dd->num_pports++;
6013 qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6014
6015 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6016 ppd->link_width_enabled = IB_WIDTH_4X;
6017 ppd->link_speed_enabled = ppd->link_speed_supported;
6018 /*
6019 * Set the initial values to reasonable default, will be set
6020 * for real when link is up.
6021 */
6022 ppd->link_width_active = IB_WIDTH_4X;
6023 ppd->link_speed_active = QIB_IB_SDR;
6024 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6025 switch (qib_num_cfg_vls) {
6026 case 1:
6027 ppd->vls_supported = IB_VL_VL0;
6028 break;
6029 case 2:
6030 ppd->vls_supported = IB_VL_VL0_1;
6031 break;
6032 default:
6033 qib_devinfo(dd->pcidev,
6034 "Invalid num_vls %u, using 4 VLs\n",
6035 qib_num_cfg_vls);
6036 qib_num_cfg_vls = 4;
6037 /* fall through */
6038 case 4:
6039 ppd->vls_supported = IB_VL_VL0_3;
6040 break;
6041 case 8:
6042 if (mtu <= 2048)
6043 ppd->vls_supported = IB_VL_VL0_7;
6044 else {
6045 qib_devinfo(dd->pcidev,
6046				    "Invalid num_vls %u for MTU %d"
6047				    ", using 4 VLs\n",
6048 qib_num_cfg_vls, mtu);
6049 ppd->vls_supported = IB_VL_VL0_3;
6050 qib_num_cfg_vls = 4;
6051 }
6052 break;
6053 }
6054 ppd->vls_operational = ppd->vls_supported;
6055
6056 init_waitqueue_head(&cp->autoneg_wait);
6057 INIT_DELAYED_WORK(&cp->autoneg_work,
6058 autoneg_7322_work);
6059 if (ppd->dd->cspec->r1)
6060 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6061
6062 /*
6063 * For Mez and similar cards, no qsfp info, so do
6064 * the "cable info" setup here. Can be overridden
6065 * in adapter-specific routines.
6066 */
6067 if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
6068 if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
6069 qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
6070 "Unknown mezzanine card type\n",
6071 dd->unit, ppd->port);
6072 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6073 /*
6074 * Choose center value as default tx serdes setting
6075 * until changed through module parameter.
6076 */
6077 ppd->cpspec->no_eep = IS_QMH(dd) ?
6078 TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6079 } else
6080 cp->h1_val = H1_FORCE_VAL;
6081
6082 /* Avoid writes to chip for mini_init */
6083 if (!qib_mini_init)
6084 write_7322_init_portregs(ppd);
6085
6086 init_timer(&cp->chase_timer);
6087 cp->chase_timer.function = reenable_chase;
6088 cp->chase_timer.data = (unsigned long)ppd;
6089
6090 ppd++;
6091 }
6092
6093 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
6094 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
6095 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6096
6097 /* we always allocate at least 2048 bytes for eager buffers */
6098 dd->rcvegrbufsize = max(mtu, 2048);
6099
6100 qib_7322_tidtemplate(dd);
6101
6102 /*
6103 * We can request a receive interrupt for 1 or
6104 * more packets from current offset.
6105 */
6106 dd->rhdrhead_intr_off =
6107 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6108
6109	/* set up the stats timer; the add_timer is done at end of init */
6110 init_timer(&dd->stats_timer);
6111 dd->stats_timer.function = qib_get_7322_faststats;
6112 dd->stats_timer.data = (unsigned long) dd;
6113
6114 dd->ureg_align = 0x10000; /* 64KB alignment */
6115
6116 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6117
6118 qib_7322_config_ctxts(dd);
6119 qib_set_ctxtcnt(dd);
6120
6121 if (qib_wc_pat) {
6122 ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
6123 if (ret)
6124 goto bail;
6125 }
6126 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6127
6128 ret = 0;
6129 if (qib_mini_init)
6130 goto bail;
6131 if (!dd->num_pports) {
6132 qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6133 goto bail; /* no error, so can still figure out why err */
6134 }
6135
6136 write_7322_initregs(dd);
6137 ret = qib_create_ctxts(dd);
6138 init_7322_cntrnames(dd);
6139
6140 updthresh = 8U; /* update threshold */
6141
6142 /* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6143 * reserve the update threshold amount for other kernel use, such
6144 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6145 * unless we aren't enabling SDMA, in which case we want to use
6146 * all the 4k bufs for the kernel.
6147 * if this was less than the update threshold, we could wait
6148 * a long time for an update. Coded this way because we
6149 * sometimes change the update threshold for various reasons,
6150 * and we want this to remain robust.
6151 */
6152 if (dd->flags & QIB_HAS_SEND_DMA) {
6153 dd->cspec->sdmabufcnt = dd->piobcnt4k;
6154 sbufs = updthresh > 3 ? updthresh : 3;
6155 } else {
6156 dd->cspec->sdmabufcnt = 0;
6157 sbufs = dd->piobcnt4k;
6158 }
6159 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6160 dd->cspec->sdmabufcnt;
6161 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6162 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6163 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6164 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6165
6166 /*
6167 * If we have 16 user contexts, we will have 7 sbufs
6168 * per context, so reduce the update threshold to match. We
6169 * want to update before we actually run out, at low pbufs/ctxt
6170 * so give ourselves some margin.
6171 */
6172 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6173 updthresh = dd->pbufsctxt - 2;
6174 dd->cspec->updthresh_dflt = updthresh;
6175 dd->cspec->updthresh = updthresh;
6176
6177 /* before full enable, no interrupts, no locking needed */
6178 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6179 << SYM_LSB(SendCtrl, AvailUpdThld)) |
6180 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6181
6182 dd->psxmitwait_supported = 1;
6183 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6184bail:
6185 if (!dd->ctxtcnt)
6186 dd->ctxtcnt = 1; /* for other initialization code */
6187
6188 return ret;
6189}
6190
6191static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6192 u32 *pbufnum)
6193{
6194 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6195 struct qib_devdata *dd = ppd->dd;
6196
6197 /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6198 if (pbc & PBC_7322_VL15_SEND) {
6199 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6200 last = first;
6201 } else {
6202 if ((plen + 1) > dd->piosize2kmax_dwords)
6203 first = dd->piobcnt2k;
6204 else
6205 first = 0;
6206 last = dd->cspec->lastbuf_for_pio;
6207 }
6208 return qib_getsendbuf_range(dd, pbufnum, first, last);
6209}
6210
6211static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6212 u32 start)
6213{
6214 qib_write_kreg_port(ppd, krp_psinterval, intv);
6215 qib_write_kreg_port(ppd, krp_psstart, start);
6216}
6217
6218/*
6219 * Must be called with sdma_lock held, or before init finished.
6220 */
6221static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6222{
6223 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6224}
6225
6226static struct sdma_set_state_action sdma_7322_action_table[] = {
6227 [qib_sdma_state_s00_hw_down] = {
6228 .go_s99_running_tofalse = 1,
6229 .op_enable = 0,
6230 .op_intenable = 0,
6231 .op_halt = 0,
6232 .op_drain = 0,
6233 },
6234 [qib_sdma_state_s10_hw_start_up_wait] = {
6235 .op_enable = 0,
6236 .op_intenable = 1,
6237 .op_halt = 1,
6238 .op_drain = 0,
6239 },
6240 [qib_sdma_state_s20_idle] = {
6241 .op_enable = 1,
6242 .op_intenable = 1,
6243 .op_halt = 1,
6244 .op_drain = 0,
6245 },
6246 [qib_sdma_state_s30_sw_clean_up_wait] = {
6247 .op_enable = 0,
6248 .op_intenable = 1,
6249 .op_halt = 1,
6250 .op_drain = 0,
6251 },
6252 [qib_sdma_state_s40_hw_clean_up_wait] = {
6253 .op_enable = 1,
6254 .op_intenable = 1,
6255 .op_halt = 1,
6256 .op_drain = 0,
6257 },
6258 [qib_sdma_state_s50_hw_halt_wait] = {
6259 .op_enable = 1,
6260 .op_intenable = 1,
6261 .op_halt = 1,
6262 .op_drain = 1,
6263 },
6264 [qib_sdma_state_s99_running] = {
6265 .op_enable = 1,
6266 .op_intenable = 1,
6267 .op_halt = 0,
6268 .op_drain = 0,
6269 .go_s99_running_totrue = 1,
6270 },
6271};
6272
6273static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6274{
6275 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6276}
6277
6278static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6279{
6280 struct qib_devdata *dd = ppd->dd;
6281 unsigned lastbuf, erstbuf;
6282 u64 senddmabufmask[3] = { 0 };
6283 int n, ret = 0;
6284
6285 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6286 qib_sdma_7322_setlengen(ppd);
6287 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6288 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6289 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6290 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6291
6292 if (dd->num_pports)
6293 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6294 else
6295 n = dd->cspec->sdmabufcnt; /* failsafe for init */
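	/*
	 * SDMA uses the top sdmabufcnt 4k buffers; on dual-port cards
	 * port 1 takes the first half of that region and port 2 the second.
	 */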
6296 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6297 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6298 dd->cspec->sdmabufcnt);
6299 lastbuf = erstbuf + n;
6300
6301 ppd->sdma_state.first_sendbuf = erstbuf;
6302 ppd->sdma_state.last_sendbuf = lastbuf;
6303 for (; erstbuf < lastbuf; ++erstbuf) {
6304 unsigned word = erstbuf / BITS_PER_LONG;
6305 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6306
6307 BUG_ON(word >= 3);
6308 senddmabufmask[word] |= 1ULL << bit;
6309 }
6310 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6311 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6312 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6313 return ret;
6314}
6315
6316/* sdma_lock must be held */
6317static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6318{
6319 struct qib_devdata *dd = ppd->dd;
6320 int sane;
6321 int use_dmahead;
6322 u16 swhead;
6323 u16 swtail;
6324 u16 cnt;
6325 u16 hwhead;
6326
6327 use_dmahead = __qib_sdma_running(ppd) &&
6328 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6329retry:
6330 hwhead = use_dmahead ?
6331 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6332 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6333
6334 swhead = ppd->sdma_descq_head;
6335 swtail = ppd->sdma_descq_tail;
6336 cnt = ppd->sdma_descq_cnt;
6337
6338 if (swhead < swtail)
6339 /* not wrapped */
6340 sane = (hwhead >= swhead) & (hwhead <= swtail);
6341 else if (swhead > swtail)
6342 /* wrapped around */
6343 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6344 (hwhead <= swtail);
6345 else
6346 /* empty */
6347 sane = (hwhead == swhead);
6348
6349 if (unlikely(!sane)) {
6350 if (use_dmahead) {
6351 /* try one more time, directly from the register */
6352 use_dmahead = 0;
6353 goto retry;
6354 }
6355 /* proceed as if no progress */
6356 hwhead = swhead;
6357 }
6358
6359 return hwhead;
6360}
6361
6362static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6363{
6364 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6365
6366 return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6367 (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6368 !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6369 !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6370}
6371
6372/*
6373 * Compute the amount of delay before sending the next packet if the
6374 * port's send rate differs from the static rate set for the QP.
6375 * The delay affects the next packet, and the amount of delay is
6376 * based on the length of this packet.
6377 */
6378static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6379 u8 srate, u8 vl)
6380{
6381 u8 snd_mult = ppd->delay_mult;
6382 u8 rcv_mult = ib_rate_to_delay[srate];
6383 u32 ret;
6384
6385 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6386
6387 /* Indicate VL15, else set the VL in the control word */
6388 if (vl == 15)
6389 ret |= PBC_7322_VL15_SEND_CTRL;
6390 else
6391 ret |= vl << PBC_VL_NUM_LSB;
6392 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6393
6394 return ret;
6395}
6396
6397/*
6398 * Enable the per-port VL15 send buffers for use.
6399 * They follow the rest of the buffers, without a config parameter.
6400 * This was in initregs, but that is done before the shadow
6401 * is set up, and this has to be done after the shadow is
6402 * set up.
6403 */
6404static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6405{
6406 unsigned vl15bufs;
6407
6408 vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6409 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6410 TXCHK_CHG_TYPE_KERN, NULL);
6411}
6412
6413static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6414{
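	/*
	 * The first NUM_IB_PORTS contexts are kernel contexts sharing
	 * KCTXT0_EGRCNT eager buffers (split evenly on dual-port cards);
	 * later (user) contexts each get cspec->rcvegrcnt, placed after
	 * the kernel region.
	 */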
6415 if (rcd->ctxt < NUM_IB_PORTS) {
6416 if (rcd->dd->num_pports > 1) {
6417 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6418 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6419 } else {
6420 rcd->rcvegrcnt = KCTXT0_EGRCNT;
6421 rcd->rcvegr_tid_base = 0;
6422 }
6423 } else {
6424 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6425 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6426 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6427 }
6428}
6429
6430#define QTXSLEEPS 5000
6431static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6432 u32 len, u32 which, struct qib_ctxtdata *rcd)
6433{
6434 int i;
6435 const int last = start + len - 1;
6436 const int lastr = last / BITS_PER_LONG;
6437 u32 sleeps = 0;
6438 int wait = rcd != NULL;
6439 unsigned long flags;
6440
6441 while (wait) {
6442 unsigned long shadow;
6443 int cstart, previ = -1;
6444
6445 /*
6446 * when flipping from kernel to user, we can't change
6447 * the checking type if the buffer is allocated to the
6448 * driver. It's OK the other direction, because it's
6449 * from close, and we have just disarm'ed all the
6450 * buffers. All the kernel to kernel changes are also
6451 * OK.
6452 */
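		/* the pioavail shadow has 2 bits per buffer; scan for any buffer still marked busy */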
6453 for (cstart = start; cstart <= last; cstart++) {
6454 i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6455 / BITS_PER_LONG;
6456 if (i != previ) {
6457 shadow = (unsigned long)
6458 le64_to_cpu(dd->pioavailregs_dma[i]);
6459 previ = i;
6460 }
6461 if (test_bit(((2 * cstart) +
6462 QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6463 % BITS_PER_LONG, &shadow))
6464 break;
6465 }
6466
6467 if (cstart > last)
6468 break;
6469
6470 if (sleeps == QTXSLEEPS)
6471 break;
6472 /* make sure we see an updated copy next time around */
6473 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6474 sleeps++;
6475 msleep(1);
6476 }
6477
6478 switch (which) {
6479 case TXCHK_CHG_TYPE_DIS1:
6480 /*
6481 * disable checking on a range; used by diags; just
6482 * one buffer, but still written generically
6483 */
6484 for (i = start; i <= last; i++)
6485 clear_bit(i, dd->cspec->sendchkenable);
6486 break;
6487
6488 case TXCHK_CHG_TYPE_ENAB1:
6489 /*
6490 * (re)enable checking on a range; used by diags; just
6491 * one buffer, but still written generically; read
6492 * scratch to be sure buffer actually triggered, not
6493 * just flushed from processor.
6494 */
6495 qib_read_kreg32(dd, kr_scratch);
6496 for (i = start; i <= last; i++)
6497 set_bit(i, dd->cspec->sendchkenable);
6498 break;
6499
6500 case TXCHK_CHG_TYPE_KERN:
6501 /* usable by kernel */
6502 for (i = start; i <= last; i++) {
6503 set_bit(i, dd->cspec->sendibchk);
6504 clear_bit(i, dd->cspec->sendgrhchk);
6505 }
6506 spin_lock_irqsave(&dd->uctxt_lock, flags);
6507 /* see if we need to raise avail update threshold */
6508 for (i = dd->first_user_ctxt;
6509 dd->cspec->updthresh != dd->cspec->updthresh_dflt
6510 && i < dd->cfgctxts; i++)
6511 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6512 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6513 < dd->cspec->updthresh_dflt)
6514 break;
6515 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6516 if (i == dd->cfgctxts) {
6517 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6518 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6519 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6520 dd->sendctrl |= (dd->cspec->updthresh &
6521 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6522 SYM_LSB(SendCtrl, AvailUpdThld);
6523 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6524 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6525 }
6526 break;
6527
6528 case TXCHK_CHG_TYPE_USER:
6529 /* for user process */
6530 for (i = start; i <= last; i++) {
6531 clear_bit(i, dd->cspec->sendibchk);
6532 set_bit(i, dd->cspec->sendgrhchk);
6533 }
6534 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6535 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6536 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6537 dd->cspec->updthresh = (rcd->piocnt /
6538 rcd->subctxt_cnt) - 1;
6539 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6540 dd->sendctrl |= (dd->cspec->updthresh &
6541 SYM_RMASK(SendCtrl, AvailUpdThld))
6542 << SYM_LSB(SendCtrl, AvailUpdThld);
6543 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6544 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6545 } else
6546 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6547 break;
6548
6549 default:
6550 break;
6551 }
6552
6553 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6554 qib_write_kreg(dd, kr_sendcheckmask + i,
6555 dd->cspec->sendchkenable[i]);
6556
6557 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6558 qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6559 dd->cspec->sendgrhchk[i]);
6560 qib_write_kreg(dd, kr_sendibpktmask + i,
6561 dd->cspec->sendibchk[i]);
6562 }
6563
6564 /*
6565 * Be sure whatever we did was seen by the chip and acted upon,
6566 * before we return. Mostly important for which >= 2.
6567 */
6568 qib_read_kreg32(dd, kr_scratch);
6569}
6570
6571
6572/* useful for trigger analyzers, etc. */
6573static void writescratch(struct qib_devdata *dd, u32 val)
6574{
6575 qib_write_kreg(dd, kr_scratch, val);
6576}
6577
6578/* Dummy for now, use chip regs soon */
6579static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
6580{
6581 return -ENXIO;
6582}
6583
6584/**
6585 * qib_init_iba7322_funcs - set up the chip-specific function pointers
6586 * @pdev: the pci_dev for qlogic_ib device
6587 * @ent: pci_device_id struct for this dev
6588 *
6589 * Also allocates, inits, and returns the devdata struct for this
6590 * device instance
6591 *
6592 * This is global, and is called directly at init to set up the
6593 * chip-specific function pointers for later use.
6594 */
6595struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6596 const struct pci_device_id *ent)
6597{
6598 struct qib_devdata *dd;
6599 int ret, i;
6600 u32 tabsize, actual_cnt = 0;
6601
6602 dd = qib_alloc_devdata(pdev,
6603 NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6604 sizeof(struct qib_chip_specific) +
6605 NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6606 if (IS_ERR(dd))
6607 goto bail;
6608
6609 dd->f_bringup_serdes = qib_7322_bringup_serdes;
6610 dd->f_cleanup = qib_setup_7322_cleanup;
6611 dd->f_clear_tids = qib_7322_clear_tids;
6612 dd->f_free_irq = qib_7322_free_irq;
6613 dd->f_get_base_info = qib_7322_get_base_info;
6614 dd->f_get_msgheader = qib_7322_get_msgheader;
6615 dd->f_getsendbuf = qib_7322_getsendbuf;
6616 dd->f_gpio_mod = gpio_7322_mod;
6617 dd->f_eeprom_wen = qib_7322_eeprom_wen;
6618 dd->f_hdrqempty = qib_7322_hdrqempty;
6619 dd->f_ib_updown = qib_7322_ib_updown;
6620 dd->f_init_ctxt = qib_7322_init_ctxt;
6621 dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
6622 dd->f_intr_fallback = qib_7322_intr_fallback;
6623 dd->f_late_initreg = qib_late_7322_initreg;
6624 dd->f_setpbc_control = qib_7322_setpbc_control;
6625 dd->f_portcntr = qib_portcntr_7322;
6626 dd->f_put_tid = qib_7322_put_tid;
6627 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
6628 dd->f_rcvctrl = rcvctrl_7322_mod;
6629 dd->f_read_cntrs = qib_read_7322cntrs;
6630 dd->f_read_portcntrs = qib_read_7322portcntrs;
6631 dd->f_reset = qib_do_7322_reset;
6632 dd->f_init_sdma_regs = init_sdma_7322_regs;
6633 dd->f_sdma_busy = qib_sdma_7322_busy;
6634 dd->f_sdma_gethead = qib_sdma_7322_gethead;
6635 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
6636 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6637 dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
6638 dd->f_sendctrl = sendctrl_7322_mod;
6639 dd->f_set_armlaunch = qib_set_7322_armlaunch;
6640 dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
6641 dd->f_iblink_state = qib_7322_iblink_state;
6642 dd->f_ibphys_portstate = qib_7322_phys_portstate;
6643 dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
6644 dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
6645 dd->f_set_ib_loopback = qib_7322_set_loopback;
6646 dd->f_get_ib_table = qib_7322_get_ib_table;
6647 dd->f_set_ib_table = qib_7322_set_ib_table;
6648 dd->f_set_intr_state = qib_7322_set_intr_state;
6649 dd->f_setextled = qib_setup_7322_setextled;
6650 dd->f_txchk_change = qib_7322_txchk_change;
6651 dd->f_update_usrhead = qib_update_7322_usrhead;
6652 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
6653 dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
6654 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
6655 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
6656 dd->f_sdma_init_early = qib_7322_sdma_init_early;
6657 dd->f_writescratch = writescratch;
6658 dd->f_tempsense_rd = qib_7322_tempsense_rd;
6659 /*
6660 * Do remaining PCIe setup and save PCIe values in dd.
6661 * Any error printing is already done by the init code.
6662 * On return, we have the chip mapped, but chip registers
6663 * are not set up until start of qib_init_7322_variables.
6664 */
6665 ret = qib_pcie_ddinit(dd, pdev, ent);
6666 if (ret < 0)
6667 goto bail_free;
6668
6669 /* initialize chip-specific variables */
6670 ret = qib_init_7322_variables(dd);
6671 if (ret)
6672 goto bail_cleanup;
6673
6674 if (qib_mini_init || !dd->num_pports)
6675 goto bail;
6676
6677 /*
6678 * Determine number of vectors we want; depends on port count
6679 * and number of configured kernel receive queues actually used.
6680 * Should also depend on whether sdma is enabled or not, but
6681 * that's such a rare testing case it's not worth worrying about.
6682 */
6683 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6684 for (i = 0; i < tabsize; i++)
6685 if ((i < ARRAY_SIZE(irq_table) &&
6686 irq_table[i].port <= dd->num_pports) ||
6687 (i >= ARRAY_SIZE(irq_table) &&
6688 dd->rcd[i - ARRAY_SIZE(irq_table)]))
6689 actual_cnt++;
6690 tabsize = actual_cnt;
6691 dd->cspec->msix_entries = kmalloc(tabsize *
6692 sizeof(struct msix_entry), GFP_KERNEL);
6693 dd->cspec->msix_arg = kmalloc(tabsize *
6694 sizeof(void *), GFP_KERNEL);
6695 if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
6696 qib_dev_err(dd, "No memory for MSIx table\n");
6697 tabsize = 0;
6698 }
6699 for (i = 0; i < tabsize; i++)
6700 dd->cspec->msix_entries[i].entry = i;
6701
6702 if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6703 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
6704 "continuing anyway\n");
6705 /* may be less than we wanted, if not enough available */
6706 dd->cspec->num_msix_entries = tabsize;
6707
6708 /* setup interrupt handler */
6709 qib_setup_7322_interrupt(dd, 1);
6710
6711 /* clear diagctrl register, in case diags were running and crashed */
6712 qib_write_kreg(dd, kr_hwdiagctrl, 0);
6713
6714 goto bail;
6715
6716bail_cleanup:
6717 qib_pcie_ddcleanup(dd);
6718bail_free:
6719 qib_free_devdata(dd);
6720 dd = ERR_PTR(ret);
6721bail:
6722 return dd;
6723}
6724
6725/*
6726 * Set the table entry at the specified index from the table specified.
6727 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
6728 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
6729 * 'idx' below addresses the correct entry, while its 4 LSBs select the
6730 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
6731 */
6732#define DDS_ENT_AMP_LSB 14
6733#define DDS_ENT_MAIN_LSB 9
6734#define DDS_ENT_POST_LSB 5
6735#define DDS_ENT_PRE_XTRA_LSB 3
6736#define DDS_ENT_PRE_LSB 0
6737
6738/*
6739 * Set one entry in the TxDDS table for spec'd port.
6740 * ridx picks one of the entries, while tp points
6741 * to the appropriate table entry.
6742 */
6743static void set_txdds(struct qib_pportdata *ppd, int ridx,
6744 const struct txdds_ent *tp)
6745{
6746 struct qib_devdata *dd = ppd->dd;
6747 u32 pack_ent;
6748 int regidx;
6749
6750 /* Get correct offset in chip-space, and in source table */
6751 regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6752 /*
6753 * We do not use qib_write_kreg_port() because it was intended
6754 * only for registers in the lower "port specific" pages.
6755 * So do index calculation by hand.
6756 */
6757 if (ppd->hw_pidx)
6758 regidx += (dd->palign / sizeof(u64));
6759
6760 pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6761 pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6762 pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6763 pack_ent |= tp->post << DDS_ENT_POST_LSB;
6764 qib_write_kreg(dd, regidx, pack_ent);
6765 /* Prevent back-to-back writes by hitting scratch */
6766 qib_write_kreg(ppd->dd, kr_scratch, 0);
6767}
6768
6769static const struct vendor_txdds_ent vendor_txdds[] = {
6770 { /* Amphenol 1m 30awg NoEq */
6771 { 0x41, 0x50, 0x48 }, "584470002 ",
6772 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
6773 },
6774 { /* Amphenol 3m 28awg NoEq */
6775 { 0x41, 0x50, 0x48 }, "584470004 ",
6776 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
6777 },
6778 { /* Finisar 3m OM2 Optical */
6779 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
6780 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
6781 },
6782 { /* Finisar 30m OM2 Optical */
6783 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
6784 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
6785 },
6786 { /* Finisar Default OM2 Optical */
6787 { 0x00, 0x90, 0x65 }, NULL,
6788 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
6789 },
6790 { /* Gore 1m 30awg NoEq */
6791 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
6792 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
6793 },
6794 { /* Gore 2m 30awg NoEq */
6795 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
6796 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
6797 },
6798 { /* Gore 1m 28awg NoEq */
6799 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
6800 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
6801 },
6802 { /* Gore 3m 28awg NoEq */
6803 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
6804 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
6805 },
6806 { /* Gore 5m 24awg Eq */
6807 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
6808 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
6809 },
6810 { /* Gore 7m 24awg Eq */
6811 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
6812 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
6813 },
6814 { /* Gore 5m 26awg Eq */
6815 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
6816 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
6817 },
6818 { /* Gore 7m 26awg Eq */
6819 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
6820 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
6821 },
6822 { /* Intersil 12m 24awg Active */
6823 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
6824 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
6825 },
6826 { /* Intersil 10m 28awg Active */
6827 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
6828 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
6829 },
6830 { /* Intersil 7m 30awg Active */
6831 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
6832 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
6833 },
6834 { /* Intersil 5m 32awg Active */
6835 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
6836 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
6837 },
6838 { /* Intersil Default Active */
6839 { 0x00, 0x30, 0xB4 }, NULL,
6840 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
6841 },
6842 { /* Luxtera 20m Active Optical */
6843 { 0x00, 0x25, 0x63 }, NULL,
6844 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
6845 },
6846 { /* Molex 1M Cu loopback */
6847 { 0x00, 0x09, 0x3A }, "74763-0025 ",
6848 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
6849 },
6850 { /* Molex 2m 28awg NoEq */
6851 { 0x00, 0x09, 0x3A }, "74757-2201 ",
6852 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
6853 },
6854};
6855
6856static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
6857 /* amp, pre, main, post */
6858 { 2, 2, 15, 6 }, /* Loopback */
6859 { 0, 0, 0, 1 }, /* 2 dB */
6860 { 0, 0, 0, 2 }, /* 3 dB */
6861 { 0, 0, 0, 3 }, /* 4 dB */
6862 { 0, 0, 0, 4 }, /* 5 dB */
6863 { 0, 0, 0, 5 }, /* 6 dB */
6864 { 0, 0, 0, 6 }, /* 7 dB */
6865 { 0, 0, 0, 7 }, /* 8 dB */
6866 { 0, 0, 0, 8 }, /* 9 dB */
6867 { 0, 0, 0, 9 }, /* 10 dB */
6868 { 0, 0, 0, 10 }, /* 11 dB */
6869 { 0, 0, 0, 11 }, /* 12 dB */
6870 { 0, 0, 0, 12 }, /* 13 dB */
6871 { 0, 0, 0, 13 }, /* 14 dB */
6872 { 0, 0, 0, 14 }, /* 15 dB */
6873 { 0, 0, 0, 15 }, /* 16 dB */
6874};
6875
6876static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
6877 /* amp, pre, main, post */
6878 { 2, 2, 15, 6 }, /* Loopback */
6879 { 0, 0, 0, 8 }, /* 2 dB */
6880 { 0, 0, 0, 8 }, /* 3 dB */
6881 { 0, 0, 0, 9 }, /* 4 dB */
6882 { 0, 0, 0, 9 }, /* 5 dB */
6883 { 0, 0, 0, 10 }, /* 6 dB */
6884 { 0, 0, 0, 10 }, /* 7 dB */
6885 { 0, 0, 0, 11 }, /* 8 dB */
6886 { 0, 0, 0, 11 }, /* 9 dB */
6887 { 0, 0, 0, 12 }, /* 10 dB */
6888 { 0, 0, 0, 12 }, /* 11 dB */
6889 { 0, 0, 0, 13 }, /* 12 dB */
6890 { 0, 0, 0, 13 }, /* 13 dB */
6891 { 0, 0, 0, 14 }, /* 14 dB */
6892 { 0, 0, 0, 14 }, /* 15 dB */
6893 { 0, 0, 0, 15 }, /* 16 dB */
6894};
6895
6896static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
6897 /* amp, pre, main, post */
6898 { 2, 2, 15, 6 }, /* Loopback */
6899 { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
6900 { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
6901 { 0, 1, 0, 11 }, /* 4 dB */
6902 { 0, 1, 0, 13 }, /* 5 dB */
6903 { 0, 1, 0, 15 }, /* 6 dB */
6904 { 0, 1, 3, 15 }, /* 7 dB */
6905 { 0, 1, 7, 15 }, /* 8 dB */
6906 { 0, 1, 7, 15 }, /* 9 dB */
6907 { 0, 1, 8, 15 }, /* 10 dB */
6908 { 0, 1, 9, 15 }, /* 11 dB */
6909 { 0, 1, 10, 15 }, /* 12 dB */
6910 { 0, 2, 6, 15 }, /* 13 dB */
6911 { 0, 2, 7, 15 }, /* 14 dB */
6912 { 0, 2, 8, 15 }, /* 15 dB */
6913 { 0, 2, 9, 15 }, /* 16 dB */
6914};
6915
6916/*
6917 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
6918 * These are mostly used for mez cards going through connectors
6919 * and backplane traces, but can be used to add other "unusual"
6920 * table values as well.
6921 */
6922static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
6923 /* amp, pre, main, post */
6924 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
6925 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
6926 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
6927 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
6928 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6929 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6930 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6931 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6932 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6933 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6934 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6935};
6936
6937static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
6938 /* amp, pre, main, post */
6939 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
6940 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
6941 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
6942 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
6943 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6944 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6945 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6946 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6947 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6948 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6949 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6950};
6951
6952static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6953 /* amp, pre, main, post */
6954 { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
6955 { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
6956 { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
6957 { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
6958 { 0, 1, 12, 10 }, /* QME7342 backplane setting */
6959 { 0, 1, 12, 11 }, /* QME7342 backplane setting */
6960 { 0, 1, 12, 12 }, /* QME7342 backplane setting */
6961 { 0, 1, 12, 14 }, /* QME7342 backplane setting */
6962 { 0, 1, 12, 6 }, /* QME7342 backplane setting */
6963 { 0, 1, 12, 7 }, /* QME7342 backplane setting */
6964 { 0, 1, 12, 8 }, /* QME7342 backplane setting */
6965};
6966
6967static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
6968 unsigned atten)
6969{
6970 /*
6971 * The attenuation table starts at 2dB for entry 1,
6972 * with entry 0 being the loopback entry.
6973 */
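	/* map dB value to table index: idx = atten - 1, clamped to [1, TXDDS_TABLE_SZ - 1] */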
6974 if (atten <= 2)
6975 atten = 1;
6976 else if (atten > TXDDS_TABLE_SZ)
6977 atten = TXDDS_TABLE_SZ - 1;
6978 else
6979 atten--;
6980 return txdds + atten;
6981}
6982
6983/*
6984 * if override is set, the module parameter txselect has a value
6985 * for this specific port, so use it, rather than our normal mechanism.
6986 */
6987static void find_best_ent(struct qib_pportdata *ppd,
6988 const struct txdds_ent **sdr_dds,
6989 const struct txdds_ent **ddr_dds,
6990 const struct txdds_ent **qdr_dds, int override)
6991{
6992 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
6993 int idx;
6994
6995 /* Search table of known cables */
6996 for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
6997 const struct vendor_txdds_ent *v = vendor_txdds + idx;
6998
6999 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7000 (!v->partnum ||
7001 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7002 *sdr_dds = &v->sdr;
7003 *ddr_dds = &v->ddr;
7004 *qdr_dds = &v->qdr;
7005 return;
7006 }
7007 }
7008
7009 /* Lookup serdes setting by cable type and attenuation */
7010 if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7011 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7012 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7013 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7014 return;
7015 }
7016
7017 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7018 qd->atten[1])) {
7019 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7020 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7021 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7022 return;
7023 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7024 /*
7025 * If we have no (or incomplete) data from the cable
7026 * EEPROM, or no QSFP, or override is set, use the
7027	 * module parameter value to index into the attenuation
7028 * table.
7029 */
7030 idx = ppd->cpspec->no_eep;
7031 *sdr_dds = &txdds_sdr[idx];
7032 *ddr_dds = &txdds_ddr[idx];
7033 *qdr_dds = &txdds_qdr[idx];
7034 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7035 /* similar to above, but index into the "extra" table. */
7036 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7037 *sdr_dds = &txdds_extra_sdr[idx];
7038 *ddr_dds = &txdds_extra_ddr[idx];
7039 *qdr_dds = &txdds_extra_qdr[idx];
7040 } else {
7041 /* this shouldn't happen, it's range checked */
7042 *sdr_dds = txdds_sdr + qib_long_atten;
7043 *ddr_dds = txdds_ddr + qib_long_atten;
7044 *qdr_dds = txdds_qdr + qib_long_atten;
7045 }
7046}
7047
7048static void init_txdds_table(struct qib_pportdata *ppd, int override)
7049{
7050 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7051 struct txdds_ent *dds;
7052 int idx;
7053 int single_ent = 0;
7054
7055 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7056
7057 /* for mez cards or override, use the selected value for all entries */
7058 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7059 single_ent = 1;
7060
7061 /* Fill in the first entry with the best entry found. */
7062 set_txdds(ppd, 0, sdr_dds);
7063 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7064 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7065 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7066 QIBL_LINKACTIVE)) {
7067 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7068 QIB_IB_QDR ? qdr_dds :
7069 (ppd->link_speed_active ==
7070 QIB_IB_DDR ? ddr_dds : sdr_dds));
7071 write_tx_serdes_param(ppd, dds);
7072 }
7073
7074 /* Fill in the remaining entries with the default table values. */
7075 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7076 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7077 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7078 single_ent ? ddr_dds : txdds_ddr + idx);
7079 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7080 single_ent ? qdr_dds : txdds_qdr + idx);
7081 }
7082}
7083
7084#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7085#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7086#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7087#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7088#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7089#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7090#define AHB_TRANS_TRIES 10
7091
7092/*
7093 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7094 * 5=subsystem, which is why most calls have "(chan + (chan >> 1))"
7095 * for the channel argument.
7096 */
7097static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7098 u32 data, u32 mask)
7099{
7100 u32 rd_data, wr_data, sz_mask;
7101 u64 trans, acc, prev_acc;
7102 u32 ret = 0xBAD0BAD;
7103 int tries;
7104
7105 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7106 /* From this point on, make sure we return access */
7107 acc = (quad << 1) | 1;
7108 qib_write_kreg(dd, KR_AHB_ACC, acc);
7109
7110 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7111 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7112 if (trans & AHB_TRANS_RDY)
7113 break;
7114 }
7115 if (tries >= AHB_TRANS_TRIES) {
7116 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7117 goto bail;
7118 }
7119
7120 /* If mask is not all 1s, we need to read, but different SerDes
7121 * entities have different sizes
7122 */
7123 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7124 wr_data = data & mask & sz_mask;
7125 if ((~mask & sz_mask) != 0) {
7126 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7127 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7128
7129 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7130 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7131 if (trans & AHB_TRANS_RDY)
7132 break;
7133 }
7134 if (tries >= AHB_TRANS_TRIES) {
7135 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7136 AHB_TRANS_TRIES);
7137 goto bail;
7138 }
7139 /* Re-read in case host split reads and read data first */
7140 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7141 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7142 wr_data |= (rd_data & ~mask & sz_mask);
7143 }
7144
7145 /* If mask is not zero, we need to write. */
7146 if (mask & sz_mask) {
7147 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7148 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7149 trans |= AHB_WR;
7150 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7151
7152 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7153 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7154 if (trans & AHB_TRANS_RDY)
7155 break;
7156 }
7157 if (tries >= AHB_TRANS_TRIES) {
7158 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7159 AHB_TRANS_TRIES);
7160 goto bail;
7161 }
7162 }
7163 ret = wr_data;
7164bail:
7165 qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7166 return ret;
7167}
7168
7169static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7170 unsigned mask)
7171{
7172 struct qib_devdata *dd = ppd->dd;
7173 int chan;
7174 u32 rbc;
7175
7176 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7177 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7178 data, mask);
7179 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7180 addr, 0, 0);
7181 }
7182}
7183
7184static int serdes_7322_init(struct qib_pportdata *ppd)
7185{
7186 u64 data;
7187 u32 le_val;
7188
7189 /*
7190 * Initialize the Tx DDS tables. Also done every QSFP event,
7191 * for adapters with QSFP
7192 */
7193 init_txdds_table(ppd, 0);
7194
7195 /* ensure no tx overrides from earlier driver loads */
7196 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7197 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7198 reset_tx_deemphasis_override));
7199
7200 /* Patch some SerDes defaults to "Better for IB" */
7201 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7202 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7203
7204 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7205 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7206 /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7207 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7208
7209 /* May be overridden in qsfp_7322_event */
7210 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7211 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7212
7213 /* enable LE1 adaptation for all but QME, which is disabled */
7214 le_val = IS_QME(ppd->dd) ? 0 : 1;
7215 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7216
7217 /* Clear cmode-override, may be set from older driver */
7218 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7219
7220 /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7221 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7222
7223	/* set up LoS params; these are subsystem, so chan == 5 */
7224 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7225 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7226 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7227 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7228 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7229
7230 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7231 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7232 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7233 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7234 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7235
7236 /* LoS filter select enabled */
7237 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7238
7239 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7240 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7241 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7242 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7243
7244 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7245 qib_write_kreg_port(ppd, krp_serdesctrl, data |
7246 SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7247
7248	/* rxbistena; set to 0 to avoid effects of it switching later */
7249 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7250
7251 /* Configure 4 DFE taps, and only they adapt */
7252 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7253
7254 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7255 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7256 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7257
7258 /*
7259 * Set receive adaptation mode. SDR and DDR adaptation are
7260 * always on, and QDR is initially enabled; later disabled.
7261 */
7262 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7263 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7264 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7265 ppd->dd->cspec->r1 ?
7266 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7267 ppd->cpspec->qdr_dfe_on = 1;
7268
7269 /* FLoop LOS gate: PPM filter enabled */
7270 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7271
7272 /* rx offset center enabled */
7273 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7274
7275 if (!ppd->dd->cspec->r1) {
7276 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7277 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7278 }
7279
7280 /* Set the frequency loop bandwidth to 15 */
7281 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7282
7283 return 0;
7284}
7285
7286/* start adjust QMH serdes parameters */
7287
7288static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7289{
7290 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7291 9, code << 9, 0x3f << 9);
7292}
7293
7294static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7295 int enable, u32 tapenable)
7296{
7297 if (enable)
7298 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7299 1, 3 << 10, 0x1f << 10);
7300 else
7301 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7302 1, 0, 0x1f << 10);
7303}
7304
7305/* Set clock to 1, 0, 1, 0 */
7306static void clock_man(struct qib_pportdata *ppd, int chan)
7307{
7308 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7309 4, 0x4000, 0x4000);
7310 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7311 4, 0, 0x4000);
7312 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7313 4, 0x4000, 0x4000);
7314 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7315 4, 0, 0x4000);
7316}
7317
7318/*
7319 * write the current Tx serdes pre,post,main,amp settings into the serdes.
7320 * The caller must pass the settings appropriate for the current speed,
7321 * or not care if they are correct for the current speed.
7322 */
7323static void write_tx_serdes_param(struct qib_pportdata *ppd,
7324 struct txdds_ent *txdds)
7325{
7326 u64 deemph;
7327
7328 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7329 /* field names for amp, main, post, pre, respectively */
7330 deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7331 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7332 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7333 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7334
7335 deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7336 tx_override_deemphasis_select);
7337 deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7338 txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7339 txampcntl_d2a);
7340 deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7341 txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7342 txc0_ena);
7343 deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7344 txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7345 txcp1_ena);
7346 deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7347 txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7348 txcn1_ena);
7349 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7350}
7351
7352/*
7353 * Set the parameters for mez cards on link bounce, so they are
7354 * always exactly what was requested. Similar logic to init_txdds
7355 * but does just the serdes.
7356 */
7357static void adj_tx_serdes(struct qib_pportdata *ppd)
7358{
7359 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7360 struct txdds_ent *dds;
7361
7362 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
7363 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7364 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
7365 ddr_dds : sdr_dds));
7366 write_tx_serdes_param(ppd, dds);
7367}
7368
7369/* set QDR forced value for H1, if needed */
7370static void force_h1(struct qib_pportdata *ppd)
7371{
7372 int chan;
7373
7374 ppd->cpspec->qdr_reforce = 0;
7375 if (!ppd->dd->cspec->r1)
7376 return;
7377
7378 for (chan = 0; chan < SERDES_CHANS; chan++) {
7379 set_man_mode_h1(ppd, chan, 1, 0);
7380 set_man_code(ppd, chan, ppd->cpspec->h1_val);
7381 clock_man(ppd, chan);
7382 set_man_mode_h1(ppd, chan, 0, 0);
7383 }
7384}
7385
7386#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7387#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7388
7389#define R_OPCODE_LSB 3
7390#define R_OP_NOP 0
7391#define R_OP_SHIFT 2
7392#define R_OP_UPDATE 3
7393#define R_TDI_LSB 2
7394#define R_TDO_LSB 1
7395#define R_RDY 1
7396
7397static int qib_r_grab(struct qib_devdata *dd)
7398{
7399 u64 val;
7400 val = SJA_EN;
7401 qib_write_kreg(dd, kr_r_access, val);
7402 qib_read_kreg32(dd, kr_scratch);
7403 return 0;
7404}
7405
7406/* qib_r_wait_for_rdy() not only waits for the ready bit, it
7407 * returns the current state of R_TDO
7408 */
7409static int qib_r_wait_for_rdy(struct qib_devdata *dd)
7410{
7411 u64 val;
7412 int timeout;
7413 for (timeout = 0; timeout < 100 ; ++timeout) {
7414 val = qib_read_kreg32(dd, kr_r_access);
7415 if (val & R_RDY)
7416 return (val >> R_TDO_LSB) & 1;
7417 }
7418 return -1;
7419}
7420
7421static int qib_r_shift(struct qib_devdata *dd, int bisten,
7422 int len, u8 *inp, u8 *outp)
7423{
7424 u64 valbase, val;
7425 int ret, pos;
7426
7427 valbase = SJA_EN | (bisten << BISTEN_LSB) |
7428 (R_OP_SHIFT << R_OPCODE_LSB);
7429 ret = qib_r_wait_for_rdy(dd);
7430 if (ret < 0)
7431 goto bail;
7432 for (pos = 0; pos < len; ++pos) {
7433 val = valbase;
7434 if (outp) {
7435 outp[pos >> 3] &= ~(1 << (pos & 7));
7436 outp[pos >> 3] |= (ret << (pos & 7));
7437 }
7438 if (inp) {
7439 int tdi = inp[pos >> 3] >> (pos & 7);
7440 val |= ((tdi & 1) << R_TDI_LSB);
7441 }
7442 qib_write_kreg(dd, kr_r_access, val);
7443 qib_read_kreg32(dd, kr_scratch);
7444 ret = qib_r_wait_for_rdy(dd);
7445 if (ret < 0)
7446 break;
7447 }
7448 /* Restore to NOP between operations. */
7449 val = SJA_EN | (bisten << BISTEN_LSB);
7450 qib_write_kreg(dd, kr_r_access, val);
7451 qib_read_kreg32(dd, kr_scratch);
7452 ret = qib_r_wait_for_rdy(dd);
7453
7454 if (ret >= 0)
7455 ret = pos;
7456bail:
7457 return ret;
7458}
7459
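/*
 * Note on qib_r_shift(): each pass records the TDO value returned by
 * the previous wait into outp before shifting in the next inp bit, so
 * the first output bit reflects the chain state prior to this call.
 * A complete chain operation, as used later in this file, looks like:
 *
 *	qib_r_grab(dd);
 *	qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL);
 *	qib_r_update(dd, BISTEN_AT);
 */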
7460static int qib_r_update(struct qib_devdata *dd, int bisten)
7461{
7462 u64 val;
7463 int ret;
7464
7465 val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
7466 ret = qib_r_wait_for_rdy(dd);
7467 if (ret >= 0) {
7468 qib_write_kreg(dd, kr_r_access, val);
7469 qib_read_kreg32(dd, kr_scratch);
7470 }
7471 return ret;
7472}
7473
7474#define BISTEN_PORT_SEL 15
7475#define LEN_PORT_SEL 625
7476#define BISTEN_AT 17
7477#define LEN_AT 156
7478#define BISTEN_ETM 16
7479#define LEN_ETM 632
7480
7481#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
7482
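/*
 * BIT2BYTE() rounds a bit count up to whole bytes: BIT2BYTE(LEN_AT) is
 * (156 + 7) / 8 = 20 and BIT2BYTE(LEN_ETM) is (632 + 7) / 8 = 79, which
 * is why the arrays below hold 20 and 79 bytes respectively.
 */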
7483/* these are common for all IB port use cases. */
7484static u8 reset_at[BIT2BYTE(LEN_AT)] = {
7485 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7486 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7487};
7488static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
7489 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7490 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7491 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7492 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7493 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7494 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7495 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7496 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7497};
7498static u8 at[BIT2BYTE(LEN_AT)] = {
7499 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7500 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7501};
7502
7503/* used for IB1 or IB2, only one in use */
7504static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
7505 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7506 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7507 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7508 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7509 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7510 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7511 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7512 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7513};
7514
7515/* used when both IB1 and IB2 are in use */
7516static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
7517 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7518 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7519 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7520 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7521 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7522 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7523 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7524 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
7525};
7526
7527/* used when only IB1 is in use */
7528static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
7529 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7530 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7531 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7532 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7533 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7534 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7535 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7536 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7537};
7538
7539/* used when only IB2 is in use */
7540static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
7541 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
7542 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
7543 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7544 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7545 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
7546 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7547 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7548 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
7549};
7550
7551/* used when both IB1 and IB2 are in use */
7552static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
7553 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7554 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7555 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7556 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7557 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7558 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
7559 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7560 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7561};
7562
7563/*
7564 * Do setup to properly handle IB link recovery; if 'both' is set, we
7565 * are initializing to cover both ports; otherwise we are initializing
7566 * to cover a single-port card, or the port has reached INIT and we may
7567 * need to switch coverage types.
7568 */
7569static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
7570{
7571 u8 *portsel, *etm;
7572 struct qib_devdata *dd = ppd->dd;
7573
7574 if (!ppd->dd->cspec->r1)
7575 return;
7576 if (!both) {
7577 dd->cspec->recovery_ports_initted++;
7578 ppd->cpspec->recovery_init = 1;
7579 }
7580 if (!both && dd->cspec->recovery_ports_initted == 1) {
7581 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
7582 etm = atetm_1port;
7583 } else {
7584 portsel = portsel_2port;
7585 etm = atetm_2port;
7586 }
7587
7588 if (qib_r_grab(dd) < 0 ||
7589 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
7590 qib_r_update(dd, BISTEN_ETM) < 0 ||
7591 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
7592 qib_r_update(dd, BISTEN_AT) < 0 ||
7593 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
7594 portsel, NULL) < 0 ||
7595 qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
7596 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
7597 qib_r_update(dd, BISTEN_AT) < 0 ||
7598 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
7599 qib_r_update(dd, BISTEN_ETM) < 0)
7600 qib_dev_err(dd, "Failed IB link recovery setup\n");
7601}
7602
7603static void check_7322_rxe_status(struct qib_pportdata *ppd)
7604{
7605 struct qib_devdata *dd = ppd->dd;
7606 u64 fmask;
7607
7608 if (dd->cspec->recovery_ports_initted != 1)
7609 return; /* rest doesn't apply to dualport */
7610 qib_write_kreg(dd, kr_control, dd->control |
7611 SYM_MASK(Control, FreezeMode));
7612 (void)qib_read_kreg64(dd, kr_scratch);
7613 udelay(3); /* ibcreset asserted 400ns, be sure that's over */
7614 fmask = qib_read_kreg64(dd, kr_act_fmask);
7615 if (!fmask) {
7616 /*
7617 * require a powercycle before we'll work again, and make
7618 * sure we get no more interrupts, and don't turn off
7619 * freeze.
7620 */
7621 ppd->dd->cspec->stay_in_freeze = 1;
7622 qib_7322_set_intr_state(ppd->dd, 0);
7623 qib_write_kreg(dd, kr_fmask, 0ULL);
7624 qib_dev_err(dd, "HCA unusable until powercycled\n");
7625 return; /* eventually reset */
7626 }
7627
7628 qib_write_kreg(ppd->dd, kr_hwerrclear,
7629 SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
7630
7631 /* don't do the full clear_freeze(), not needed for this */
7632 qib_write_kreg(dd, kr_control, dd->control);
7633 qib_read_kreg32(dd, kr_scratch);
7634 /* take IBC out of reset */
7635 if (ppd->link_speed_supported) {
7636 ppd->cpspec->ibcctrl_a &=
7637 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
7638 qib_write_kreg_port(ppd, krp_ibcctrl_a,
7639 ppd->cpspec->ibcctrl_a);
7640 qib_read_kreg32(dd, kr_scratch);
7641 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
7642 qib_set_ib_7322_lstate(ppd, 0,
7643 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
7644 }
7645}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
new file mode 100644
index 000000000000..9b40f345ac3f
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -0,0 +1,1586 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/netdevice.h>
37#include <linux/vmalloc.h>
38#include <linux/delay.h>
39#include <linux/idr.h>
40
41#include "qib.h"
42#include "qib_common.h"
43
44/*
45 * min buffers we want to have per context, after driver
46 */
47#define QIB_MIN_USER_CTXT_BUFCNT 7
48
49#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
50#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
51#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
52
53/*
54 * Number of ctxts we are configured to use (to allow for more pio
55 * buffers per ctxt, etc.) Zero means use chip value.
56 */
57ushort qib_cfgctxts;
58module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
59MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
60
61/*
62 * If set, do not write to any regs if avoidable, hack to allow
63 * check for deranged default register values.
64 */
65ushort qib_mini_init;
66module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
67MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
68
69unsigned qib_n_krcv_queues;
70module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
71MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
72
73/*
74 * qib_wc_pat parameter:
75 * 0 is WC via MTRR
76 * 1 is WC via PAT
77 * If PAT initialization fails, code reverts back to MTRR
78 */
79unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
80module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
81MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
82
83struct workqueue_struct *qib_wq;
84struct workqueue_struct *qib_cq_wq;
85
86static void verify_interrupt(unsigned long);
87
88static struct idr qib_unit_table;
89u32 qib_cpulist_count;
90unsigned long *qib_cpulist;
91
92/* set number of contexts we'll actually use */
93void qib_set_ctxtcnt(struct qib_devdata *dd)
94{
95 if (!qib_cfgctxts)
96 dd->cfgctxts = dd->ctxtcnt;
97 else if (qib_cfgctxts < dd->num_pports)
98 dd->cfgctxts = dd->ctxtcnt;
99 else if (qib_cfgctxts <= dd->ctxtcnt)
100 dd->cfgctxts = qib_cfgctxts;
101 else
102 dd->cfgctxts = dd->ctxtcnt;
103}
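/*
 * For illustration (assuming a chip that reports 18 contexts): leaving
 * cfgctxts at 0, or setting it above 18 or below the port count, keeps
 * all 18, while cfgctxts=10 limits the driver to 10 contexts.
 */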
104
105/*
106 * Common code for creating the receive context array.
107 */
108int qib_create_ctxts(struct qib_devdata *dd)
109{
110 unsigned i;
111 int ret;
112
113 /*
114 * Allocate full ctxtcnt array, rather than just cfgctxts, because
115 * cleanup iterates across all possible ctxts.
116 */
117 dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
118 if (!dd->rcd) {
119 qib_dev_err(dd, "Unable to allocate ctxtdata array, "
120 "failing\n");
121 ret = -ENOMEM;
122 goto done;
123 }
124
125 /* create (one or more) kctxt */
126 for (i = 0; i < dd->first_user_ctxt; ++i) {
127 struct qib_pportdata *ppd;
128 struct qib_ctxtdata *rcd;
129
130 if (dd->skip_kctxt_mask & (1 << i))
131 continue;
132
133 ppd = dd->pport + (i % dd->num_pports);
134 rcd = qib_create_ctxtdata(ppd, i);
135 if (!rcd) {
136 qib_dev_err(dd, "Unable to allocate ctxtdata"
137 " for Kernel ctxt, failing\n");
138 ret = -ENOMEM;
139 goto done;
140 }
141 rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
142 rcd->seq_cnt = 1;
143 }
144 ret = 0;
145done:
146 return ret;
147}
148
149/*
150 * Common code for user and kernel context setup.
151 */
152struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
153{
154 struct qib_devdata *dd = ppd->dd;
155 struct qib_ctxtdata *rcd;
156
157 rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
158 if (rcd) {
159 INIT_LIST_HEAD(&rcd->qp_wait_list);
160 rcd->ppd = ppd;
161 rcd->dd = dd;
162 rcd->cnt = 1;
163 rcd->ctxt = ctxt;
164 dd->rcd[ctxt] = rcd;
165
166 dd->f_init_ctxt(rcd);
167
168 /*
169 * To avoid wasting a lot of memory, we allocate 32KB chunks
170 * of physically contiguous memory, advance through it until
171 * used up and then allocate more. Of course, we need
172 * memory to store those extra pointers, now. 32KB seems to
173 * be the most that is "safe" under memory pressure
174 * (creating large files and then copying them over
175 * NFS while doing lots of MPI jobs). The OOM killer can
176 * get invoked, even though we say we can sleep and this can
177 * cause significant system problems....
178 */
179 rcd->rcvegrbuf_size = 0x8000;
180 rcd->rcvegrbufs_perchunk =
181 rcd->rcvegrbuf_size / dd->rcvegrbufsize;
182 rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
183 rcd->rcvegrbufs_perchunk - 1) /
184 rcd->rcvegrbufs_perchunk;
185 }
186 return rcd;
187}
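/*
 * For illustration of the chunk math above (assuming a 2 KB eager
 * buffer size and 512 eager entries; both are chip dependent): each
 * 32 KB chunk holds 32768 / 2048 = 16 buffers, so the context needs
 * (512 + 16 - 1) / 16 = 32 chunks.
 */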
188
189/*
190 * Common code for initializing the physical port structure.
191 */
192void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
193 u8 hw_pidx, u8 port)
194{
195 ppd->dd = dd;
196 ppd->hw_pidx = hw_pidx;
197 ppd->port = port; /* IB port number, not index */
198
199 spin_lock_init(&ppd->sdma_lock);
200 spin_lock_init(&ppd->lflags_lock);
201 init_waitqueue_head(&ppd->state_wait);
202
203 init_timer(&ppd->symerr_clear_timer);
204 ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
205 ppd->symerr_clear_timer.data = (unsigned long)ppd;
206}
207
208static int init_pioavailregs(struct qib_devdata *dd)
209{
210 int ret, pidx;
211 u64 *status_page;
212
213 dd->pioavailregs_dma = dma_alloc_coherent(
214 &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
215 GFP_KERNEL);
216 if (!dd->pioavailregs_dma) {
217 qib_dev_err(dd, "failed to allocate PIOavail reg area "
218 "in memory\n");
219 ret = -ENOMEM;
220 goto done;
221 }
222
223 /*
224 * We really want L2 cache aligned, but for current CPUs of
225 * interest, they are the same.
226 */
227 status_page = (u64 *)
228 ((char *) dd->pioavailregs_dma +
229 ((2 * L1_CACHE_BYTES +
230 dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
231 /* device status comes first, for backwards compatibility */
232 dd->devstatusp = status_page;
233 *status_page++ = 0;
234 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
235 dd->pport[pidx].statusp = status_page;
236 *status_page++ = 0;
237 }
238
239 /*
240 * Setup buffer to hold freeze and other messages, accessible to
241 * apps, following statusp. This is per-unit, not per port.
242 */
243 dd->freezemsg = (char *) status_page;
244 *dd->freezemsg = 0;
245 /* length of msg buffer is "whatever is left" */
246 ret = (char *) status_page - (char *) dd->pioavailregs_dma;
247 dd->freezelen = PAGE_SIZE - ret;
248
249 ret = 0;
250
251done:
252 return ret;
253}
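/*
 * Layout of the page allocated above: the DMA'ed pioavail registers
 * come first, then (cache-line aligned) one device status word, one
 * status word per port, and finally the freeze-message buffer, which
 * takes whatever remains of the page.
 */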
254
255/**
256 * init_shadow_tids - allocate the shadow TID array
257 * @dd: the qlogic_ib device
258 *
259 * allocate the shadow TID array, so we can qib_munlock previous
260 * entries. It may make more sense to move the pageshadow to the
261 * ctxt data structure, so we only allocate memory for ctxts actually
262 * in use, since we're at 8k per ctxt now.
263 * We don't want failures here to prevent use of the driver/chip,
264 * so no return value.
265 */
266static void init_shadow_tids(struct qib_devdata *dd)
267{
268 struct page **pages;
269 dma_addr_t *addrs;
270
271 pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
272 if (!pages) {
273 qib_dev_err(dd, "failed to allocate shadow page * "
274 "array, no expected sends!\n");
275 goto bail;
276 }
277
278 addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
279 if (!addrs) {
280 qib_dev_err(dd, "failed to allocate shadow dma handle "
281 "array, no expected sends!\n");
282 goto bail_free;
283 }
284
285 memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
286 memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
287
288 dd->pageshadow = pages;
289 dd->physshadow = addrs;
290 return;
291
292bail_free:
293 vfree(pages);
294bail:
295 dd->pageshadow = NULL;
296}
297
298/*
299 * Do initialization for device that is only needed on
300 * first detect, not on resets.
301 */
302static int loadtime_init(struct qib_devdata *dd)
303{
304 int ret = 0;
305
306 if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
307 QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
308 qib_dev_err(dd, "Driver only handles version %d, "
309 "chip swversion is %d (%llx), failing\n",
310 QIB_CHIP_SWVERSION,
311 (int)(dd->revision >>
312 QLOGIC_IB_R_SOFTWARE_SHIFT) &
313 QLOGIC_IB_R_SOFTWARE_MASK,
314 (unsigned long long) dd->revision);
315 ret = -ENOSYS;
316 goto done;
317 }
318
319 if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
320 qib_devinfo(dd->pcidev, "%s", dd->boardversion);
321
322 spin_lock_init(&dd->pioavail_lock);
323 spin_lock_init(&dd->sendctrl_lock);
324 spin_lock_init(&dd->uctxt_lock);
325 spin_lock_init(&dd->qib_diag_trans_lock);
326 spin_lock_init(&dd->eep_st_lock);
327 mutex_init(&dd->eep_lock);
328
329 if (qib_mini_init)
330 goto done;
331
332 ret = init_pioavailregs(dd);
333 init_shadow_tids(dd);
334
335 qib_get_eeprom_info(dd);
336
337 /* setup time (don't start yet) to verify we got interrupt */
338 init_timer(&dd->intrchk_timer);
339 dd->intrchk_timer.function = verify_interrupt;
340 dd->intrchk_timer.data = (unsigned long) dd;
341
342done:
343 return ret;
344}
345
346/**
347 * init_after_reset - re-initialize after a reset
348 * @dd: the qlogic_ib device
349 *
350 * sanity check at least some of the values after reset, and
351 * ensure no receive or transmit (explicitly, in case reset
352 * failed).
353 */
354static int init_after_reset(struct qib_devdata *dd)
355{
356 int i;
357
358 /*
359 * Ensure chip does no sends or receives, tail updates, or
360 * pioavail updates while we re-initialize. This is mostly
361 * for the driver data structures, not chip registers.
362 */
363 for (i = 0; i < dd->num_pports; ++i) {
364 /*
365 * ctxt == -1 means "all contexts". Only really safe for
366 * _dis_abling things, as here.
367 */
368 dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
369 QIB_RCVCTRL_INTRAVAIL_DIS |
370 QIB_RCVCTRL_TAILUPD_DIS, -1);
371 /* Redundant across ports for some, but no big deal. */
372 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
373 QIB_SENDCTRL_AVAIL_DIS);
374 }
375
376 return 0;
377}
378
379static void enable_chip(struct qib_devdata *dd)
380{
381 u64 rcvmask;
382 int i;
383
384 /*
385 * Enable PIO send, and update of PIOavail regs to memory.
386 */
387 for (i = 0; i < dd->num_pports; ++i)
388 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
389 QIB_SENDCTRL_AVAIL_ENB);
390 /*
391 * Enable kernel ctxts' receive and receive interrupt.
392 * Other ctxts done as user opens and inits them.
393 */
394 rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
395 rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
396 QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
397 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
398 struct qib_ctxtdata *rcd = dd->rcd[i];
399
400 if (rcd)
401 dd->f_rcvctrl(rcd->ppd, rcvmask, i);
402 }
403}
404
405static void verify_interrupt(unsigned long opaque)
406{
407 struct qib_devdata *dd = (struct qib_devdata *) opaque;
408
409 if (!dd)
410 return; /* being torn down */
411
412 /*
413 * If we don't have a lid or any interrupts, let the user know and
414 * don't bother checking again.
415 */
416 if (dd->int_counter == 0) {
417 if (!dd->f_intr_fallback(dd))
418 dev_err(&dd->pcidev->dev, "No interrupts detected, "
419 "not usable.\n");
420 else /* re-arm the timer to see if fallback works */
421 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
422 }
423}
424
425static void init_piobuf_state(struct qib_devdata *dd)
426{
427 int i, pidx;
428 u32 uctxts;
429
430 /*
431 * Ensure all buffers are free, and fifos empty. Buffers
432 * are common, so only do once for port 0.
433 *
434 * After enable and qib_chg_pioavailkernel so we can safely
435 * enable pioavail updates and PIOENABLE. After this, packets
436 * are ready and able to go out.
437 */
438 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
439 for (pidx = 0; pidx < dd->num_pports; ++pidx)
440 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
441
442 /*
443 * If not all sendbufs are used, add the one to each of the lower
444 * numbered contexts. pbufsctxt and lastctxt_piobuf are
445 * calculated in chip-specific code because it may cause some
446 * chip-specific adjustments to be made.
447 */
448 uctxts = dd->cfgctxts - dd->first_user_ctxt;
449 dd->ctxts_extrabuf = dd->pbufsctxt ?
450 dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
451
452 /*
453 * Set up the shadow copies of the piobufavail registers,
454 * which we compare against the chip registers for now, and
455 * the in memory DMA'ed copies of the registers.
456 * By now pioavail updates to memory should have occurred, so
457 * copy them into our working/shadow registers; this is in
458 * case something went wrong with abort, but mostly to get the
459 * initial values of the generation bit correct.
460 */
461 for (i = 0; i < dd->pioavregs; i++) {
462 __le64 tmp;
463
464 tmp = dd->pioavailregs_dma[i];
465 /*
466 * Don't need to worry about pioavailkernel here
467 * because we will call qib_chg_pioavailkernel() later
468 * in initialization, to busy out buffers as needed.
469 */
470 dd->pioavailshadow[i] = le64_to_cpu(tmp);
471 }
472 while (i < ARRAY_SIZE(dd->pioavailshadow))
473 dd->pioavailshadow[i++] = 0; /* for debugging sanity */
474
475 /* after pioavailshadow is setup */
476 qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
477 TXCHK_CHG_TYPE_KERN, NULL);
478 dd->f_initvl15_bufs(dd);
479}
480
481/**
482 * qib_init - do the actual initialization sequence on the chip
483 * @dd: the qlogic_ib device
484 * @reinit: reinitializing, so don't allocate new memory
485 *
486 * Do the actual initialization sequence on the chip. This is done
487 * both from the init routine called from the PCI infrastructure, and
488 * when we reset the chip, or detect that it was reset internally,
489 * or it's administratively re-enabled.
490 *
491 * Memory allocation here and in called routines is only done in
492 * the first case (reinit == 0). We have to be careful, because even
493 * without memory allocation, we need to re-write all the chip registers,
494 * TIDs, etc. after the reset or enable has completed.
495 */
496int qib_init(struct qib_devdata *dd, int reinit)
497{
498 int ret = 0, pidx, lastfail = 0;
499 u32 portok = 0;
500 unsigned i;
501 struct qib_ctxtdata *rcd;
502 struct qib_pportdata *ppd;
503 unsigned long flags;
504
505 /* Set linkstate to unknown, so we can watch for a transition. */
506 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
507 ppd = dd->pport + pidx;
508 spin_lock_irqsave(&ppd->lflags_lock, flags);
509 ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
510 QIBL_LINKDOWN | QIBL_LINKINIT |
511 QIBL_LINKV);
512 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
513 }
514
515 if (reinit)
516 ret = init_after_reset(dd);
517 else
518 ret = loadtime_init(dd);
519 if (ret)
520 goto done;
521
522 /* Bypass most chip-init, to get to device creation */
523 if (qib_mini_init)
524 return 0;
525
526 ret = dd->f_late_initreg(dd);
527 if (ret)
528 goto done;
529
530 /* dd->rcd can be NULL if early init failed */
531 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
532 /*
533 * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
534 * re-init, the simplest way to handle this is to free
535 * existing, and re-allocate.
536 * Need to re-create rest of ctxt 0 ctxtdata as well.
537 */
538 rcd = dd->rcd[i];
539 if (!rcd)
540 continue;
541
542 lastfail = qib_create_rcvhdrq(dd, rcd);
543 if (!lastfail)
544 lastfail = qib_setup_eagerbufs(rcd);
545 if (lastfail) {
546 qib_dev_err(dd, "failed to allocate kernel ctxt's "
547 "rcvhdrq and/or egr bufs\n");
548 continue;
549 }
550 }
551
552 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
553 int mtu;
554 if (lastfail)
555 ret = lastfail;
556 ppd = dd->pport + pidx;
557 mtu = ib_mtu_enum_to_int(qib_ibmtu);
558 if (mtu == -1) {
559 mtu = QIB_DEFAULT_MTU;
560 qib_ibmtu = 0; /* don't leave invalid value */
561 }
562 /* set max we can ever have for this driver load */
563 ppd->init_ibmaxlen = min(mtu > 2048 ?
564 dd->piosize4k : dd->piosize2k,
565 dd->rcvegrbufsize +
566 (dd->rcvhdrentsize << 2));
567 /*
568 * Have to initialize ibmaxlen, but this will normally
569 * change immediately in qib_set_mtu().
570 */
571 ppd->ibmaxlen = ppd->init_ibmaxlen;
572 qib_set_mtu(ppd, mtu);
573
574 spin_lock_irqsave(&ppd->lflags_lock, flags);
575 ppd->lflags |= QIBL_IB_LINK_DISABLED;
576 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
577
578 lastfail = dd->f_bringup_serdes(ppd);
579 if (lastfail) {
580 qib_devinfo(dd->pcidev,
581 "Failed to bringup IB port %u\n", ppd->port);
582 lastfail = -ENETDOWN;
583 continue;
584 }
585
586 /* let link come up, and enable IBC */
587 spin_lock_irqsave(&ppd->lflags_lock, flags);
588 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
589 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
590 portok++;
591 }
592
593 if (!portok) {
594 /* none of the ports initialized */
595 if (!ret && lastfail)
596 ret = lastfail;
597 else if (!ret)
598 ret = -ENETDOWN;
599 /* but continue on, so we can debug cause */
600 }
601
602 enable_chip(dd);
603
604 init_piobuf_state(dd);
605
606done:
607 if (!ret) {
608 /* chip is OK for user apps; mark it as initialized */
609 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
610 ppd = dd->pport + pidx;
611 /*
612 * Set status even if port serdes is not initialized
613 * so that diags will work.
614 */
615 *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
616 QIB_STATUS_INITTED;
617 if (!ppd->link_speed_enabled)
618 continue;
619 if (dd->flags & QIB_HAS_SEND_DMA)
620 ret = qib_setup_sdma(ppd);
621 init_timer(&ppd->hol_timer);
622 ppd->hol_timer.function = qib_hol_event;
623 ppd->hol_timer.data = (unsigned long)ppd;
624 ppd->hol_state = QIB_HOL_UP;
625 }
626
627 /* now we can enable all interrupts from the chip */
628 dd->f_set_intr_state(dd, 1);
629
630 /*
631 * Setup to verify we get an interrupt, and fallback
632 * to an alternate if necessary and possible.
633 */
634 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
635 /* start stats retrieval timer */
636 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
637 }
638
639 /* if ret is non-zero, we probably should do some cleanup here... */
640 return ret;
641}
642
643/*
644 * These next two routines are placeholders in case we don't have per-arch
645 * code for controlling write combining. If explicit control of write
646 * combining is not available, performance will probably be awful.
647 */
648
649int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
650{
651 return -EOPNOTSUPP;
652}
653
654void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
655{
656}
657
658static inline struct qib_devdata *__qib_lookup(int unit)
659{
660 return idr_find(&qib_unit_table, unit);
661}
662
663struct qib_devdata *qib_lookup(int unit)
664{
665 struct qib_devdata *dd;
666 unsigned long flags;
667
668 spin_lock_irqsave(&qib_devs_lock, flags);
669 dd = __qib_lookup(unit);
670 spin_unlock_irqrestore(&qib_devs_lock, flags);
671
672 return dd;
673}
674
675/*
676 * Stop the timers during unit shutdown, or after an error late
677 * in initialization.
678 */
679static void qib_stop_timers(struct qib_devdata *dd)
680{
681 struct qib_pportdata *ppd;
682 int pidx;
683
684 if (dd->stats_timer.data) {
685 del_timer_sync(&dd->stats_timer);
686 dd->stats_timer.data = 0;
687 }
688 if (dd->intrchk_timer.data) {
689 del_timer_sync(&dd->intrchk_timer);
690 dd->intrchk_timer.data = 0;
691 }
692 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
693 ppd = dd->pport + pidx;
694 if (ppd->hol_timer.data)
695 del_timer_sync(&ppd->hol_timer);
696 if (ppd->led_override_timer.data) {
697 del_timer_sync(&ppd->led_override_timer);
698 atomic_set(&ppd->led_override_timer_active, 0);
699 }
700 if (ppd->symerr_clear_timer.data)
701 del_timer_sync(&ppd->symerr_clear_timer);
702 }
703}
704
705/**
706 * qib_shutdown_device - shut down a device
707 * @dd: the qlogic_ib device
708 *
709 * This is called to make the device quiet when we are about to
710 * unload the driver, and also when the device is administratively
711 * disabled. It does not free any data structures.
712 * Everything it does has to be setup again by qib_init(dd, 1)
713 */
714static void qib_shutdown_device(struct qib_devdata *dd)
715{
716 struct qib_pportdata *ppd;
717 unsigned pidx;
718
719 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
720 ppd = dd->pport + pidx;
721
722 spin_lock_irq(&ppd->lflags_lock);
723 ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
724 QIBL_LINKARMED | QIBL_LINKACTIVE |
725 QIBL_LINKV);
726 spin_unlock_irq(&ppd->lflags_lock);
727 *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
728 }
729 dd->flags &= ~QIB_INITTED;
730
731 /* mask interrupts, but not errors */
732 dd->f_set_intr_state(dd, 0);
733
734 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
735 ppd = dd->pport + pidx;
736 dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
737 QIB_RCVCTRL_CTXT_DIS |
738 QIB_RCVCTRL_INTRAVAIL_DIS |
739 QIB_RCVCTRL_PKEY_ENB, -1);
740 /*
741 * Gracefully stop all sends allowing any in progress to
742 * trickle out first.
743 */
744 dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
745 }
746
747 /*
748 * Enough for anything that's going to trickle out to have actually
749 * done so.
750 */
751 udelay(20);
752
753 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
754 ppd = dd->pport + pidx;
755 dd->f_setextled(ppd, 0); /* make sure LEDs are off */
756
757 if (dd->flags & QIB_HAS_SEND_DMA)
758 qib_teardown_sdma(ppd);
759
760 dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
761 QIB_SENDCTRL_SEND_DIS);
762 /*
763 * Clear SerdesEnable.
764 * We can't count on interrupts since we are stopping.
765 */
766 dd->f_quiet_serdes(ppd);
767 }
768
769 qib_update_eeprom_log(dd);
770}
771
772/**
773 * qib_free_ctxtdata - free a context's allocated data
774 * @dd: the qlogic_ib device
775 * @rcd: the ctxtdata structure
776 *
777 * free up any allocated data for a context
778 * This should not touch anything that would affect a simultaneous
779 * re-allocation of context data, because it is called after qib_mutex
780 * is released (and can be called from reinit as well).
781 * It should never change any chip state, or global driver state.
782 */
783void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
784{
785 if (!rcd)
786 return;
787
788 if (rcd->rcvhdrq) {
789 dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
790 rcd->rcvhdrq, rcd->rcvhdrq_phys);
791 rcd->rcvhdrq = NULL;
792 if (rcd->rcvhdrtail_kvaddr) {
793 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
794 rcd->rcvhdrtail_kvaddr,
795 rcd->rcvhdrqtailaddr_phys);
796 rcd->rcvhdrtail_kvaddr = NULL;
797 }
798 }
799 if (rcd->rcvegrbuf) {
800 unsigned e;
801
802 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
803 void *base = rcd->rcvegrbuf[e];
804 size_t size = rcd->rcvegrbuf_size;
805
806 dma_free_coherent(&dd->pcidev->dev, size,
807 base, rcd->rcvegrbuf_phys[e]);
808 }
809 kfree(rcd->rcvegrbuf);
810 rcd->rcvegrbuf = NULL;
811 kfree(rcd->rcvegrbuf_phys);
812 rcd->rcvegrbuf_phys = NULL;
813 rcd->rcvegrbuf_chunks = 0;
814 }
815
816 kfree(rcd->tid_pg_list);
817 vfree(rcd->user_event_mask);
818 vfree(rcd->subctxt_uregbase);
819 vfree(rcd->subctxt_rcvegrbuf);
820 vfree(rcd->subctxt_rcvhdr_base);
821 kfree(rcd);
822}
823
824/*
825 * Perform a PIO buffer bandwidth write test, to verify proper system
826 * configuration. Even when all the setup calls work, occasionally
827 * BIOS or other issues can prevent write combining from working, or
828 * can cause other bandwidth problems to the chip.
829 *
830 * This test simply writes the same buffer over and over again, and
831 * measures close to the peak bandwidth to the chip (not testing
832 * data bandwidth to the wire). On chips that use an address-based
833 * trigger to send packets to the wire, this is easy. On chips that
834 * use a count to trigger, we want to make sure that the packet doesn't
835 * go out on the wire, or trigger flow control checks.
836 */
837static void qib_verify_pioperf(struct qib_devdata *dd)
838{
839 u32 pbnum, cnt, lcnt;
840 u32 __iomem *piobuf;
841 u32 *addr;
842 u64 msecs, emsecs;
843
844 piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
845 if (!piobuf) {
846 qib_devinfo(dd->pcidev,
847 "No PIObufs for checking perf, skipping\n");
848 return;
849 }
850
851 /*
852 * Enough to give us a reasonable test, less than piobuf size, and
853 * likely multiple of store buffer length.
854 */
855 cnt = 1024;
856
857 addr = vmalloc(cnt);
858 if (!addr) {
859 qib_devinfo(dd->pcidev,
860 "Couldn't get memory for checking PIO perf,"
861 " skipping\n");
862 goto done;
863 }
864
865 preempt_disable(); /* we want reasonably accurate elapsed time */
866 msecs = 1 + jiffies_to_msecs(jiffies);
867 for (lcnt = 0; lcnt < 10000U; lcnt++) {
868 /* wait until we cross msec boundary */
869 if (jiffies_to_msecs(jiffies) >= msecs)
870 break;
871 udelay(1);
872 }
873
874 dd->f_set_armlaunch(dd, 0);
875
876 /*
877 * length 0, no dwords actually sent
878 */
879 writeq(0, piobuf);
880 qib_flush_wc();
881
882 /*
883 * This is only roughly accurate, since even with preempt we
884 * still take interrupts that could take a while. Running for
885 * >= 5 msec seems to get us "close enough" to accurate values.
886 */
887 msecs = jiffies_to_msecs(jiffies);
888 for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
889 qib_pio_copy(piobuf + 64, addr, cnt >> 2);
890 emsecs = jiffies_to_msecs(jiffies) - msecs;
891 }
892
893 /* 1 GiB/sec, slightly over IB SDR line rate */
894 if (lcnt < (emsecs * 1024U))
895 qib_dev_err(dd,
896 "Performance problem: bandwidth to PIO buffers is "
897 "only %u MiB/sec\n",
898 lcnt / (u32) emsecs);
899
900 preempt_enable();
901
902 vfree(addr);
903
904done:
905 /* disarm piobuf, so it's available again */
906 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
907 qib_sendbuf_done(dd, pbnum);
908 dd->f_set_armlaunch(dd, 1);
909}
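/*
 * Rough arithmetic behind the check above: each iteration copies
 * cnt = 1024 bytes, so lcnt iterations in emsecs milliseconds is about
 * lcnt / emsecs KiB per millisecond; requiring lcnt >= emsecs * 1024
 * therefore corresponds to roughly 1 GiB/sec into the PIO buffers.
 */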
910
911
912void qib_free_devdata(struct qib_devdata *dd)
913{
914 unsigned long flags;
915
916 spin_lock_irqsave(&qib_devs_lock, flags);
917 idr_remove(&qib_unit_table, dd->unit);
918 list_del(&dd->list);
919 spin_unlock_irqrestore(&qib_devs_lock, flags);
920
921 ib_dealloc_device(&dd->verbs_dev.ibdev);
922}
923
924/*
925 * Allocate our primary per-unit data structure. Must be done via verbs
926 * allocator, because the verbs cleanup process both does cleanup and
927 * free of the data structure.
928 * "extra" is for chip-specific data.
929 *
930 * Use the idr mechanism to get a unit number for this unit.
931 */
932struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
933{
934 unsigned long flags;
935 struct qib_devdata *dd;
936 int ret;
937
938 if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
939 dd = ERR_PTR(-ENOMEM);
940 goto bail;
941 }
942
943 dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
944 if (!dd) {
945 dd = ERR_PTR(-ENOMEM);
946 goto bail;
947 }
948
949 spin_lock_irqsave(&qib_devs_lock, flags);
950 ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
951 if (ret >= 0)
952 list_add(&dd->list, &qib_dev_list);
953 spin_unlock_irqrestore(&qib_devs_lock, flags);
954
955 if (ret < 0) {
956 qib_early_err(&pdev->dev,
957 "Could not allocate unit ID: error %d\n", -ret);
958 ib_dealloc_device(&dd->verbs_dev.ibdev);
959 dd = ERR_PTR(ret);
960 goto bail;
961 }
962
963 if (!qib_cpulist_count) {
964 u32 count = num_online_cpus();
965 qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
966 sizeof(long), GFP_KERNEL);
967 if (qib_cpulist)
968 qib_cpulist_count = count;
969 else
970 qib_early_err(&pdev->dev, "Could not alloc cpulist "
971 "info, cpu affinity might be wrong\n");
972 }
973
974bail:
975 return dd;
976}
977
978/*
979 * Called from freeze mode handlers, and from PCI error
980 * reporting code. Should be paranoid about state of
981 * system and data structures.
982 */
983void qib_disable_after_error(struct qib_devdata *dd)
984{
985 if (dd->flags & QIB_INITTED) {
986 u32 pidx;
987
988 dd->flags &= ~QIB_INITTED;
989 if (dd->pport)
990 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
991 struct qib_pportdata *ppd;
992
993 ppd = dd->pport + pidx;
994 if (dd->flags & QIB_PRESENT) {
995 qib_set_linkstate(ppd,
996 QIB_IB_LINKDOWN_DISABLE);
997 dd->f_setextled(ppd, 0);
998 }
999 *ppd->statusp &= ~QIB_STATUS_IB_READY;
1000 }
1001 }
1002
1003 /*
1004 * Mark as having had an error for driver, and also
1005 * for /sys and status word mapped to user programs.
1006 * This marks unit as not usable, until reset.
1007 */
1008 if (dd->devstatusp)
1009 *dd->devstatusp |= QIB_STATUS_HWERROR;
1010}
1011
1012static void __devexit qib_remove_one(struct pci_dev *);
1013static int __devinit qib_init_one(struct pci_dev *,
1014 const struct pci_device_id *);
1015
1016#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
1017#define PFX QIB_DRV_NAME ": "
1018
1019static const struct pci_device_id qib_pci_tbl[] = {
1020 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
1021 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
1022 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
1023 { 0, }
1024};
1025
1026MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
1027
1028struct pci_driver qib_driver = {
1029 .name = QIB_DRV_NAME,
1030 .probe = qib_init_one,
1031 .remove = __devexit_p(qib_remove_one),
1032 .id_table = qib_pci_tbl,
1033 .err_handler = &qib_pci_err_handler,
1034};
1035
1036/*
1037 * Do all the generic driver unit- and chip-independent memory
1038 * allocation and initialization.
1039 */
1040static int __init qlogic_ib_init(void)
1041{
1042 int ret;
1043
1044 ret = qib_dev_init();
1045 if (ret)
1046 goto bail;
1047
1048 /*
1049 * We create our own workqueue mainly because we want to be
1050 * able to flush it when devices are being removed. We can't
1051 * use schedule_work()/flush_scheduled_work() because both
1052 * unregister_netdev() and linkwatch_event take the rtnl lock,
1053 * so flush_scheduled_work() can deadlock during device
1054 * removal.
1055 */
1056 qib_wq = create_workqueue("qib");
1057 if (!qib_wq) {
1058 ret = -ENOMEM;
1059 goto bail_dev;
1060 }
1061
1062 qib_cq_wq = create_workqueue("qib_cq");
1063 if (!qib_cq_wq) {
1064 ret = -ENOMEM;
1065 goto bail_wq;
1066 }
1067
1068 /*
1069 * These must be called before the driver is registered with
1070 * the PCI subsystem.
1071 */
1072 idr_init(&qib_unit_table);
1073 if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
1074 printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
1075 ret = -ENOMEM;
1076 goto bail_cq_wq;
1077 }
1078
1079 ret = pci_register_driver(&qib_driver);
1080 if (ret < 0) {
1081 printk(KERN_ERR QIB_DRV_NAME
1082 ": Unable to register driver: error %d\n", -ret);
1083 goto bail_unit;
1084 }
1085
1086 /* not fatal if it doesn't work */
1087 if (qib_init_qibfs())
1088 printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
1089 goto bail; /* all OK */
1090
1091bail_unit:
1092 idr_destroy(&qib_unit_table);
1093bail_cq_wq:
1094 destroy_workqueue(qib_cq_wq);
1095bail_wq:
1096 destroy_workqueue(qib_wq);
1097bail_dev:
1098 qib_dev_cleanup();
1099bail:
1100 return ret;
1101}
1102
1103module_init(qlogic_ib_init);
1104
1105/*
1106 * Do the non-unit driver cleanup, memory free, etc. at unload.
1107 */
1108static void __exit qlogic_ib_cleanup(void)
1109{
1110 int ret;
1111
1112 ret = qib_exit_qibfs();
1113 if (ret)
1114 printk(KERN_ERR QIB_DRV_NAME ": "
1115 "Unable to cleanup counter filesystem: "
1116 "error %d\n", -ret);
1117
1118 pci_unregister_driver(&qib_driver);
1119
1120 destroy_workqueue(qib_wq);
1121 destroy_workqueue(qib_cq_wq);
1122
1123 qib_cpulist_count = 0;
1124 kfree(qib_cpulist);
1125
1126 idr_destroy(&qib_unit_table);
1127 qib_dev_cleanup();
1128}
1129
1130module_exit(qlogic_ib_cleanup);
1131
1132/* this can only be called after a successful initialization */
1133static void cleanup_device_data(struct qib_devdata *dd)
1134{
1135 int ctxt;
1136 int pidx;
1137 struct qib_ctxtdata **tmp;
1138 unsigned long flags;
1139
1140 /* users can't do anything more with chip */
1141 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1142 if (dd->pport[pidx].statusp)
1143 *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
1144
1145 if (!qib_wc_pat)
1146 qib_disable_wc(dd);
1147
1148 if (dd->pioavailregs_dma) {
1149 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1150 (void *) dd->pioavailregs_dma,
1151 dd->pioavailregs_phys);
1152 dd->pioavailregs_dma = NULL;
1153 }
1154
1155 if (dd->pageshadow) {
1156 struct page **tmpp = dd->pageshadow;
1157 dma_addr_t *tmpd = dd->physshadow;
1158 int i, cnt = 0;
1159
1160 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
1161 int ctxt_tidbase = ctxt * dd->rcvtidcnt;
1162 int maxtid = ctxt_tidbase + dd->rcvtidcnt;
1163
1164 for (i = ctxt_tidbase; i < maxtid; i++) {
1165 if (!tmpp[i])
1166 continue;
1167 pci_unmap_page(dd->pcidev, tmpd[i],
1168 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1169 qib_release_user_pages(&tmpp[i], 1);
1170 tmpp[i] = NULL;
1171 cnt++;
1172 }
1173 }
1174
1175 tmpp = dd->pageshadow;
1176 dd->pageshadow = NULL;
1177 vfree(tmpp);
1178 }
1179
1180 /*
1181 * Free any resources still in use (usually just kernel contexts)
1182 * at unload; we do this for all of ctxtcnt, because that's what we allocate.
1183 * We acquire lock to be really paranoid that rcd isn't being
1184 * accessed from some interrupt-related code (that should not happen,
1185 * but best to be sure).
1186 */
1187 spin_lock_irqsave(&dd->uctxt_lock, flags);
1188 tmp = dd->rcd;
1189 dd->rcd = NULL;
1190 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1191 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
1192 struct qib_ctxtdata *rcd = tmp[ctxt];
1193
1194 tmp[ctxt] = NULL; /* debugging paranoia */
1195 qib_free_ctxtdata(dd, rcd);
1196 }
1197 kfree(tmp);
1198 kfree(dd->boardname);
1199}
1200
1201/*
1202 * Clean up on unit shutdown, or error during unit load after
1203 * successful initialization.
1204 */
1205static void qib_postinit_cleanup(struct qib_devdata *dd)
1206{
1207 /*
1208 * Clean up chip-specific stuff.
1209 * We check for NULL here, because it's outside
1210 * the kregbase check, and we need to call it
1211 * after the free_irq. Thus it's possible that
1212 * the function pointers were never initialized.
1213 */
1214 if (dd->f_cleanup)
1215 dd->f_cleanup(dd);
1216
1217 qib_pcie_ddcleanup(dd);
1218
1219 cleanup_device_data(dd);
1220
1221 qib_free_devdata(dd);
1222}
1223
1224static int __devinit qib_init_one(struct pci_dev *pdev,
1225 const struct pci_device_id *ent)
1226{
1227 int ret, j, pidx, initfail;
1228 struct qib_devdata *dd = NULL;
1229
1230 ret = qib_pcie_init(pdev, ent);
1231 if (ret)
1232 goto bail;
1233
1234 /*
1235 * Do device-specific initialization, function table setup, dd
1236 * allocation, etc.
1237 */
1238 switch (ent->device) {
1239 case PCI_DEVICE_ID_QLOGIC_IB_6120:
1240#ifdef CONFIG_PCI_MSI
1241 dd = qib_init_iba6120_funcs(pdev, ent);
1242#else
1243 qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
1244 "work if CONFIG_PCI_MSI is not enabled\n",
1245 ent->device);
1246#endif
1247 break;
1248
1249 case PCI_DEVICE_ID_QLOGIC_IB_7220:
1250 dd = qib_init_iba7220_funcs(pdev, ent);
1251 break;
1252
1253 case PCI_DEVICE_ID_QLOGIC_IB_7322:
1254 dd = qib_init_iba7322_funcs(pdev, ent);
1255 break;
1256
1257 default:
1258 qib_early_err(&pdev->dev, "Failing on unknown QLogic "
1259 "deviceid 0x%x\n", ent->device);
1260 ret = -ENODEV;
1261 }
1262
1263 if (IS_ERR(dd))
1264 ret = PTR_ERR(dd);
1265 if (ret)
1266 goto bail; /* error already printed */
1267
1268 /* do the generic initialization */
1269 initfail = qib_init(dd, 0);
1270
1271 ret = qib_register_ib_device(dd);
1272
1273 /*
1274 * Now ready for use. This should be cleared whenever we
1275 * detect a reset, or initiate one. If earlier failure,
1276 * we still create devices, so diags, etc. can be used
1277 * to determine cause of problem.
1278 */
1279 if (!qib_mini_init && !initfail && !ret)
1280 dd->flags |= QIB_INITTED;
1281
1282 j = qib_device_create(dd);
1283 if (j)
1284 qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1285 j = qibfs_add(dd);
1286 if (j)
1287 qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
1288 -j);
1289
1290 if (qib_mini_init || initfail || ret) {
1291 qib_stop_timers(dd);
1292 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1293 dd->f_quiet_serdes(dd->pport + pidx);
1294 if (initfail)
1295 ret = initfail;
1296 goto bail;
1297 }
1298
1299 if (!qib_wc_pat) {
1300 ret = qib_enable_wc(dd);
1301 if (ret) {
1302 qib_dev_err(dd, "Write combining not enabled "
1303 "(err %d): performance may be poor\n",
1304 -ret);
1305 ret = 0;
1306 }
1307 }
1308
1309 qib_verify_pioperf(dd);
1310bail:
1311 return ret;
1312}
1313
1314static void __devexit qib_remove_one(struct pci_dev *pdev)
1315{
1316 struct qib_devdata *dd = pci_get_drvdata(pdev);
1317 int ret;
1318
1319 /* unregister from IB core */
1320 qib_unregister_ib_device(dd);
1321
1322 /*
1323 * Disable the IB link, disable interrupts on the device,
1324 * clear dma engines, etc.
1325 */
1326 if (!qib_mini_init)
1327 qib_shutdown_device(dd);
1328
1329 qib_stop_timers(dd);
1330
1331 /* wait until all of our (qsfp) schedule_work() calls complete */
1332 flush_scheduled_work();
1333
1334 ret = qibfs_remove(dd);
1335 if (ret)
1336 qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
1337 -ret);
1338
1339 qib_device_remove(dd);
1340
1341 qib_postinit_cleanup(dd);
1342}
1343
1344/**
1345 * qib_create_rcvhdrq - create a receive header queue
1346 * @dd: the qlogic_ib device
1347 * @rcd: the context data
1348 *
1349 * This must be contiguous memory (from an i/o perspective), and must be
1350 * DMA'able (which means for some systems, it will go through an IOMMU,
1351 * or be forced into a low address range).
1352 */
1353int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
1354{
1355 unsigned amt;
1356
1357 if (!rcd->rcvhdrq) {
1358 dma_addr_t phys_hdrqtail;
1359 gfp_t gfp_flags;
1360
1361 amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1362 sizeof(u32), PAGE_SIZE);
1363 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
1364 GFP_USER : GFP_KERNEL;
1365 rcd->rcvhdrq = dma_alloc_coherent(
1366 &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
1367 gfp_flags | __GFP_COMP);
1368
1369 if (!rcd->rcvhdrq) {
1370 qib_dev_err(dd, "attempt to allocate %d bytes "
1371 "for ctxt %u rcvhdrq failed\n",
1372 amt, rcd->ctxt);
1373 goto bail;
1374 }
1375
1376 if (rcd->ctxt >= dd->first_user_ctxt) {
1377 rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
1378 if (!rcd->user_event_mask)
1379 goto bail_free_hdrq;
1380 }
1381
1382 if (!(dd->flags & QIB_NODMA_RTAIL)) {
1383 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
1384 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1385 gfp_flags);
1386 if (!rcd->rcvhdrtail_kvaddr)
1387 goto bail_free;
1388 rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
1389 }
1390
1391 rcd->rcvhdrq_size = amt;
1392 }
1393
1394 /* clear for security and sanity on each use */
1395 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
1396 if (rcd->rcvhdrtail_kvaddr)
1397 memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
1398 return 0;
1399
1400bail_free:
1401 qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
1402 "rcvhdrqtailaddr failed\n", rcd->ctxt);
1403 vfree(rcd->user_event_mask);
1404 rcd->user_event_mask = NULL;
1405bail_free_hdrq:
1406 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1407 rcd->rcvhdrq_phys);
1408 rcd->rcvhdrq = NULL;
1409bail:
1410 return -ENOMEM;
1411}
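/*
 * For illustration of the sizing above (assuming 4096 header entries
 * of 32 dwords each; both values are chip dependent): amt becomes
 * ALIGN(4096 * 32 * 4, PAGE_SIZE) = 512 KiB, i.e. the header queue is
 * always allocated in whole pages.
 */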
1412
1413/**
1414 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
1415 * @rcd: the context we are setting up.
1416 *
1417 * Allocate the eager TID buffers and program them into the chip.
1418 * They are no longer completely contiguous, we do multiple allocation
1419 * calls. Otherwise we get the OOM code involved, by asking for too
1420 * much per call, with disastrous results on some kernels.
1421 */
1422int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
1423{
1424 struct qib_devdata *dd = rcd->dd;
1425 unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
1426 size_t size;
1427 gfp_t gfp_flags;
1428
1429 /*
1430 * GFP_USER, but without GFP_FS, so buffer cache can be
1431 * coalesced (we hope); otherwise, even at order 4,
1432 * heavy filesystem activity makes these fail, and we can
1433 * use compound pages.
1434 */
1435 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
1436
1437 egrcnt = rcd->rcvegrcnt;
1438 egroff = rcd->rcvegr_tid_base;
1439 egrsize = dd->rcvegrbufsize;
1440
1441 chunk = rcd->rcvegrbuf_chunks;
1442 egrperchunk = rcd->rcvegrbufs_perchunk;
1443 size = rcd->rcvegrbuf_size;
1444 if (!rcd->rcvegrbuf) {
1445 rcd->rcvegrbuf =
1446 kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
1447 GFP_KERNEL);
1448 if (!rcd->rcvegrbuf)
1449 goto bail;
1450 }
1451 if (!rcd->rcvegrbuf_phys) {
1452 rcd->rcvegrbuf_phys =
1453 kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
1454 GFP_KERNEL);
1455 if (!rcd->rcvegrbuf_phys)
1456 goto bail_rcvegrbuf;
1457 }
1458 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
1459 if (rcd->rcvegrbuf[e])
1460 continue;
1461 rcd->rcvegrbuf[e] =
1462 dma_alloc_coherent(&dd->pcidev->dev, size,
1463 &rcd->rcvegrbuf_phys[e],
1464 gfp_flags);
1465 if (!rcd->rcvegrbuf[e])
1466 goto bail_rcvegrbuf_phys;
1467 }
1468
1469 rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
1470
1471 for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
1472 dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
1473 unsigned i;
1474
1475 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
1476 dd->f_put_tid(dd, e + egroff +
1477 (u64 __iomem *)
1478 ((char __iomem *)
1479 dd->kregbase +
1480 dd->rcvegrbase),
1481 RCVHQ_RCV_TYPE_EAGER, pa);
1482 pa += egrsize;
1483 }
1484 cond_resched(); /* don't hog the cpu */
1485 }
1486
1487 return 0;
1488
1489bail_rcvegrbuf_phys:
1490 for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
1491 dma_free_coherent(&dd->pcidev->dev, size,
1492 rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
1493 kfree(rcd->rcvegrbuf_phys);
1494 rcd->rcvegrbuf_phys = NULL;
1495bail_rcvegrbuf:
1496 kfree(rcd->rcvegrbuf);
1497 rcd->rcvegrbuf = NULL;
1498bail:
1499 return -ENOMEM;
1500}
1501
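/*
 * init_chip_wc_pat() below remaps the device: the kernel register
 * window is mapped uncached, the PIO buffer range (2K and 4K buffers
 * plus the VL15 buffers) is mapped write-combined via ioremap_wc(),
 * and, when the user registers sit above the PIO range, they get a
 * separate uncached mapping sized for the configured contexts only.
 */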
1502int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
1503{
1504 u64 __iomem *qib_kregbase = NULL;
1505 void __iomem *qib_piobase = NULL;
1506 u64 __iomem *qib_userbase = NULL;
1507 u64 qib_kreglen;
1508 u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
1509 u64 qib_pio4koffset = dd->piobufbase >> 32;
1510 u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
1511 u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
1512 u64 qib_physaddr = dd->physaddr;
1513 u64 qib_piolen;
1514 u64 qib_userlen = 0;
1515
1516 /*
1517 * Free the old mapping because the kernel will try to reuse the
1518 * old mapping and not create a new mapping with the
1519 * write combining attribute.
1520 */
1521 iounmap(dd->kregbase);
1522 dd->kregbase = NULL;
1523
1524 /*
1525 * Assumes chip address space looks like:
1526 * - kregs + sregs + cregs + uregs (in any order)
1527 * - piobufs (2K and 4K bufs in either order)
1528 * or:
1529 * - kregs + sregs + cregs (in any order)
1530 * - piobufs (2K and 4K bufs in either order)
1531 * - uregs
1532 */
1533 if (dd->piobcnt4k == 0) {
1534 qib_kreglen = qib_pio2koffset;
1535 qib_piolen = qib_pio2klen;
1536 } else if (qib_pio2koffset < qib_pio4koffset) {
1537 qib_kreglen = qib_pio2koffset;
1538 qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
1539 } else {
1540 qib_kreglen = qib_pio4koffset;
1541 qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
1542 }
1543 qib_piolen += vl15buflen;
1544 /* Map just the configured ports (not all hw ports) */
1545 if (dd->uregbase > qib_kreglen)
1546 qib_userlen = dd->ureg_align * dd->cfgctxts;
1547
1548 /* Sanity checks passed, now create the new mappings */
1549 qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
1550 if (!qib_kregbase)
1551 goto bail;
1552
1553 qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
1554 if (!qib_piobase)
1555 goto bail_kregbase;
1556
1557 if (qib_userlen) {
1558 qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
1559 qib_userlen);
1560 if (!qib_userbase)
1561 goto bail_piobase;
1562 }
1563
1564 dd->kregbase = qib_kregbase;
1565 dd->kregend = (u64 __iomem *)
1566 ((char __iomem *) qib_kregbase + qib_kreglen);
1567 dd->piobase = qib_piobase;
1568 dd->pio2kbase = (void __iomem *)
1569 (((char __iomem *) dd->piobase) +
1570 qib_pio2koffset - qib_kreglen);
1571 if (dd->piobcnt4k)
1572 dd->pio4kbase = (void __iomem *)
1573 (((char __iomem *) dd->piobase) +
1574 qib_pio4koffset - qib_kreglen);
1575 if (qib_userlen)
1576 /* ureg will now be accessed relative to dd->userbase */
1577 dd->userbase = qib_userbase;
1578 return 0;
1579
1580bail_piobase:
1581 iounmap(qib_piobase);
1582bail_kregbase:
1583 iounmap(qib_kregbase);
1584bail:
1585 return -ENOMEM;
1586}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
new file mode 100644
index 000000000000..54a40828a106
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -0,0 +1,236 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/delay.h>
37
38#include "qib.h"
39#include "qib_common.h"
40
41/**
42 * qib_format_hwmsg - format a single hwerror message
43 * @msg: message buffer
44 * @msgl: length of message buffer
45 * @hwmsg: message to add to message buffer
46 */
47static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
48{
49 strlcat(msg, "[", msgl);
50 strlcat(msg, hwmsg, msgl);
51 strlcat(msg, "]", msgl);
52}
53
54/**
55 * qib_format_hwerrors - format hardware error messages for display
56 * @hwerrs: hardware errors bit vector
57 * @hwerrmsgs: hardware error descriptions
58 * @nhwerrmsgs: number of hwerrmsgs
59 * @msg: message buffer
60 * @msgl: message buffer length
61 */
62void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
63 size_t nhwerrmsgs, char *msg, size_t msgl)
64{
65 int i;
66
67 for (i = 0; i < nhwerrmsgs; i++)
68 if (hwerrs & hwerrmsgs[i].mask)
69 qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
70}
71
72static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
73{
74 struct ib_event event;
75 struct qib_devdata *dd = ppd->dd;
76
77 event.device = &dd->verbs_dev.ibdev;
78 event.element.port_num = ppd->port;
79 event.event = ev;
80 ib_dispatch_event(&event);
81}
82
83void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
84{
85 struct qib_devdata *dd = ppd->dd;
86 unsigned long flags;
87 u32 lstate;
88 u8 ltstate;
89 enum ib_event_type ev = 0;
90
91 lstate = dd->f_iblink_state(ibcs); /* linkstate */
92 ltstate = dd->f_ibphys_portstate(ibcs);
93
94 /*
95 * If linkstate transitions into INIT from any of the various down
96 * states, or if it transitions from any of the up (INIT or better)
97 * states into any of the down states (except link recovery), then
98 * call the chip-specific code to take appropriate actions.
99 */
100 if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
101 ltstate == IB_PHYSPORTSTATE_LINKUP) {
102 /* transitioned to UP */
103 if (dd->f_ib_updown(ppd, 1, ibcs))
104 goto skip_ibchange; /* chip-code handled */
105 } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
106 QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
107 if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
108 ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
109 dd->f_ib_updown(ppd, 0, ibcs))
110 goto skip_ibchange; /* chip-code handled */
111 qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
112 }
113
114 if (lstate != IB_PORT_DOWN) {
115 /* lstate is INIT, ARMED, or ACTIVE */
116 if (lstate != IB_PORT_ACTIVE) {
117 *ppd->statusp &= ~QIB_STATUS_IB_READY;
118 if (ppd->lflags & QIBL_LINKACTIVE)
119 ev = IB_EVENT_PORT_ERR;
120 spin_lock_irqsave(&ppd->lflags_lock, flags);
121 if (lstate == IB_PORT_ARMED) {
122 ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
123 ppd->lflags &= ~(QIBL_LINKINIT |
124 QIBL_LINKDOWN | QIBL_LINKACTIVE);
125 } else {
126 ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
127 ppd->lflags &= ~(QIBL_LINKARMED |
128 QIBL_LINKDOWN | QIBL_LINKACTIVE);
129 }
130 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
131 /* start a 75msec timer to clear symbol errors */
132 mod_timer(&ppd->symerr_clear_timer,
133 msecs_to_jiffies(75));
134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
135 /* active, but not active deferred */
136 qib_hol_up(ppd); /* useful only for 6120 now */
137 *ppd->statusp |=
138 QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
139 qib_clear_symerror_on_linkup((unsigned long)ppd);
140 spin_lock_irqsave(&ppd->lflags_lock, flags);
141 ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
142 ppd->lflags &= ~(QIBL_LINKINIT |
143 QIBL_LINKDOWN | QIBL_LINKARMED);
144 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
145 if (dd->flags & QIB_HAS_SEND_DMA)
146 qib_sdma_process_event(ppd,
147 qib_sdma_event_e30_go_running);
148 ev = IB_EVENT_PORT_ACTIVE;
149 dd->f_setextled(ppd, 1);
150 }
151 } else { /* down */
152 if (ppd->lflags & QIBL_LINKACTIVE)
153 ev = IB_EVENT_PORT_ERR;
154 spin_lock_irqsave(&ppd->lflags_lock, flags);
155 ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
156 ppd->lflags &= ~(QIBL_LINKINIT |
157 QIBL_LINKACTIVE | QIBL_LINKARMED);
158 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
159 *ppd->statusp &= ~QIB_STATUS_IB_READY;
160 }
161
162skip_ibchange:
163 ppd->lastibcstat = ibcs;
164 if (ev)
165 signal_ib_event(ppd, ev);
166 return;
167}
168
169void qib_clear_symerror_on_linkup(unsigned long opaque)
170{
171 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
172
173 if (ppd->lflags & QIBL_LINKACTIVE)
174 return;
175
176 ppd->ibport_data.z_symbol_error_counter =
177 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
178}
179
180/*
181 * Handle receive interrupts for user ctxts; this means a user
182 * process was waiting for a packet to arrive, and didn't want
183 * to poll.
184 */
185void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
186{
187 struct qib_ctxtdata *rcd;
188 unsigned long flags;
189 int i;
190
191 spin_lock_irqsave(&dd->uctxt_lock, flags);
192 for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
193 if (!(ctxtr & (1ULL << i)))
194 continue;
195 rcd = dd->rcd[i];
196 if (!rcd || !rcd->cnt)
197 continue;
198
199 if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
200 wake_up_interruptible(&rcd->wait);
201 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
202 rcd->ctxt);
203 } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
204 &rcd->flag)) {
205 rcd->urgent++;
206 wake_up_interruptible(&rcd->wait);
207 }
208 }
209 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
210}
211
212void qib_bad_intrstatus(struct qib_devdata *dd)
213{
214 static int allbits;
215
216 /* separate routine, for better optimization of qib_intr() */
217
218 /*
219 * We print the message and disable interrupts, in the hope of
220 * having a better chance of debugging the problem.
221 */
222 qib_dev_err(dd, "Read of chip interrupt status failed,"
223 " disabling interrupts\n");
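 /*
  * The escalation below is intentional: the first failed read only
  * logs, the second disables interrupt delivery, and the third
  * unregisters the IRQ handler and clears QIB_INITTED.
  */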
224 if (allbits++) {
225 /* disable interrupt delivery, something is very wrong */
226 if (allbits == 2)
227 dd->f_set_intr_state(dd, 0);
228 if (allbits == 3) {
229 qib_dev_err(dd, "2nd bad interrupt status, "
230 "unregistering interrupts\n");
231 dd->flags |= QIB_BADINTR;
232 dd->flags &= ~QIB_INITTED;
233 dd->f_free_irq(dd);
234 }
235 }
236}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
new file mode 100644
index 000000000000..4b80eb153d57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -0,0 +1,328 @@
1/*
2 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "qib.h"
35
36/**
37 * qib_alloc_lkey - allocate an lkey
38 * @rkt: lkey table in which to allocate the lkey
39 * @mr: memory region that this lkey protects
40 *
41 * Returns 1 if successful, otherwise returns 0.
42 */
43
44int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
45{
46 unsigned long flags;
47 u32 r;
48 u32 n;
49 int ret;
50
51 spin_lock_irqsave(&rkt->lock, flags);
52
53 /* Find the next available LKEY */
54 r = rkt->next;
55 n = r;
56 for (;;) {
57 if (rkt->table[r] == NULL)
58 break;
59 r = (r + 1) & (rkt->max - 1);
60 if (r == n) {
61 spin_unlock_irqrestore(&rkt->lock, flags);
62 ret = 0;
63 goto bail;
64 }
65 }
66 rkt->next = (r + 1) & (rkt->max - 1);
67 /*
68 * Make sure lkey is never zero, since zero is reserved to indicate an
69 * unrestricted LKEY.
70 */
71 rkt->gen++;
72 mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
73 ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
74 << 8);
75 if (mr->lkey == 0) {
76 mr->lkey |= 1 << 8;
77 rkt->gen++;
78 }
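 /*
  * Layout sketch for the lkey built above, assuming the default
  * lkey_table_size of 16: the table index occupies bits 31..16, an
  * 8-bit generation count occupies bits 15..8, and the low byte stays
  * clear, so a reused table slot yields a different lkey than the one
  * previously freed from it.
  */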
79 rkt->table[r] = mr;
80 spin_unlock_irqrestore(&rkt->lock, flags);
81
82 ret = 1;
83
84bail:
85 return ret;
86}
87
88/**
89 * qib_free_lkey - free an lkey
90 * @dev: qib device data holding the lkey table
91 * @mr: memory region whose lkey is to be freed
92 */
93int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
94{
95 unsigned long flags;
96 u32 lkey = mr->lkey;
97 u32 r;
98 int ret;
99
100 spin_lock_irqsave(&dev->lk_table.lock, flags);
101 if (lkey == 0) {
102 if (dev->dma_mr && dev->dma_mr == mr) {
103 ret = atomic_read(&dev->dma_mr->refcount);
104 if (!ret)
105 dev->dma_mr = NULL;
106 } else
107 ret = 0;
108 } else {
109 r = lkey >> (32 - ib_qib_lkey_table_size);
110 ret = atomic_read(&dev->lk_table.table[r]->refcount);
111 if (!ret)
112 dev->lk_table.table[r] = NULL;
113 }
114 spin_unlock_irqrestore(&dev->lk_table.lock, flags);
115
116 if (ret)
117 ret = -EBUSY;
118 return ret;
119}
120
121/**
122 * qib_lkey_ok - check IB SGE for validity and initialize
123 * @rkt: table containing lkey to check SGE against
124 * @isge: outgoing internal SGE
125 * @sge: SGE to check
126 * @acc: access flags
127 *
128 * Return 1 if valid and successful, otherwise returns 0.
129 *
130 * Check the IB SGE for validity and initialize our internal version
131 * of it.
132 */
133int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
134 struct qib_sge *isge, struct ib_sge *sge, int acc)
135{
136 struct qib_mregion *mr;
137 unsigned n, m;
138 size_t off;
139 int ret = 0;
140 unsigned long flags;
141
142 /*
143 * We use LKEY == zero for kernel virtual addresses
144 * (see qib_get_dma_mr and qib_dma.c).
145 */
146 spin_lock_irqsave(&rkt->lock, flags);
147 if (sge->lkey == 0) {
148 struct qib_ibdev *dev = to_idev(pd->ibpd.device);
149
150 if (pd->user)
151 goto bail;
152 if (!dev->dma_mr)
153 goto bail;
154 atomic_inc(&dev->dma_mr->refcount);
155 isge->mr = dev->dma_mr;
156 isge->vaddr = (void *) sge->addr;
157 isge->length = sge->length;
158 isge->sge_length = sge->length;
159 isge->m = 0;
160 isge->n = 0;
161 goto ok;
162 }
163 mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
164 if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
165 mr->pd != &pd->ibpd))
166 goto bail;
167
168 off = sge->addr - mr->user_base;
169 if (unlikely(sge->addr < mr->user_base ||
170 off + sge->length > mr->length ||
171 (mr->access_flags & acc) != acc))
172 goto bail;
173
174 off += mr->offset;
175 m = 0;
176 n = 0;
177 while (off >= mr->map[m]->segs[n].length) {
178 off -= mr->map[m]->segs[n].length;
179 n++;
180 if (n >= QIB_SEGSZ) {
181 m++;
182 n = 0;
183 }
184 }
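 /*
  * Worked example of the walk above, assuming equal 4 KiB segments:
  * with off == 10000 the loop consumes two segments (m == 0, n == 2)
  * and leaves off == 1808, so the SGE starts 1808 bytes into the
  * third segment of the region.
  */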
185 atomic_inc(&mr->refcount);
186 isge->mr = mr;
187 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
188 isge->length = mr->map[m]->segs[n].length - off;
189 isge->sge_length = sge->length;
190 isge->m = m;
191 isge->n = n;
192ok:
193 ret = 1;
194bail:
195 spin_unlock_irqrestore(&rkt->lock, flags);
196 return ret;
197}
198
199/**
200 * qib_rkey_ok - check the IB virtual address, length, and RKEY
201 * @qp: queue pair the request arrived on
202 * @sge: internal SGE to initialize
203 * @len: length of data
204 * @vaddr: virtual address to place data
205 * @rkey: rkey to check
206 * @acc: access flags
207 *
208 * Return 1 if successful, otherwise 0.
209 */
210int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
211 u32 len, u64 vaddr, u32 rkey, int acc)
212{
213 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
214 struct qib_mregion *mr;
215 unsigned n, m;
216 size_t off;
217 int ret = 0;
218 unsigned long flags;
219
220 /*
221 * We use RKEY == zero for kernel virtual addresses
222 * (see qib_get_dma_mr and qib_dma.c).
223 */
224 spin_lock_irqsave(&rkt->lock, flags);
225 if (rkey == 0) {
226 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
227 struct qib_ibdev *dev = to_idev(pd->ibpd.device);
228
229 if (pd->user)
230 goto bail;
231 if (!dev->dma_mr)
232 goto bail;
233 atomic_inc(&dev->dma_mr->refcount);
234 sge->mr = dev->dma_mr;
235 sge->vaddr = (void *) vaddr;
236 sge->length = len;
237 sge->sge_length = len;
238 sge->m = 0;
239 sge->n = 0;
240 goto ok;
241 }
242
243 mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
244 if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
245 goto bail;
246
247 off = vaddr - mr->iova;
248 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
249 (mr->access_flags & acc) == 0))
250 goto bail;
251
252 off += mr->offset;
253 m = 0;
254 n = 0;
255 while (off >= mr->map[m]->segs[n].length) {
256 off -= mr->map[m]->segs[n].length;
257 n++;
258 if (n >= QIB_SEGSZ) {
259 m++;
260 n = 0;
261 }
262 }
263 atomic_inc(&mr->refcount);
264 sge->mr = mr;
265 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
266 sge->length = mr->map[m]->segs[n].length - off;
267 sge->sge_length = len;
268 sge->m = m;
269 sge->n = n;
270ok:
271 ret = 1;
272bail:
273 spin_unlock_irqrestore(&rkt->lock, flags);
274 return ret;
275}
276
277/*
278 * Initialize the memory region specified by the work request.
279 */
280int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
281{
282 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
283 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
284 struct qib_mregion *mr;
285 u32 rkey = wr->wr.fast_reg.rkey;
286 unsigned i, n, m;
287 int ret = -EINVAL;
288 unsigned long flags;
289 u64 *page_list;
290 size_t ps;
291
292 spin_lock_irqsave(&rkt->lock, flags);
293 if (pd->user || rkey == 0)
294 goto bail;
295
296 mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
297 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
298 goto bail;
299
300 if (wr->wr.fast_reg.page_list_len > mr->max_segs)
301 goto bail;
302
303 ps = 1UL << wr->wr.fast_reg.page_shift;
304 if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
305 goto bail;
306
307 mr->user_base = wr->wr.fast_reg.iova_start;
308 mr->iova = wr->wr.fast_reg.iova_start;
309 mr->lkey = rkey;
310 mr->length = wr->wr.fast_reg.length;
311 mr->access_flags = wr->wr.fast_reg.access_flags;
312 page_list = wr->wr.fast_reg.page_list->page_list;
313 m = 0;
314 n = 0;
315 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
316 mr->map[m]->segs[n].vaddr = (void *) page_list[i];
317 mr->map[m]->segs[n].length = ps;
318 if (++n == QIB_SEGSZ) {
319 m++;
320 n = 0;
321 }
322 }
323
324 ret = 0;
325bail:
326 spin_unlock_irqrestore(&rkt->lock, flags);
327 return ret;
328}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
new file mode 100644
index 000000000000..94b0d1f3a8f0
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -0,0 +1,2173 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <rdma/ib_smi.h>
36
37#include "qib.h"
38#include "qib_mad.h"
39
40static int reply(struct ib_smp *smp)
41{
42 /*
43 * The verbs framework will handle the directed/LID route
44 * packet changes.
45 */
46 smp->method = IB_MGMT_METHOD_GET_RESP;
47 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
48 smp->status |= IB_SMP_DIRECTION;
49 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
50}
51
52static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
53{
54 struct ib_mad_send_buf *send_buf;
55 struct ib_mad_agent *agent;
56 struct ib_smp *smp;
57 int ret;
58 unsigned long flags;
59 unsigned long timeout;
60
61 agent = ibp->send_agent;
62 if (!agent)
63 return;
64
65 /* o14-3.2.1 */
66 if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
67 return;
68
69 /* o14-2 */
70 if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
71 return;
72
73 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
74 IB_MGMT_MAD_DATA, GFP_ATOMIC);
75 if (IS_ERR(send_buf))
76 return;
77
78 smp = send_buf->mad;
79 smp->base_version = IB_MGMT_BASE_VERSION;
80 smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
81 smp->class_version = 1;
82 smp->method = IB_MGMT_METHOD_TRAP;
83 ibp->tid++;
84 smp->tid = cpu_to_be64(ibp->tid);
85 smp->attr_id = IB_SMP_ATTR_NOTICE;
86 /* o14-1: smp->mkey = 0; */
87 memcpy(smp->data, data, len);
88
89 spin_lock_irqsave(&ibp->lock, flags);
90 if (!ibp->sm_ah) {
91 if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
92 struct ib_ah *ah;
93 struct ib_ah_attr attr;
94
95 memset(&attr, 0, sizeof attr);
96 attr.dlid = ibp->sm_lid;
97 attr.port_num = ppd_from_ibp(ibp)->port;
98 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
99 if (IS_ERR(ah))
100 ret = -EINVAL;
101 else {
102 send_buf->ah = ah;
103 ibp->sm_ah = to_iah(ah);
104 ret = 0;
105 }
106 } else
107 ret = -EINVAL;
108 } else {
109 send_buf->ah = &ibp->sm_ah->ibah;
110 ret = 0;
111 }
112 spin_unlock_irqrestore(&ibp->lock, flags);
113
114 if (!ret)
115 ret = ib_post_send_mad(send_buf, NULL);
116 if (!ret) {
117 /* 4.096 usec. */
118 timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
119 ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
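 /*
  * For example, with subnet_timeout == 18 this works out to
  * 4096 * 2^18 / 1000 == 1073741 usec, i.e. further traps for
  * this port are held off for roughly 1.07 seconds.
  */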
120 } else {
121 ib_free_send_mad(send_buf);
122 ibp->trap_timeout = 0;
123 }
124}
125
126/*
127 * Send a bad [PQ]_Key trap (ch. 14.3.8).
128 */
129void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
130 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
131{
132 struct ib_mad_notice_attr data;
133
134 if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
135 ibp->pkey_violations++;
136 else
137 ibp->qkey_violations++;
138 ibp->n_pkt_drops++;
139
140 /* Send violation trap */
141 data.generic_type = IB_NOTICE_TYPE_SECURITY;
142 data.prod_type_msb = 0;
143 data.prod_type_lsb = IB_NOTICE_PROD_CA;
144 data.trap_num = trap_num;
145 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
146 data.toggle_count = 0;
147 memset(&data.details, 0, sizeof data.details);
148 data.details.ntc_257_258.lid1 = lid1;
149 data.details.ntc_257_258.lid2 = lid2;
150 data.details.ntc_257_258.key = cpu_to_be32(key);
151 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
152 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
153
154 qib_send_trap(ibp, &data, sizeof data);
155}
156
157/*
158 * Send a bad M_Key trap (ch. 14.3.9).
159 */
160static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
161{
162 struct ib_mad_notice_attr data;
163
164 /* Send violation trap */
165 data.generic_type = IB_NOTICE_TYPE_SECURITY;
166 data.prod_type_msb = 0;
167 data.prod_type_lsb = IB_NOTICE_PROD_CA;
168 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
169 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
170 data.toggle_count = 0;
171 memset(&data.details, 0, sizeof data.details);
172 data.details.ntc_256.lid = data.issuer_lid;
173 data.details.ntc_256.method = smp->method;
174 data.details.ntc_256.attr_id = smp->attr_id;
175 data.details.ntc_256.attr_mod = smp->attr_mod;
176 data.details.ntc_256.mkey = smp->mkey;
177 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
178 u8 hop_cnt;
179
180 data.details.ntc_256.dr_slid = smp->dr_slid;
181 data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
182 hop_cnt = smp->hop_cnt;
183 if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
184 data.details.ntc_256.dr_trunc_hop |=
185 IB_NOTICE_TRAP_DR_TRUNC;
186 hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
187 }
188 data.details.ntc_256.dr_trunc_hop |= hop_cnt;
189 memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
190 hop_cnt);
191 }
192
193 qib_send_trap(ibp, &data, sizeof data);
194}
195
196/*
197 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
198 */
199void qib_cap_mask_chg(struct qib_ibport *ibp)
200{
201 struct ib_mad_notice_attr data;
202
203 data.generic_type = IB_NOTICE_TYPE_INFO;
204 data.prod_type_msb = 0;
205 data.prod_type_lsb = IB_NOTICE_PROD_CA;
206 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
207 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
208 data.toggle_count = 0;
209 memset(&data.details, 0, sizeof data.details);
210 data.details.ntc_144.lid = data.issuer_lid;
211 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
212
213 qib_send_trap(ibp, &data, sizeof data);
214}
215
216/*
217 * Send a System Image GUID Changed trap (ch. 14.3.12).
218 */
219void qib_sys_guid_chg(struct qib_ibport *ibp)
220{
221 struct ib_mad_notice_attr data;
222
223 data.generic_type = IB_NOTICE_TYPE_INFO;
224 data.prod_type_msb = 0;
225 data.prod_type_lsb = IB_NOTICE_PROD_CA;
226 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
227 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
228 data.toggle_count = 0;
229 memset(&data.details, 0, sizeof data.details);
230 data.details.ntc_145.lid = data.issuer_lid;
231 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
232
233 qib_send_trap(ibp, &data, sizeof data);
234}
235
236/*
237 * Send a Node Description Changed trap (ch. 14.3.13).
238 */
239void qib_node_desc_chg(struct qib_ibport *ibp)
240{
241 struct ib_mad_notice_attr data;
242
243 data.generic_type = IB_NOTICE_TYPE_INFO;
244 data.prod_type_msb = 0;
245 data.prod_type_lsb = IB_NOTICE_PROD_CA;
246 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
247 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
248 data.toggle_count = 0;
249 memset(&data.details, 0, sizeof data.details);
250 data.details.ntc_144.lid = data.issuer_lid;
251 data.details.ntc_144.local_changes = 1;
252 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
253
254 qib_send_trap(ibp, &data, sizeof data);
255}
256
257static int subn_get_nodedescription(struct ib_smp *smp,
258 struct ib_device *ibdev)
259{
260 if (smp->attr_mod)
261 smp->status |= IB_SMP_INVALID_FIELD;
262
263 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
264
265 return reply(smp);
266}
267
268static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
269 u8 port)
270{
271 struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
272 struct qib_devdata *dd = dd_from_ibdev(ibdev);
273 u32 vendor, majrev, minrev;
274 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
275
276 /* GUID 0 is illegal */
277 if (smp->attr_mod || pidx >= dd->num_pports ||
278 dd->pport[pidx].guid == 0)
279 smp->status |= IB_SMP_INVALID_FIELD;
280 else
281 nip->port_guid = dd->pport[pidx].guid;
282
283 nip->base_version = 1;
284 nip->class_version = 1;
285 nip->node_type = 1; /* channel adapter */
286 nip->num_ports = ibdev->phys_port_cnt;
287 /* This is already in network order */
288 nip->sys_guid = ib_qib_sys_image_guid;
289 nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
290 nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
291 nip->device_id = cpu_to_be16(dd->deviceid);
292 majrev = dd->majrev;
293 minrev = dd->minrev;
294 nip->revision = cpu_to_be32((majrev << 16) | minrev);
295 nip->local_port_num = port;
296 vendor = dd->vendorid;
297 nip->vendor_id[0] = QIB_SRC_OUI_1;
298 nip->vendor_id[1] = QIB_SRC_OUI_2;
299 nip->vendor_id[2] = QIB_SRC_OUI_3;
300
301 return reply(smp);
302}
303
304static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
305 u8 port)
306{
307 struct qib_devdata *dd = dd_from_ibdev(ibdev);
308 u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
309 __be64 *p = (__be64 *) smp->data;
310 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
311
312 /* 32 blocks of 8 64-bit GUIDs per block */
313
314 memset(smp->data, 0, sizeof(smp->data));
315
316 if (startgx == 0 && pidx < dd->num_pports) {
317 struct qib_pportdata *ppd = dd->pport + pidx;
318 struct qib_ibport *ibp = &ppd->ibport_data;
319 __be64 g = ppd->guid;
320 unsigned i;
321
322 /* GUID 0 is illegal */
323 if (g == 0)
324 smp->status |= IB_SMP_INVALID_FIELD;
325 else {
326 /* The first is a copy of the read-only HW GUID. */
327 p[0] = g;
328 for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
329 p[i] = ibp->guids[i - 1];
330 }
331 } else
332 smp->status |= IB_SMP_INVALID_FIELD;
333
334 return reply(smp);
335}
336
337static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
338{
339 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
340}
341
342static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
343{
344 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
345}
346
347static int get_overrunthreshold(struct qib_pportdata *ppd)
348{
349 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
350}
351
352/**
353 * set_overrunthreshold - set the overrun threshold
354 * @ppd: the physical port data
355 * @n: the new threshold
356 *
357 * Note that this will only take effect when the link state changes.
358 */
359static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
360{
361 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
362 (u32)n);
363 return 0;
364}
365
366static int get_phyerrthreshold(struct qib_pportdata *ppd)
367{
368 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
369}
370
371/**
372 * set_phyerrthreshold - set the physical error threshold
373 * @ppd: the physical port data
374 * @n: the new threshold
375 *
376 * Note that this will only take effect when the link state changes.
377 */
378static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
379{
380 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
381 (u32)n);
382 return 0;
383}
384
385/**
386 * get_linkdowndefaultstate - get the default linkdown state
387 * @ppd: the physical port data
388 *
389 * Returns zero if the default is POLL, 1 if the default is SLEEP.
390 */
391static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
392{
393 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
394 IB_LINKINITCMD_SLEEP;
395}
396
397static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
398{
399 int ret = 0;
400
401 /* Is the mkey in the process of expiring? */
402 if (ibp->mkey_lease_timeout &&
403 time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
404 /* Clear timeout and mkey protection field. */
405 ibp->mkey_lease_timeout = 0;
406 ibp->mkeyprot = 0;
407 }
408
409 /* M_Key checking depends on Portinfo:M_Key_protect_bits */
410 if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
411 ibp->mkey != smp->mkey &&
412 (smp->method == IB_MGMT_METHOD_SET ||
413 smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
414 (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
415 if (ibp->mkey_violations != 0xFFFF)
416 ++ibp->mkey_violations;
417 if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
418 ibp->mkey_lease_timeout = jiffies +
419 ibp->mkey_lease_period * HZ;
420 /* Generate a trap notice. */
421 qib_bad_mkey(ibp, smp);
422 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
423 } else if (ibp->mkey_lease_timeout)
424 ibp->mkey_lease_timeout = 0;
425
426 return ret;
427}
428
429static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
430 u8 port)
431{
432 struct qib_devdata *dd;
433 struct qib_pportdata *ppd;
434 struct qib_ibport *ibp;
435 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
436 u16 lid;
437 u8 mtu;
438 int ret;
439 u32 state;
440 u32 port_num = be32_to_cpu(smp->attr_mod);
441
442 if (port_num == 0)
443 port_num = port;
444 else {
445 if (port_num > ibdev->phys_port_cnt) {
446 smp->status |= IB_SMP_INVALID_FIELD;
447 ret = reply(smp);
448 goto bail;
449 }
450 if (port_num != port) {
451 ibp = to_iport(ibdev, port_num);
452 ret = check_mkey(ibp, smp, 0);
453 if (ret)
454 goto bail;
455 }
456 }
457
458 dd = dd_from_ibdev(ibdev);
459 /* IB numbers ports from 1, hdw from 0 */
460 ppd = dd->pport + (port_num - 1);
461 ibp = &ppd->ibport_data;
462
463 /* Clear all fields. Only set the non-zero fields. */
464 memset(smp->data, 0, sizeof(smp->data));
465
466 /* Only return the mkey if the protection field allows it. */
467 if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
468 ibp->mkeyprot == 0)
469 pip->mkey = ibp->mkey;
470 pip->gid_prefix = ibp->gid_prefix;
471 lid = ppd->lid;
472 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
473 pip->sm_lid = cpu_to_be16(ibp->sm_lid);
474 pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
475 /* pip->diag_code; */
476 pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
477 pip->local_port_num = port;
478 pip->link_width_enabled = ppd->link_width_enabled;
479 pip->link_width_supported = ppd->link_width_supported;
480 pip->link_width_active = ppd->link_width_active;
481 state = dd->f_iblink_state(ppd->lastibcstat);
482 pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
483
484 pip->portphysstate_linkdown =
485 (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
486 (get_linkdowndefaultstate(ppd) ? 1 : 2);
487 pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
488 pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
489 ppd->link_speed_enabled;
490 switch (ppd->ibmtu) {
491 default: /* something is wrong; fall through */
492 case 4096:
493 mtu = IB_MTU_4096;
494 break;
495 case 2048:
496 mtu = IB_MTU_2048;
497 break;
498 case 1024:
499 mtu = IB_MTU_1024;
500 break;
501 case 512:
502 mtu = IB_MTU_512;
503 break;
504 case 256:
505 mtu = IB_MTU_256;
506 break;
507 }
508 pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
509 pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
510 pip->vl_high_limit = ibp->vl_high_limit;
511 pip->vl_arb_high_cap =
512 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
513 pip->vl_arb_low_cap =
514 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
515 /* InitTypeReply = 0 */
516 pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
517 /* HCAs ignore VLStallCount and HOQLife */
518 /* pip->vlstallcnt_hoqlife; */
519 pip->operationalvl_pei_peo_fpi_fpo =
520 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
521 pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
522 /* P_KeyViolations are counted by hardware. */
523 pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
524 pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
525 /* Only the hardware GUID is supported for now */
526 pip->guid_cap = QIB_GUIDS_PER_PORT;
527 pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
528 /* 32.768 usec. response time (guessing) */
529 pip->resv_resptimevalue = 3;
530 pip->localphyerrors_overrunerrors =
531 (get_phyerrthreshold(ppd) << 4) |
532 get_overrunthreshold(ppd);
533 /* pip->max_credit_hint; */
534 if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
535 u32 v;
536
537 v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
538 pip->link_roundtrip_latency[0] = v >> 16;
539 pip->link_roundtrip_latency[1] = v >> 8;
540 pip->link_roundtrip_latency[2] = v;
541 }
542
543 ret = reply(smp);
544
545bail:
546 return ret;
547}
548
549/**
550 * get_pkeys - return the PKEY table
551 * @dd: the qlogic_ib device
552 * @port: the IB port number
553 * @pkeys: the pkey table is placed here
554 */
555static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
556{
557 struct qib_pportdata *ppd = dd->pport + port - 1;
558 /*
559 * always a kernel context, no locking needed.
560 * If we get here with ppd setup, no need to check
561 * that rcd is valid.
562 */
563 struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
564
565 memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
566
567 return 0;
568}
569
570static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
571 u8 port)
572{
573 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
574 u16 *p = (u16 *) smp->data;
575 __be16 *q = (__be16 *) smp->data;
576
577 /* 64 blocks of 32 16-bit P_Key entries */
578
579 memset(smp->data, 0, sizeof(smp->data));
580 if (startpx == 0) {
581 struct qib_devdata *dd = dd_from_ibdev(ibdev);
582 unsigned i, n = qib_get_npkeys(dd);
583
584 get_pkeys(dd, port, p);
585
586 for (i = 0; i < n; i++)
587 q[i] = cpu_to_be16(p[i]);
588 } else
589 smp->status |= IB_SMP_INVALID_FIELD;
590
591 return reply(smp);
592}
593
594static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
595 u8 port)
596{
597 struct qib_devdata *dd = dd_from_ibdev(ibdev);
598 u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
599 __be64 *p = (__be64 *) smp->data;
600 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
601
602 /* 32 blocks of 8 64-bit GUIDs per block */
603
604 if (startgx == 0 && pidx < dd->num_pports) {
605 struct qib_pportdata *ppd = dd->pport + pidx;
606 struct qib_ibport *ibp = &ppd->ibport_data;
607 unsigned i;
608
609 /* The first entry is read-only. */
610 for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
611 ibp->guids[i - 1] = p[i];
612 } else
613 smp->status |= IB_SMP_INVALID_FIELD;
614
615 /* The only GUID we support is the first read-only entry. */
616 return subn_get_guidinfo(smp, ibdev, port);
617}
618
619/**
620 * subn_set_portinfo - set port information
621 * @smp: the incoming SM packet
622 * @ibdev: the infiniband device
623 * @port: the port on the device
624 *
625 * Set Portinfo (see ch. 14.2.5.6).
626 */
627static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
628 u8 port)
629{
630 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
631 struct ib_event event;
632 struct qib_devdata *dd;
633 struct qib_pportdata *ppd;
634 struct qib_ibport *ibp;
635 char clientrereg = 0;
636 unsigned long flags;
637 u16 lid, smlid;
638 u8 lwe;
639 u8 lse;
640 u8 state;
641 u8 vls;
642 u8 msl;
643 u16 lstate;
644 int ret, ore, mtu;
645 u32 port_num = be32_to_cpu(smp->attr_mod);
646
647 if (port_num == 0)
648 port_num = port;
649 else {
650 if (port_num > ibdev->phys_port_cnt)
651 goto err;
652 /* Port attributes can only be set on the receiving port */
653 if (port_num != port)
654 goto get_only;
655 }
656
657 dd = dd_from_ibdev(ibdev);
658 /* IB numbers ports from 1, hdw from 0 */
659 ppd = dd->pport + (port_num - 1);
660 ibp = &ppd->ibport_data;
661 event.device = ibdev;
662 event.element.port_num = port;
663
664 ibp->mkey = pip->mkey;
665 ibp->gid_prefix = pip->gid_prefix;
666 ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
667
668 lid = be16_to_cpu(pip->lid);
669 /* Must be a valid unicast LID address. */
670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
671 goto err;
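 /*
  * Unicast LIDs run from 0x0001 up to, but not including, the
  * multicast base (0xC000 per the IB spec), so 0 and anything at
  * or above QIB_MULTICAST_LID_BASE is rejected above.
  */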
672 if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
673 if (ppd->lid != lid)
674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
676 qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
677 qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
678 event.event = IB_EVENT_LID_CHANGE;
679 ib_dispatch_event(&event);
680 }
681
682 smlid = be16_to_cpu(pip->sm_lid);
683 msl = pip->neighbormtu_mastersmsl & 0xF;
684 /* Must be a valid unicast LID address. */
685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
686 goto err;
687 if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
688 spin_lock_irqsave(&ibp->lock, flags);
689 if (ibp->sm_ah) {
690 if (smlid != ibp->sm_lid)
691 ibp->sm_ah->attr.dlid = smlid;
692 if (msl != ibp->sm_sl)
693 ibp->sm_ah->attr.sl = msl;
694 }
695 spin_unlock_irqrestore(&ibp->lock, flags);
696 if (smlid != ibp->sm_lid)
697 ibp->sm_lid = smlid;
698 if (msl != ibp->sm_sl)
699 ibp->sm_sl = msl;
700 event.event = IB_EVENT_SM_CHANGE;
701 ib_dispatch_event(&event);
702 }
703
704 /* Allow 1x or 4x to be set (see 14.2.6.6). */
705 lwe = pip->link_width_enabled;
706 if (lwe) {
707 if (lwe == 0xFF)
708 lwe = ppd->link_width_supported;
709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
710 goto err;
711 set_link_width_enabled(ppd, lwe);
712 }
713
714 lse = pip->linkspeedactive_enabled & 0xF;
715 if (lse) {
716 /*
717 * The IB 1.2 spec. only allows link speed values
718 * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific
719 * speeds.
720 */
721 if (lse == 15)
722 lse = ppd->link_speed_supported;
723 else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
724 goto err;
725 set_link_speed_enabled(ppd, lse);
726 }
727
728 /* Set link down default state. */
729 switch (pip->portphysstate_linkdown & 0xF) {
730 case 0: /* NOP */
731 break;
732 case 1: /* SLEEP */
733 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
734 IB_LINKINITCMD_SLEEP);
735 break;
736 case 2: /* POLL */
737 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
738 IB_LINKINITCMD_POLL);
739 break;
740 default:
741 goto err;
742 }
743
744 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
745 ibp->vl_high_limit = pip->vl_high_limit;
746 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
747 ibp->vl_high_limit);
748
749 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
750 if (mtu == -1)
751 goto err;
752 qib_set_mtu(ppd, mtu);
753
754 /* Set operational VLs */
755 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
756 if (vls) {
757 if (vls > ppd->vls_supported)
758 goto err;
759 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
760 }
761
762 if (pip->mkey_violations == 0)
763 ibp->mkey_violations = 0;
764
765 if (pip->pkey_violations == 0)
766 ibp->pkey_violations = 0;
767
768 if (pip->qkey_violations == 0)
769 ibp->qkey_violations = 0;
770
771 ore = pip->localphyerrors_overrunerrors;
772 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
773 goto err;
774
775 if (set_overrunthreshold(ppd, (ore & 0xF)))
776 goto err;
777
778 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
779
780 if (pip->clientrereg_resv_subnetto & 0x80) {
781 clientrereg = 1;
782 event.event = IB_EVENT_CLIENT_REREGISTER;
783 ib_dispatch_event(&event);
784 }
785
786 /*
787 * Do the port state change now that the other link parameters
788 * have been set.
789 * Changing the port physical state only makes sense if the link
790 * is down or is being set to down.
791 */
792 state = pip->linkspeed_portstate & 0xF;
793 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
794 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
795 goto err;
796
797 /*
798 * Only state changes of DOWN, ARM, and ACTIVE are valid
799 * and must be in the correct state to take effect (see 7.2.6).
800 */
801 switch (state) {
802 case IB_PORT_NOP:
803 if (lstate == 0)
804 break;
805 /* FALLTHROUGH */
806 case IB_PORT_DOWN:
807 if (lstate == 0)
808 lstate = QIB_IB_LINKDOWN_ONLY;
809 else if (lstate == 1)
810 lstate = QIB_IB_LINKDOWN_SLEEP;
811 else if (lstate == 2)
812 lstate = QIB_IB_LINKDOWN;
813 else if (lstate == 3)
814 lstate = QIB_IB_LINKDOWN_DISABLE;
815 else
816 goto err;
817 spin_lock_irqsave(&ppd->lflags_lock, flags);
818 ppd->lflags &= ~QIBL_LINKV;
819 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
820 qib_set_linkstate(ppd, lstate);
821 /*
822 * Don't send a reply if the response would be sent
823 * through the disabled port.
824 */
825 if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
826 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
827 goto done;
828 }
829 qib_wait_linkstate(ppd, QIBL_LINKV, 10);
830 break;
831 case IB_PORT_ARMED:
832 qib_set_linkstate(ppd, QIB_IB_LINKARM);
833 break;
834 case IB_PORT_ACTIVE:
835 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
836 break;
837 default:
838 /* XXX We have already partially updated our state! */
839 goto err;
840 }
841
842 ret = subn_get_portinfo(smp, ibdev, port);
843
844 if (clientrereg)
845 pip->clientrereg_resv_subnetto |= 0x80;
846
847 goto done;
848
849err:
850 smp->status |= IB_SMP_INVALID_FIELD;
851get_only:
852 ret = subn_get_portinfo(smp, ibdev, port);
853done:
854 return ret;
855}
856
857/**
858 * rm_pkey - decrement the reference count for the given PKEY
859 * @ppd: the qlogic_ib port data
860 * @key: the PKEY to remove
861 *
862 * Return true if this was the last reference and the hardware table entry
863 * needs to be changed.
864 */
865static int rm_pkey(struct qib_pportdata *ppd, u16 key)
866{
867 int i;
868 int ret;
869
870 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
871 if (ppd->pkeys[i] != key)
872 continue;
873 if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
874 ppd->pkeys[i] = 0;
875 ret = 1;
876 goto bail;
877 }
878 break;
879 }
880
881 ret = 0;
882
883bail:
884 return ret;
885}
886
887/**
888 * add_pkey - add the given PKEY to the hardware table
889 * @ppd: the qlogic_ib port data
890 * @key: the PKEY
891 *
892 * Return an error code if unable to add the entry, zero if no change,
893 * or 1 if the hardware PKEY register needs to be updated.
894 */
895static int add_pkey(struct qib_pportdata *ppd, u16 key)
896{
897 int i;
898 u16 lkey = key & 0x7FFF;
899 int any = 0;
900 int ret;
901
902 if (lkey == 0x7FFF) {
903 ret = 0;
904 goto bail;
905 }
906
907 /* Look for an empty slot or a matching PKEY. */
908 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
909 if (!ppd->pkeys[i]) {
910 any++;
911 continue;
912 }
913 /* If it matches exactly, try to increment the ref count */
914 if (ppd->pkeys[i] == key) {
915 if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
916 ret = 0;
917 goto bail;
918 }
919 /* Lost the race. Look for an empty slot below. */
920 atomic_dec(&ppd->pkeyrefs[i]);
921 any++;
922 }
923 /*
924 * It makes no sense to have both the limited and unlimited
925 * PKEY set at the same time since the unlimited one will
926 * disable the limited one.
927 */
928 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
929 ret = -EEXIST;
930 goto bail;
931 }
932 }
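 /*
  * Concrete case of the rule above: 0x8001 (full member) and 0x0001
  * (limited member) share the low 15 bits, so once one of them is in
  * the table, adding the other fails with -EEXIST.
  */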
933 if (!any) {
934 ret = -EBUSY;
935 goto bail;
936 }
937 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
938 if (!ppd->pkeys[i] &&
939 atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
940 /* for qibstats, etc. */
941 ppd->pkeys[i] = key;
942 ret = 1;
943 goto bail;
944 }
945 }
946 ret = -EBUSY;
947
948bail:
949 return ret;
950}
951
952/**
953 * set_pkeys - set the PKEY table for ctxt 0
954 * @dd: the qlogic_ib device
955 * @port: the IB port number
956 * @pkeys: the PKEY table
957 */
958static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
959{
960 struct qib_pportdata *ppd;
961 struct qib_ctxtdata *rcd;
962 int i;
963 int changed = 0;
964
965 /*
966 * IB port one/two always maps to context zero/one,
967 * always a kernel context, no locking needed.
968 * If we get here with ppd setup, no need to check
969 * that rcd is valid.
970 */
971 ppd = dd->pport + (port - 1);
972 rcd = dd->rcd[ppd->hw_pidx];
973
974 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
975 u16 key = pkeys[i];
976 u16 okey = rcd->pkeys[i];
977
978 if (key == okey)
979 continue;
980 /*
981 * The value of this PKEY table entry is changing.
982 * Remove the old entry in the hardware's array of PKEYs.
983 */
984 if (okey & 0x7FFF)
985 changed |= rm_pkey(ppd, okey);
986 if (key & 0x7FFF) {
987 int ret = add_pkey(ppd, key);
988
989 if (ret < 0)
990 key = 0;
991 else
992 changed |= ret;
993 }
994 rcd->pkeys[i] = key;
995 }
996 if (changed) {
997 struct ib_event event;
998
999 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
1000
1001 event.event = IB_EVENT_PKEY_CHANGE;
1002 event.device = &dd->verbs_dev.ibdev;
1003 event.element.port_num = 1;
1004 ib_dispatch_event(&event);
1005 }
1006 return 0;
1007}
1008
1009static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
1010 u8 port)
1011{
1012 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
1013 __be16 *p = (__be16 *) smp->data;
1014 u16 *q = (u16 *) smp->data;
1015 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1016 unsigned i, n = qib_get_npkeys(dd);
1017
1018 for (i = 0; i < n; i++)
1019 q[i] = be16_to_cpu(p[i]);
1020
1021 if (startpx != 0 || set_pkeys(dd, port, q) != 0)
1022 smp->status |= IB_SMP_INVALID_FIELD;
1023
1024 return subn_get_pkeytable(smp, ibdev, port);
1025}
1026
1027static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
1028 u8 port)
1029{
1030 struct qib_ibport *ibp = to_iport(ibdev, port);
1031 u8 *p = (u8 *) smp->data;
1032 unsigned i;
1033
1034 memset(smp->data, 0, sizeof(smp->data));
1035
1036 if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
1037 smp->status |= IB_SMP_UNSUP_METHOD;
1038 else
1039 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
1040 *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
1041
1042 return reply(smp);
1043}
1044
1045static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
1046 u8 port)
1047{
1048 struct qib_ibport *ibp = to_iport(ibdev, port);
1049 u8 *p = (u8 *) smp->data;
1050 unsigned i;
1051
1052 if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
1053 smp->status |= IB_SMP_UNSUP_METHOD;
1054 return reply(smp);
1055 }
1056
1057 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
1058 ibp->sl_to_vl[i] = *p >> 4;
1059 ibp->sl_to_vl[i + 1] = *p & 0xF;
1060 }
1061 qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
1062 _QIB_EVENT_SL2VL_CHANGE_BIT);
1063
1064 return subn_get_sl_to_vl(smp, ibdev, port);
1065}
1066
1067static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
1068 u8 port)
1069{
1070 unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
1071 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1072
1073 memset(smp->data, 0, sizeof(smp->data));
1074
1075 if (ppd->vls_supported == IB_VL_VL0)
1076 smp->status |= IB_SMP_UNSUP_METHOD;
1077 else if (which == IB_VLARB_LOWPRI_0_31)
1078 (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
1079 smp->data);
1080 else if (which == IB_VLARB_HIGHPRI_0_31)
1081 (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
1082 smp->data);
1083 else
1084 smp->status |= IB_SMP_INVALID_FIELD;
1085
1086 return reply(smp);
1087}
1088
1089static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
1090 u8 port)
1091{
1092 unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
1093 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1094
1095 if (ppd->vls_supported == IB_VL_VL0)
1096 smp->status |= IB_SMP_UNSUP_METHOD;
1097 else if (which == IB_VLARB_LOWPRI_0_31)
1098 (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
1099 smp->data);
1100 else if (which == IB_VLARB_HIGHPRI_0_31)
1101 (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
1102 smp->data);
1103 else
1104 smp->status |= IB_SMP_INVALID_FIELD;
1105
1106 return subn_get_vl_arb(smp, ibdev, port);
1107}
1108
1109static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
1110 u8 port)
1111{
1112 /*
1113 * For now, we only send the trap once so no need to process this.
1114 * o13-6, o13-7,
1115 * o14-3.a4 The SMA shall not send any message in response to a valid
1116 * SubnTrapRepress() message.
1117 */
1118 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1119}
1120
1121static int pma_get_classportinfo(struct ib_perf *pmp,
1122 struct ib_device *ibdev)
1123{
1124 struct ib_pma_classportinfo *p =
1125 (struct ib_pma_classportinfo *)pmp->data;
1126 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1127
1128 memset(pmp->data, 0, sizeof(pmp->data));
1129
1130 if (pmp->attr_mod != 0)
1131 pmp->status |= IB_SMP_INVALID_FIELD;
1132
1133 /* Note that AllPortSelect is not valid */
1134 p->base_version = 1;
1135 p->class_version = 1;
1136 p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
1137 /*
1138 * Set the most significant bit of CM2 to indicate support for
1139 * congestion statistics
1140 */
1141 p->reserved[0] = dd->psxmitwait_supported << 7;
1142 /*
1143 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
1144 */
1145 p->resp_time_value = 18;
1146
1147 return reply((struct ib_smp *) pmp);
1148}
1149
1150static int pma_get_portsamplescontrol(struct ib_perf *pmp,
1151 struct ib_device *ibdev, u8 port)
1152{
1153 struct ib_pma_portsamplescontrol *p =
1154 (struct ib_pma_portsamplescontrol *)pmp->data;
1155 struct qib_ibdev *dev = to_idev(ibdev);
1156 struct qib_devdata *dd = dd_from_dev(dev);
1157 struct qib_ibport *ibp = to_iport(ibdev, port);
1158 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1159 unsigned long flags;
1160 u8 port_select = p->port_select;
1161
1162 memset(pmp->data, 0, sizeof(pmp->data));
1163
1164 p->port_select = port_select;
1165 if (pmp->attr_mod != 0 || port_select != port) {
1166 pmp->status |= IB_SMP_INVALID_FIELD;
1167 goto bail;
1168 }
1169 spin_lock_irqsave(&ibp->lock, flags);
1170 p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
1171 p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1172 p->counter_width = 4; /* 32 bit counters */
1173 p->counter_mask0_9 = COUNTER_MASK0_9;
1174 p->sample_start = cpu_to_be32(ibp->pma_sample_start);
1175 p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
1176 p->tag = cpu_to_be16(ibp->pma_tag);
1177 p->counter_select[0] = ibp->pma_counter_select[0];
1178 p->counter_select[1] = ibp->pma_counter_select[1];
1179 p->counter_select[2] = ibp->pma_counter_select[2];
1180 p->counter_select[3] = ibp->pma_counter_select[3];
1181 p->counter_select[4] = ibp->pma_counter_select[4];
1182 spin_unlock_irqrestore(&ibp->lock, flags);
1183
1184bail:
1185 return reply((struct ib_smp *) pmp);
1186}
1187
1188static int pma_set_portsamplescontrol(struct ib_perf *pmp,
1189 struct ib_device *ibdev, u8 port)
1190{
1191 struct ib_pma_portsamplescontrol *p =
1192 (struct ib_pma_portsamplescontrol *)pmp->data;
1193 struct qib_ibdev *dev = to_idev(ibdev);
1194 struct qib_devdata *dd = dd_from_dev(dev);
1195 struct qib_ibport *ibp = to_iport(ibdev, port);
1196 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1197 unsigned long flags;
1198 u8 status, xmit_flags;
1199 int ret;
1200
1201 if (pmp->attr_mod != 0 || p->port_select != port) {
1202 pmp->status |= IB_SMP_INVALID_FIELD;
1203 ret = reply((struct ib_smp *) pmp);
1204 goto bail;
1205 }
1206
1207 spin_lock_irqsave(&ibp->lock, flags);
1208
1209 /* Port Sampling code owns the PS* HW counters */
1210 xmit_flags = ppd->cong_stats.flags;
1211 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
1212 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1213 if (status == IB_PMA_SAMPLE_STATUS_DONE ||
1214 (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
1215 xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
1216 ibp->pma_sample_start = be32_to_cpu(p->sample_start);
1217 ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
1218 ibp->pma_tag = be16_to_cpu(p->tag);
1219 ibp->pma_counter_select[0] = p->counter_select[0];
1220 ibp->pma_counter_select[1] = p->counter_select[1];
1221 ibp->pma_counter_select[2] = p->counter_select[2];
1222 ibp->pma_counter_select[3] = p->counter_select[3];
1223 ibp->pma_counter_select[4] = p->counter_select[4];
1224 dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
1225 ibp->pma_sample_start);
1226 }
1227 spin_unlock_irqrestore(&ibp->lock, flags);
1228
1229 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
1230
1231bail:
1232 return ret;
1233}
1234
1235static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
1236 __be16 sel)
1237{
1238 u64 ret;
1239
1240 switch (sel) {
1241 case IB_PMA_PORT_XMIT_DATA:
1242 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
1243 break;
1244 case IB_PMA_PORT_RCV_DATA:
1245 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
1246 break;
1247 case IB_PMA_PORT_XMIT_PKTS:
1248 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
1249 break;
1250 case IB_PMA_PORT_RCV_PKTS:
1251 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
1252 break;
1253 case IB_PMA_PORT_XMIT_WAIT:
1254 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
1255 break;
1256 default:
1257 ret = 0;
1258 }
1259
1260 return ret;
1261}
1262
1263/* This function assumes that the xmit_wait lock is already held */
1264static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
1265{
1266 u32 delta;
1267
1268 delta = get_counter(&ppd->ibport_data, ppd,
1269 IB_PMA_PORT_XMIT_WAIT);
1270 return ppd->cong_stats.counter + delta;
1271}
1272
1273static void cache_hw_sample_counters(struct qib_pportdata *ppd)
1274{
1275 struct qib_ibport *ibp = &ppd->ibport_data;
1276
1277 ppd->cong_stats.counter_cache.psxmitdata =
1278 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
1279 ppd->cong_stats.counter_cache.psrcvdata =
1280 get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
1281 ppd->cong_stats.counter_cache.psxmitpkts =
1282 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
1283 ppd->cong_stats.counter_cache.psrcvpkts =
1284 get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
1285 ppd->cong_stats.counter_cache.psxmitwait =
1286 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
1287}
1288
1289static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
1290 __be16 sel)
1291{
1292 u64 ret;
1293
1294 switch (sel) {
1295 case IB_PMA_PORT_XMIT_DATA:
1296 ret = ppd->cong_stats.counter_cache.psxmitdata;
1297 break;
1298 case IB_PMA_PORT_RCV_DATA:
1299 ret = ppd->cong_stats.counter_cache.psrcvdata;
1300 break;
1301 case IB_PMA_PORT_XMIT_PKTS:
1302 ret = ppd->cong_stats.counter_cache.psxmitpkts;
1303 break;
1304 case IB_PMA_PORT_RCV_PKTS:
1305 ret = ppd->cong_stats.counter_cache.psrcvpkts;
1306 break;
1307 case IB_PMA_PORT_XMIT_WAIT:
1308 ret = ppd->cong_stats.counter_cache.psxmitwait;
1309 break;
1310 default:
1311 ret = 0;
1312 }
1313
1314 return ret;
1315}
1316
1317static int pma_get_portsamplesresult(struct ib_perf *pmp,
1318 struct ib_device *ibdev, u8 port)
1319{
1320 struct ib_pma_portsamplesresult *p =
1321 (struct ib_pma_portsamplesresult *)pmp->data;
1322 struct qib_ibdev *dev = to_idev(ibdev);
1323 struct qib_devdata *dd = dd_from_dev(dev);
1324 struct qib_ibport *ibp = to_iport(ibdev, port);
1325 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1326 unsigned long flags;
1327 u8 status;
1328 int i;
1329
1330 memset(pmp->data, 0, sizeof(pmp->data));
1331 spin_lock_irqsave(&ibp->lock, flags);
1332 p->tag = cpu_to_be16(ibp->pma_tag);
1333 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1334 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1335 else {
1336 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1337 p->sample_status = cpu_to_be16(status);
1338 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1339 cache_hw_sample_counters(ppd);
1340 ppd->cong_stats.counter =
1341 xmit_wait_get_value_delta(ppd);
1342 dd->f_set_cntr_sample(ppd,
1343 QIB_CONG_TIMER_PSINTERVAL, 0);
1344 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1345 }
1346 }
1347 for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
1348 p->counter[i] = cpu_to_be32(
1349 get_cache_hw_sample_counters(
1350 ppd, ibp->pma_counter_select[i]));
1351 spin_unlock_irqrestore(&ibp->lock, flags);
1352
1353 return reply((struct ib_smp *) pmp);
1354}
1355
1356static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
1357 struct ib_device *ibdev, u8 port)
1358{
1359 struct ib_pma_portsamplesresult_ext *p =
1360 (struct ib_pma_portsamplesresult_ext *)pmp->data;
1361 struct qib_ibdev *dev = to_idev(ibdev);
1362 struct qib_devdata *dd = dd_from_dev(dev);
1363 struct qib_ibport *ibp = to_iport(ibdev, port);
1364 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1365 unsigned long flags;
1366 u8 status;
1367 int i;
1368
1369 /* Port Sampling code owns the PS* HW counters */
1370 memset(pmp->data, 0, sizeof(pmp->data));
1371 spin_lock_irqsave(&ibp->lock, flags);
1372 p->tag = cpu_to_be16(ibp->pma_tag);
1373 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1374 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1375 else {
1376 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1377 p->sample_status = cpu_to_be16(status);
1378 /* 64 bits */
1379 p->extended_width = cpu_to_be32(0x80000000);
1380 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1381 cache_hw_sample_counters(ppd);
1382 ppd->cong_stats.counter =
1383 xmit_wait_get_value_delta(ppd);
1384 dd->f_set_cntr_sample(ppd,
1385 QIB_CONG_TIMER_PSINTERVAL, 0);
1386 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1387 }
1388 }
1389 for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
1390 p->counter[i] = cpu_to_be64(
1391 get_cache_hw_sample_counters(
1392 ppd, ibp->pma_counter_select[i]));
1393 spin_unlock_irqrestore(&ibp->lock, flags);
1394
1395 return reply((struct ib_smp *) pmp);
1396}
1397
1398static int pma_get_portcounters(struct ib_perf *pmp,
1399 struct ib_device *ibdev, u8 port)
1400{
1401 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1402 pmp->data;
1403 struct qib_ibport *ibp = to_iport(ibdev, port);
1404 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1405 struct qib_verbs_counters cntrs;
1406 u8 port_select = p->port_select;
1407
1408 qib_get_counters(ppd, &cntrs);
1409
1410 /* Adjust counters for any resets done. */
1411 cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1412 cntrs.link_error_recovery_counter -=
1413 ibp->z_link_error_recovery_counter;
1414 cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1415 cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1416 cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
1417 cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1418 cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1419 cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1420 cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1421 cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1422 cntrs.local_link_integrity_errors -=
1423 ibp->z_local_link_integrity_errors;
1424 cntrs.excessive_buffer_overrun_errors -=
1425 ibp->z_excessive_buffer_overrun_errors;
1426 cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1427 cntrs.vl15_dropped += ibp->n_vl15_dropped;
1428
1429 memset(pmp->data, 0, sizeof(pmp->data));
1430
1431 p->port_select = port_select;
1432 if (pmp->attr_mod != 0 || port_select != port)
1433 pmp->status |= IB_SMP_INVALID_FIELD;
1434
1435 if (cntrs.symbol_error_counter > 0xFFFFUL)
1436 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1437 else
1438 p->symbol_error_counter =
1439 cpu_to_be16((u16)cntrs.symbol_error_counter);
1440 if (cntrs.link_error_recovery_counter > 0xFFUL)
1441 p->link_error_recovery_counter = 0xFF;
1442 else
1443 p->link_error_recovery_counter =
1444 (u8)cntrs.link_error_recovery_counter;
1445 if (cntrs.link_downed_counter > 0xFFUL)
1446 p->link_downed_counter = 0xFF;
1447 else
1448 p->link_downed_counter = (u8)cntrs.link_downed_counter;
1449 if (cntrs.port_rcv_errors > 0xFFFFUL)
1450 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1451 else
1452 p->port_rcv_errors =
1453 cpu_to_be16((u16) cntrs.port_rcv_errors);
1454 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1455 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1456 else
1457 p->port_rcv_remphys_errors =
1458 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1459 if (cntrs.port_xmit_discards > 0xFFFFUL)
1460 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1461 else
1462 p->port_xmit_discards =
1463 cpu_to_be16((u16)cntrs.port_xmit_discards);
1464 if (cntrs.local_link_integrity_errors > 0xFUL)
1465 cntrs.local_link_integrity_errors = 0xFUL;
1466 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1467 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1468 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
1469 cntrs.excessive_buffer_overrun_errors;
1470 if (cntrs.vl15_dropped > 0xFFFFUL)
1471 p->vl15_dropped = cpu_to_be16(0xFFFF);
1472 else
1473 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1474 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1475 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1476 else
1477 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1478 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1479 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1480 else
1481 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1482 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1483 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1484 else
1485 p->port_xmit_packets =
1486 cpu_to_be32((u32)cntrs.port_xmit_packets);
1487 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1488 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1489 else
1490 p->port_rcv_packets =
1491 cpu_to_be32((u32) cntrs.port_rcv_packets);
1492
1493 return reply((struct ib_smp *) pmp);
1494}
1495
1496static int pma_get_portcounters_cong(struct ib_perf *pmp,
1497 struct ib_device *ibdev, u8 port)
1498{
1499 /* Congestion PMA packets start at offset 24 not 64 */
1500 struct ib_pma_portcounters_cong *p =
1501 (struct ib_pma_portcounters_cong *)pmp->reserved;
1502 struct qib_verbs_counters cntrs;
1503 struct qib_ibport *ibp = to_iport(ibdev, port);
1504 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1505 struct qib_devdata *dd = dd_from_ppd(ppd);
1506 u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
1507 u64 xmit_wait_counter;
1508 unsigned long flags;
1509
1510 /*
1511 * This check is performed only in the GET method because the
1512 * SET method ends up calling this anyway.
1513 */
1514 if (!dd->psxmitwait_supported)
1515 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1516 if (port_select != port)
1517 pmp->status |= IB_SMP_INVALID_FIELD;
1518
1519 qib_get_counters(ppd, &cntrs);
1520 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
1521 xmit_wait_counter = xmit_wait_get_value_delta(ppd);
1522 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1523
1524 /* Adjust counters for any resets done. */
1525 cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1526 cntrs.link_error_recovery_counter -=
1527 ibp->z_link_error_recovery_counter;
1528 cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1529 cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1530 cntrs.port_rcv_remphys_errors -=
1531 ibp->z_port_rcv_remphys_errors;
1532 cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1533 cntrs.local_link_integrity_errors -=
1534 ibp->z_local_link_integrity_errors;
1535 cntrs.excessive_buffer_overrun_errors -=
1536 ibp->z_excessive_buffer_overrun_errors;
1537 cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1538 cntrs.vl15_dropped += ibp->n_vl15_dropped;
1539 cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1540 cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1541 cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1542 cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1543
1544 memset(pmp->reserved, 0, sizeof(pmp->reserved) +
1545 sizeof(pmp->data));
1546
1547 /*
1548 * Set top 3 bits to indicate interval in picoseconds in
1549 * remaining bits.
1550 */
1551 p->port_check_rate =
1552 cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
1553 (dd->psxmitwait_check_rate &
1554 ~(QIB_XMIT_RATE_PICO << 13)));
1555 p->port_adr_events = cpu_to_be64(0);
1556 p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
1557 p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
1558 p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
1559 p->port_xmit_packets =
1560 cpu_to_be64(cntrs.port_xmit_packets);
1561 p->port_rcv_packets =
1562 cpu_to_be64(cntrs.port_rcv_packets);
1563 if (cntrs.symbol_error_counter > 0xFFFFUL)
1564 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1565 else
1566 p->symbol_error_counter =
1567 cpu_to_be16(
1568 (u16)cntrs.symbol_error_counter);
1569 if (cntrs.link_error_recovery_counter > 0xFFUL)
1570 p->link_error_recovery_counter = 0xFF;
1571 else
1572 p->link_error_recovery_counter =
1573 (u8)cntrs.link_error_recovery_counter;
1574 if (cntrs.link_downed_counter > 0xFFUL)
1575 p->link_downed_counter = 0xFF;
1576 else
1577 p->link_downed_counter =
1578 (u8)cntrs.link_downed_counter;
1579 if (cntrs.port_rcv_errors > 0xFFFFUL)
1580 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1581 else
1582 p->port_rcv_errors =
1583 cpu_to_be16((u16) cntrs.port_rcv_errors);
1584 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1585 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1586 else
1587 p->port_rcv_remphys_errors =
1588 cpu_to_be16(
1589 (u16)cntrs.port_rcv_remphys_errors);
1590 if (cntrs.port_xmit_discards > 0xFFFFUL)
1591 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1592 else
1593 p->port_xmit_discards =
1594 cpu_to_be16((u16)cntrs.port_xmit_discards);
1595 if (cntrs.local_link_integrity_errors > 0xFUL)
1596 cntrs.local_link_integrity_errors = 0xFUL;
1597 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1598 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1599 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
1600 cntrs.excessive_buffer_overrun_errors;
1601 if (cntrs.vl15_dropped > 0xFFFFUL)
1602 p->vl15_dropped = cpu_to_be16(0xFFFF);
1603 else
1604 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1605
1606 return reply((struct ib_smp *)pmp);
1607}
1608
1609static int pma_get_portcounters_ext(struct ib_perf *pmp,
1610 struct ib_device *ibdev, u8 port)
1611{
1612 struct ib_pma_portcounters_ext *p =
1613 (struct ib_pma_portcounters_ext *)pmp->data;
1614 struct qib_ibport *ibp = to_iport(ibdev, port);
1615 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1616 u64 swords, rwords, spkts, rpkts, xwait;
1617 u8 port_select = p->port_select;
1618
1619 memset(pmp->data, 0, sizeof(pmp->data));
1620
1621 p->port_select = port_select;
1622 if (pmp->attr_mod != 0 || port_select != port) {
1623 pmp->status |= IB_SMP_INVALID_FIELD;
1624 goto bail;
1625 }
1626
1627 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1628
1629 /* Adjust counters for any resets done. */
1630 swords -= ibp->z_port_xmit_data;
1631 rwords -= ibp->z_port_rcv_data;
1632 spkts -= ibp->z_port_xmit_packets;
1633 rpkts -= ibp->z_port_rcv_packets;
1634
1635 p->port_xmit_data = cpu_to_be64(swords);
1636 p->port_rcv_data = cpu_to_be64(rwords);
1637 p->port_xmit_packets = cpu_to_be64(spkts);
1638 p->port_rcv_packets = cpu_to_be64(rpkts);
1639 p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
1640 p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
1641 p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
1642 p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
1643
1644bail:
1645 return reply((struct ib_smp *) pmp);
1646}
1647
1648static int pma_set_portcounters(struct ib_perf *pmp,
1649 struct ib_device *ibdev, u8 port)
1650{
1651 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1652 pmp->data;
1653 struct qib_ibport *ibp = to_iport(ibdev, port);
1654 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1655 struct qib_verbs_counters cntrs;
1656
1657 /*
1658 * Since the HW doesn't support clearing counters, we save the
1659 * current count and subtract it from future responses.
1660 */
1661 qib_get_counters(ppd, &cntrs);
1662
1663 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
1664 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1665
1666 if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
1667 ibp->z_link_error_recovery_counter =
1668 cntrs.link_error_recovery_counter;
1669
1670 if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
1671 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1672
1673 if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
1674 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1675
1676 if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
1677 ibp->z_port_rcv_remphys_errors =
1678 cntrs.port_rcv_remphys_errors;
1679
1680 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
1681 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1682
1683 if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
1684 ibp->z_local_link_integrity_errors =
1685 cntrs.local_link_integrity_errors;
1686
1687 if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
1688 ibp->z_excessive_buffer_overrun_errors =
1689 cntrs.excessive_buffer_overrun_errors;
1690
1691 if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
1692 ibp->n_vl15_dropped = 0;
1693 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1694 }
1695
1696 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
1697 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1698
1699 if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
1700 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1701
1702 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
1703 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1704
1705 if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
1706 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1707
1708 return pma_get_portcounters(pmp, ibdev, port);
1709}
1710
1711static int pma_set_portcounters_cong(struct ib_perf *pmp,
1712 struct ib_device *ibdev, u8 port)
1713{
1714 struct qib_ibport *ibp = to_iport(ibdev, port);
1715 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1716 struct qib_devdata *dd = dd_from_ppd(ppd);
1717 struct qib_verbs_counters cntrs;
1718 u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
1719 int ret = 0;
1720 unsigned long flags;
1721
1722 qib_get_counters(ppd, &cntrs);
1723 /* Get counter values before we save them */
1724 ret = pma_get_portcounters_cong(pmp, ibdev, port);
1725
1726 if (counter_select & IB_PMA_SEL_CONG_XMIT) {
1727 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
1728 ppd->cong_stats.counter = 0;
1729 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
1730 0x0);
1731 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1732 }
1733 if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
1734 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1735 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1736 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1737 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1738 }
1739 if (counter_select & IB_PMA_SEL_CONG_ALL) {
1740 ibp->z_symbol_error_counter =
1741 cntrs.symbol_error_counter;
1742 ibp->z_link_error_recovery_counter =
1743 cntrs.link_error_recovery_counter;
1744 ibp->z_link_downed_counter =
1745 cntrs.link_downed_counter;
1746 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1747 ibp->z_port_rcv_remphys_errors =
1748 cntrs.port_rcv_remphys_errors;
1749 ibp->z_port_xmit_discards =
1750 cntrs.port_xmit_discards;
1751 ibp->z_local_link_integrity_errors =
1752 cntrs.local_link_integrity_errors;
1753 ibp->z_excessive_buffer_overrun_errors =
1754 cntrs.excessive_buffer_overrun_errors;
1755 ibp->n_vl15_dropped = 0;
1756 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1757 }
1758
1759 return ret;
1760}
1761
1762static int pma_set_portcounters_ext(struct ib_perf *pmp,
1763 struct ib_device *ibdev, u8 port)
1764{
1765 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1766 pmp->data;
1767 struct qib_ibport *ibp = to_iport(ibdev, port);
1768 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1769 u64 swords, rwords, spkts, rpkts, xwait;
1770
1771 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1772
1773 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1774 ibp->z_port_xmit_data = swords;
1775
1776 if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1777 ibp->z_port_rcv_data = rwords;
1778
1779 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1780 ibp->z_port_xmit_packets = spkts;
1781
1782 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1783 ibp->z_port_rcv_packets = rpkts;
1784
1785 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1786 ibp->n_unicast_xmit = 0;
1787
1788 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1789 ibp->n_unicast_rcv = 0;
1790
1791 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1792 ibp->n_multicast_xmit = 0;
1793
1794 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1795 ibp->n_multicast_rcv = 0;
1796
1797 return pma_get_portcounters_ext(pmp, ibdev, port);
1798}
1799
1800static int process_subn(struct ib_device *ibdev, int mad_flags,
1801 u8 port, struct ib_mad *in_mad,
1802 struct ib_mad *out_mad)
1803{
1804 struct ib_smp *smp = (struct ib_smp *)out_mad;
1805 struct qib_ibport *ibp = to_iport(ibdev, port);
1806 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1807 int ret;
1808
1809 *out_mad = *in_mad;
1810 if (smp->class_version != 1) {
1811 smp->status |= IB_SMP_UNSUP_VERSION;
1812 ret = reply(smp);
1813 goto bail;
1814 }
1815
1816 ret = check_mkey(ibp, smp, mad_flags);
1817 if (ret) {
1818 u32 port_num = be32_to_cpu(smp->attr_mod);
1819
1820 /*
1821 * If this is a get/set portinfo, we already check the
1822 * M_Key if the MAD is for another port and the M_Key
1823 * is OK on the receiving port. This check is needed
1824 * to increment the error counters when the M_Key
1825 * fails to match on *both* ports.
1826 */
1827 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
1828 (smp->method == IB_MGMT_METHOD_GET ||
1829 smp->method == IB_MGMT_METHOD_SET) &&
1830 port_num && port_num <= ibdev->phys_port_cnt &&
1831 port != port_num)
1832 (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
1833 goto bail;
1834 }
1835
1836 switch (smp->method) {
1837 case IB_MGMT_METHOD_GET:
1838 switch (smp->attr_id) {
1839 case IB_SMP_ATTR_NODE_DESC:
1840 ret = subn_get_nodedescription(smp, ibdev);
1841 goto bail;
1842 case IB_SMP_ATTR_NODE_INFO:
1843 ret = subn_get_nodeinfo(smp, ibdev, port);
1844 goto bail;
1845 case IB_SMP_ATTR_GUID_INFO:
1846 ret = subn_get_guidinfo(smp, ibdev, port);
1847 goto bail;
1848 case IB_SMP_ATTR_PORT_INFO:
1849 ret = subn_get_portinfo(smp, ibdev, port);
1850 goto bail;
1851 case IB_SMP_ATTR_PKEY_TABLE:
1852 ret = subn_get_pkeytable(smp, ibdev, port);
1853 goto bail;
1854 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1855 ret = subn_get_sl_to_vl(smp, ibdev, port);
1856 goto bail;
1857 case IB_SMP_ATTR_VL_ARB_TABLE:
1858 ret = subn_get_vl_arb(smp, ibdev, port);
1859 goto bail;
1860 case IB_SMP_ATTR_SM_INFO:
1861 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
1862 ret = IB_MAD_RESULT_SUCCESS |
1863 IB_MAD_RESULT_CONSUMED;
1864 goto bail;
1865 }
1866 if (ibp->port_cap_flags & IB_PORT_SM) {
1867 ret = IB_MAD_RESULT_SUCCESS;
1868 goto bail;
1869 }
1870 /* FALLTHROUGH */
1871 default:
1872 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1873 ret = reply(smp);
1874 goto bail;
1875 }
1876
1877 case IB_MGMT_METHOD_SET:
1878 switch (smp->attr_id) {
1879 case IB_SMP_ATTR_GUID_INFO:
1880 ret = subn_set_guidinfo(smp, ibdev, port);
1881 goto bail;
1882 case IB_SMP_ATTR_PORT_INFO:
1883 ret = subn_set_portinfo(smp, ibdev, port);
1884 goto bail;
1885 case IB_SMP_ATTR_PKEY_TABLE:
1886 ret = subn_set_pkeytable(smp, ibdev, port);
1887 goto bail;
1888 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1889 ret = subn_set_sl_to_vl(smp, ibdev, port);
1890 goto bail;
1891 case IB_SMP_ATTR_VL_ARB_TABLE:
1892 ret = subn_set_vl_arb(smp, ibdev, port);
1893 goto bail;
1894 case IB_SMP_ATTR_SM_INFO:
1895 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
1896 ret = IB_MAD_RESULT_SUCCESS |
1897 IB_MAD_RESULT_CONSUMED;
1898 goto bail;
1899 }
1900 if (ibp->port_cap_flags & IB_PORT_SM) {
1901 ret = IB_MAD_RESULT_SUCCESS;
1902 goto bail;
1903 }
1904 /* FALLTHROUGH */
1905 default:
1906 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1907 ret = reply(smp);
1908 goto bail;
1909 }
1910
1911 case IB_MGMT_METHOD_TRAP_REPRESS:
1912 if (smp->attr_id == IB_SMP_ATTR_NOTICE)
1913 ret = subn_trap_repress(smp, ibdev, port);
1914 else {
1915 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1916 ret = reply(smp);
1917 }
1918 goto bail;
1919
1920 case IB_MGMT_METHOD_TRAP:
1921 case IB_MGMT_METHOD_REPORT:
1922 case IB_MGMT_METHOD_REPORT_RESP:
1923 case IB_MGMT_METHOD_GET_RESP:
1924 /*
1925 * The ib_mad module will call us to process responses
1926 * before checking for other consumers.
1927 * Just tell the caller to process it normally.
1928 */
1929 ret = IB_MAD_RESULT_SUCCESS;
1930 goto bail;
1931
1932 case IB_MGMT_METHOD_SEND:
1933 if (ib_get_smp_direction(smp) &&
1934 smp->attr_id == QIB_VENDOR_IPG) {
1935 ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
1936 smp->data[0]);
1937 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1938 } else
1939 ret = IB_MAD_RESULT_SUCCESS;
1940 goto bail;
1941
1942 default:
1943 smp->status |= IB_SMP_UNSUP_METHOD;
1944 ret = reply(smp);
1945 }
1946
1947bail:
1948 return ret;
1949}
1950
1951static int process_perf(struct ib_device *ibdev, u8 port,
1952 struct ib_mad *in_mad,
1953 struct ib_mad *out_mad)
1954{
1955 struct ib_perf *pmp = (struct ib_perf *)out_mad;
1956 int ret;
1957
1958 *out_mad = *in_mad;
1959 if (pmp->class_version != 1) {
1960 pmp->status |= IB_SMP_UNSUP_VERSION;
1961 ret = reply((struct ib_smp *) pmp);
1962 goto bail;
1963 }
1964
1965 switch (pmp->method) {
1966 case IB_MGMT_METHOD_GET:
1967 switch (pmp->attr_id) {
1968 case IB_PMA_CLASS_PORT_INFO:
1969 ret = pma_get_classportinfo(pmp, ibdev);
1970 goto bail;
1971 case IB_PMA_PORT_SAMPLES_CONTROL:
1972 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
1973 goto bail;
1974 case IB_PMA_PORT_SAMPLES_RESULT:
1975 ret = pma_get_portsamplesresult(pmp, ibdev, port);
1976 goto bail;
1977 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
1978 ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
1979 goto bail;
1980 case IB_PMA_PORT_COUNTERS:
1981 ret = pma_get_portcounters(pmp, ibdev, port);
1982 goto bail;
1983 case IB_PMA_PORT_COUNTERS_EXT:
1984 ret = pma_get_portcounters_ext(pmp, ibdev, port);
1985 goto bail;
1986 case IB_PMA_PORT_COUNTERS_CONG:
1987 ret = pma_get_portcounters_cong(pmp, ibdev, port);
1988 goto bail;
1989 default:
1990 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1991 ret = reply((struct ib_smp *) pmp);
1992 goto bail;
1993 }
1994
1995 case IB_MGMT_METHOD_SET:
1996 switch (pmp->attr_id) {
1997 case IB_PMA_PORT_SAMPLES_CONTROL:
1998 ret = pma_set_portsamplescontrol(pmp, ibdev, port);
1999 goto bail;
2000 case IB_PMA_PORT_COUNTERS:
2001 ret = pma_set_portcounters(pmp, ibdev, port);
2002 goto bail;
2003 case IB_PMA_PORT_COUNTERS_EXT:
2004 ret = pma_set_portcounters_ext(pmp, ibdev, port);
2005 goto bail;
2006 case IB_PMA_PORT_COUNTERS_CONG:
2007 ret = pma_set_portcounters_cong(pmp, ibdev, port);
2008 goto bail;
2009 default:
2010 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
2011 ret = reply((struct ib_smp *) pmp);
2012 goto bail;
2013 }
2014
2015 case IB_MGMT_METHOD_TRAP:
2016 case IB_MGMT_METHOD_GET_RESP:
2017 /*
2018 * The ib_mad module will call us to process responses
2019 * before checking for other consumers.
2020 * Just tell the caller to process it normally.
2021 */
2022 ret = IB_MAD_RESULT_SUCCESS;
2023 goto bail;
2024
2025 default:
2026 pmp->status |= IB_SMP_UNSUP_METHOD;
2027 ret = reply((struct ib_smp *) pmp);
2028 }
2029
2030bail:
2031 return ret;
2032}
2033
2034/**
2035 * qib_process_mad - process an incoming MAD packet
2036 * @ibdev: the infiniband device this packet came in on
2037 * @mad_flags: MAD flags
2038 * @port: the port number this packet came in on
2039 * @in_wc: the work completion entry for this packet
2040 * @in_grh: the global route header for this packet
2041 * @in_mad: the incoming MAD
2042 * @out_mad: any outgoing MAD reply
2043 *
2044 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
2045 * interested in processing.
2046 *
2047 * Note that the verbs framework has already done the MAD sanity checks,
2048 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
2049 * MADs.
2050 *
2051 * This is called by the ib_mad module.
2052 */
2053int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2054 struct ib_wc *in_wc, struct ib_grh *in_grh,
2055 struct ib_mad *in_mad, struct ib_mad *out_mad)
2056{
2057 int ret;
2058
2059 switch (in_mad->mad_hdr.mgmt_class) {
2060 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
2061 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
2062 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
2063 goto bail;
2064
2065 case IB_MGMT_CLASS_PERF_MGMT:
2066 ret = process_perf(ibdev, port, in_mad, out_mad);
2067 goto bail;
2068
2069 default:
2070 ret = IB_MAD_RESULT_SUCCESS;
2071 }
2072
2073bail:
2074 return ret;
2075}
2076
2077static void send_handler(struct ib_mad_agent *agent,
2078 struct ib_mad_send_wc *mad_send_wc)
2079{
2080 ib_free_send_mad(mad_send_wc->send_buf);
2081}
2082
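/*
 * Runs once a second: unless a port-sampling run still owns the PS*
 * hardware counters and has not yet completed, fold the current
 * PortXmitWait sample into the 64-bit software congestion counter and
 * restart the 2-second hardware sampling interval, so no transmit-wait
 * counts are lost between PMA queries.
 */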
2083static void xmit_wait_timer_func(unsigned long opaque)
2084{
2085 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
2086 struct qib_devdata *dd = dd_from_ppd(ppd);
2087 unsigned long flags;
2088 u8 status;
2089
2090 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
2091 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
2092 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
2093 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
2094 /* save counter cache */
2095 cache_hw_sample_counters(ppd);
2096 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
2097 } else
2098 goto done;
2099 }
2100 ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
2101 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
2102done:
2103 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
2104 mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
2105}
2106
2107int qib_create_agents(struct qib_ibdev *dev)
2108{
2109 struct qib_devdata *dd = dd_from_dev(dev);
2110 struct ib_mad_agent *agent;
2111 struct qib_ibport *ibp;
2112 int p;
2113 int ret;
2114
2115 for (p = 0; p < dd->num_pports; p++) {
2116 ibp = &dd->pport[p].ibport_data;
2117 agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
2118 NULL, 0, send_handler,
2119 NULL, NULL);
2120 if (IS_ERR(agent)) {
2121 ret = PTR_ERR(agent);
2122 goto err;
2123 }
2124
2125 /* Initialize xmit_wait structure */
2126 dd->pport[p].cong_stats.counter = 0;
2127 init_timer(&dd->pport[p].cong_stats.timer);
2128 dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
2129 dd->pport[p].cong_stats.timer.data =
2130 (unsigned long)(&dd->pport[p]);
2131 dd->pport[p].cong_stats.timer.expires = 0;
2132 add_timer(&dd->pport[p].cong_stats.timer);
2133
2134 ibp->send_agent = agent;
2135 }
2136
2137 return 0;
2138
2139err:
2140 for (p = 0; p < dd->num_pports; p++) {
2141 ibp = &dd->pport[p].ibport_data;
2142 if (ibp->send_agent) {
2143 agent = ibp->send_agent;
2144 ibp->send_agent = NULL;
2145 ib_unregister_mad_agent(agent);
2146 }
2147 }
2148
2149 return ret;
2150}
2151
2152void qib_free_agents(struct qib_ibdev *dev)
2153{
2154 struct qib_devdata *dd = dd_from_dev(dev);
2155 struct ib_mad_agent *agent;
2156 struct qib_ibport *ibp;
2157 int p;
2158
2159 for (p = 0; p < dd->num_pports; p++) {
2160 ibp = &dd->pport[p].ibport_data;
2161 if (ibp->send_agent) {
2162 agent = ibp->send_agent;
2163 ibp->send_agent = NULL;
2164 ib_unregister_mad_agent(agent);
2165 }
2166 if (ibp->sm_ah) {
2167 ib_destroy_ah(&ibp->sm_ah->ibah);
2168 ibp->sm_ah = NULL;
2169 }
2170 if (dd->pport[p].cong_stats.timer.data)
2171 del_timer_sync(&dd->pport[p].cong_stats.timer);
2172 }
2173}
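The PortCounters handlers above repeat one pattern throughout: subtract the software "zero" baseline recorded at the last SET (the z_* fields), then saturate the 64-bit result into the narrower on-the-wire PMA field rather than letting it wrap. A stand-alone sketch of that pattern, with hypothetical helper names and the cpu_to_be*() byte swapping left out:

#include <stdint.h>

/* Saturate a 64-bit running count into a narrower PMA field. */
static uint16_t sat16(uint64_t v) { return v > 0xFFFFULL ? 0xFFFF : (uint16_t)v; }
static uint32_t sat32(uint64_t v) { return v > 0xFFFFFFFFULL ? 0xFFFFFFFFU : (uint32_t)v; }

/*
 * "Clear on SET" without hardware support: remember the count at SET
 * time (the z_* baseline) and report only the delta on every GET.
 */
struct counter_state {
	uint64_t hw_count;	/* monotonically increasing hardware count */
	uint64_t z_baseline;	/* snapshot taken when the counter was "cleared" */
};

static uint16_t report16(const struct counter_state *c)
{
	return sat16(c->hw_count - c->z_baseline);
}

static uint32_t report32(const struct counter_state *c)
{
	return sat32(c->hw_count - c->z_baseline);
}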
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
new file mode 100644
index 000000000000..147aff9117d7
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -0,0 +1,373 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
36#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
37#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
38#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
39
40struct ib_node_info {
41 u8 base_version;
42 u8 class_version;
43 u8 node_type;
44 u8 num_ports;
45 __be64 sys_guid;
46 __be64 node_guid;
47 __be64 port_guid;
48 __be16 partition_cap;
49 __be16 device_id;
50 __be32 revision;
51 u8 local_port_num;
52 u8 vendor_id[3];
53} __attribute__ ((packed));
54
55struct ib_mad_notice_attr {
56 u8 generic_type;
57 u8 prod_type_msb;
58 __be16 prod_type_lsb;
59 __be16 trap_num;
60 __be16 issuer_lid;
61 __be16 toggle_count;
62
63 union {
64 struct {
65 u8 details[54];
66 } raw_data;
67
68 struct {
69 __be16 reserved;
70 __be16 lid; /* where violation happened */
71 u8 port_num; /* where violation happened */
72 } __attribute__ ((packed)) ntc_129_131;
73
74 struct {
75 __be16 reserved;
76 __be16 lid; /* LID where change occurred */
77 u8 reserved2;
78 u8 local_changes; /* low bit - local changes */
79 __be32 new_cap_mask; /* new capability mask */
80 u8 reserved3;
81 u8 change_flags; /* low 3 bits only */
82 } __attribute__ ((packed)) ntc_144;
83
84 struct {
85 __be16 reserved;
86 __be16 lid; /* lid where sys guid changed */
87 __be16 reserved2;
88 __be64 new_sys_guid;
89 } __attribute__ ((packed)) ntc_145;
90
91 struct {
92 __be16 reserved;
93 __be16 lid;
94 __be16 dr_slid;
95 u8 method;
96 u8 reserved2;
97 __be16 attr_id;
98 __be32 attr_mod;
99 __be64 mkey;
100 u8 reserved3;
101 u8 dr_trunc_hop;
102 u8 dr_rtn_path[30];
103 } __attribute__ ((packed)) ntc_256;
104
105 struct {
106 __be16 reserved;
107 __be16 lid1;
108 __be16 lid2;
109 __be32 key;
110 __be32 sl_qp1; /* SL: high 4 bits */
111 __be32 qp2; /* high 8 bits reserved */
112 union ib_gid gid1;
113 union ib_gid gid2;
114 } __attribute__ ((packed)) ntc_257_258;
115
116 } details;
117};
118
119/*
120 * Generic trap/notice types
121 */
122#define IB_NOTICE_TYPE_FATAL 0x80
123#define IB_NOTICE_TYPE_URGENT 0x81
124#define IB_NOTICE_TYPE_SECURITY 0x82
125#define IB_NOTICE_TYPE_SM 0x83
126#define IB_NOTICE_TYPE_INFO 0x84
127
128/*
129 * Generic trap/notice producers
130 */
131#define IB_NOTICE_PROD_CA cpu_to_be16(1)
132#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
133#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
134#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
135
136/*
137 * Generic trap/notice numbers
138 */
139#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
140#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
141#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
142#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
143#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
144#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
145#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
146#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
147
148/*
149 * Repress trap/notice flags
150 */
151#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
152#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
153#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
154#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
155#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
156#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
157#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
158#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
159
160/*
161 * Generic trap/notice other local changes flags (trap 144).
162 */
163#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
164#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
165#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
166
167/*
168 * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
169 */
170#define IB_NOTICE_TRAP_DR_NOTICE 0x80
171#define IB_NOTICE_TRAP_DR_TRUNC 0x40
172
173struct ib_vl_weight_elem {
174 u8 vl; /* Only low 4 bits, upper 4 bits reserved */
175 u8 weight;
176};
177
178#define IB_VLARB_LOWPRI_0_31 1
179#define IB_VLARB_LOWPRI_32_63 2
180#define IB_VLARB_HIGHPRI_0_31 3
181#define IB_VLARB_HIGHPRI_32_63 4
182
183/*
184 * PMA class portinfo capability mask bits
185 */
186#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
187#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
188#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
189
190#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
191#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
192#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
193#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
194#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
195#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
196#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
197
198struct ib_perf {
199 u8 base_version;
200 u8 mgmt_class;
201 u8 class_version;
202 u8 method;
203 __be16 status;
204 __be16 unused;
205 __be64 tid;
206 __be16 attr_id;
207 __be16 resv;
208 __be32 attr_mod;
209 u8 reserved[40];
210 u8 data[192];
211} __attribute__ ((packed));
212
213struct ib_pma_classportinfo {
214 u8 base_version;
215 u8 class_version;
216 __be16 cap_mask;
217 u8 reserved[3];
218 u8 resp_time_value; /* only lower 5 bits */
219 union ib_gid redirect_gid;
220 __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
221 __be16 redirect_lid;
222 __be16 redirect_pkey;
223 __be32 redirect_qp; /* only lower 24 bits */
224 __be32 redirect_qkey;
225 union ib_gid trap_gid;
226 __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
227 __be16 trap_lid;
228 __be16 trap_pkey;
229 __be32 trap_hl_qp; /* 8, 24 bits respectively */
230 __be32 trap_qkey;
231} __attribute__ ((packed));
232
233struct ib_pma_portsamplescontrol {
234 u8 opcode;
235 u8 port_select;
236 u8 tick;
237 u8 counter_width; /* only lower 3 bits */
238 __be32 counter_mask0_9; /* 2, 10 * 3, bits */
239 __be16 counter_mask10_14; /* 1, 5 * 3, bits */
240 u8 sample_mechanisms;
241 u8 sample_status; /* only lower 2 bits */
242 __be64 option_mask;
243 __be64 vendor_mask;
244 __be32 sample_start;
245 __be32 sample_interval;
246 __be16 tag;
247 __be16 counter_select[15];
248} __attribute__ ((packed));
249
250struct ib_pma_portsamplesresult {
251 __be16 tag;
252 __be16 sample_status; /* only lower 2 bits */
253 __be32 counter[15];
254} __attribute__ ((packed));
255
256struct ib_pma_portsamplesresult_ext {
257 __be16 tag;
258 __be16 sample_status; /* only lower 2 bits */
259 __be32 extended_width; /* only upper 2 bits */
260 __be64 counter[15];
261} __attribute__ ((packed));
262
263struct ib_pma_portcounters {
264 u8 reserved;
265 u8 port_select;
266 __be16 counter_select;
267 __be16 symbol_error_counter;
268 u8 link_error_recovery_counter;
269 u8 link_downed_counter;
270 __be16 port_rcv_errors;
271 __be16 port_rcv_remphys_errors;
272 __be16 port_rcv_switch_relay_errors;
273 __be16 port_xmit_discards;
274 u8 port_xmit_constraint_errors;
275 u8 port_rcv_constraint_errors;
276 u8 reserved1;
277 u8 lli_ebor_errors; /* 4, 4, bits */
278 __be16 reserved2;
279 __be16 vl15_dropped;
280 __be32 port_xmit_data;
281 __be32 port_rcv_data;
282 __be32 port_xmit_packets;
283 __be32 port_rcv_packets;
284} __attribute__ ((packed));
285
286struct ib_pma_portcounters_cong {
287 u8 reserved;
288 u8 reserved1;
289 __be16 port_check_rate;
290 __be16 symbol_error_counter;
291 u8 link_error_recovery_counter;
292 u8 link_downed_counter;
293 __be16 port_rcv_errors;
294 __be16 port_rcv_remphys_errors;
295 __be16 port_rcv_switch_relay_errors;
296 __be16 port_xmit_discards;
297 u8 port_xmit_constraint_errors;
298 u8 port_rcv_constraint_errors;
299 u8 reserved2;
300 u8 lli_ebor_errors; /* 4, 4, bits */
301 __be16 reserved3;
302 __be16 vl15_dropped;
303 __be64 port_xmit_data;
304 __be64 port_rcv_data;
305 __be64 port_xmit_packets;
306 __be64 port_rcv_packets;
307 __be64 port_xmit_wait;
308 __be64 port_adr_events;
309} __attribute__ ((packed));
310
311#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
312#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
313
314#define QIB_XMIT_RATE_UNSUPPORTED 0x0
315#define QIB_XMIT_RATE_PICO 0x7
316/* number of 4 nsec cycles equaling 2 secs */
317#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
318
319#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
320#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
321#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
322#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
323#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
324#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
325#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
326#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
327#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
328#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
329#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
330#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
331#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
332
333#define IB_PMA_SEL_CONG_ALL 0x01
334#define IB_PMA_SEL_CONG_PORT_DATA 0x02
335#define IB_PMA_SEL_CONG_XMIT 0x04
336#define IB_PMA_SEL_CONG_ROUTING 0x08
337
338struct ib_pma_portcounters_ext {
339 u8 reserved;
340 u8 port_select;
341 __be16 counter_select;
342 __be32 reserved1;
343 __be64 port_xmit_data;
344 __be64 port_rcv_data;
345 __be64 port_xmit_packets;
346 __be64 port_rcv_packets;
347 __be64 port_unicast_xmit_packets;
348 __be64 port_unicast_rcv_packets;
349 __be64 port_multicast_xmit_packets;
350 __be64 port_multicast_rcv_packets;
351} __attribute__ ((packed));
352
353#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
354#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
355#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
356#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
357#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
358#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
359#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
360#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
361
362/*
363 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
364 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
365 * We support 5 counters which only count the mandatory quantities.
366 */
367#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
368#define COUNTER_MASK0_9 \
369 cpu_to_be32(COUNTER_MASK(1, 0) | \
370 COUNTER_MASK(1, 1) | \
371 COUNTER_MASK(1, 2) | \
372 COUNTER_MASK(1, 3) | \
373 COUNTER_MASK(1, 4))
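COUNTER_MASK(q, n) drops the 3-bit capability value q into slot n of the 32-bit CounterMasks field, with slot 0 occupying bits 29:27 and slot 9 bits 2:0. A small host-order check of the value the driver advertises, written as ordinary user-space C without the cpu_to_be32() wrapper:

#include <stdio.h>
#include <stdint.h>

#define COUNTER_MASK(q, n) ((uint32_t)(q) << ((9 - (n)) * 3))

int main(void)
{
	/* Five mandatory counters (slots 0..4), capability value 1 each. */
	uint32_t mask = COUNTER_MASK(1, 0) | COUNTER_MASK(1, 1) |
			COUNTER_MASK(1, 2) | COUNTER_MASK(1, 3) |
			COUNTER_MASK(1, 4);

	printf("counter_mask0_9 = 0x%08x\n", mask);	/* 0x09248000 */
	return 0;
}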
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
new file mode 100644
index 000000000000..8b73a11d571c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <linux/mm.h>
37#include <linux/errno.h>
38#include <asm/pgtable.h>
39
40#include "qib_verbs.h"
41
42/**
43 * qib_release_mmap_info - free mmap info structure
44 * @ref: a pointer to the kref within struct qib_mmap_info
45 */
46void qib_release_mmap_info(struct kref *ref)
47{
48 struct qib_mmap_info *ip =
49 container_of(ref, struct qib_mmap_info, ref);
50 struct qib_ibdev *dev = to_idev(ip->context->device);
51
52 spin_lock_irq(&dev->pending_lock);
53 list_del(&ip->pending_mmaps);
54 spin_unlock_irq(&dev->pending_lock);
55
56 vfree(ip->obj);
57 kfree(ip);
58}
59
60/*
61 * open and close keep track of how many times the CQ is mapped,
62 * to avoid releasing it.
63 */
64static void qib_vma_open(struct vm_area_struct *vma)
65{
66 struct qib_mmap_info *ip = vma->vm_private_data;
67
68 kref_get(&ip->ref);
69}
70
71static void qib_vma_close(struct vm_area_struct *vma)
72{
73 struct qib_mmap_info *ip = vma->vm_private_data;
74
75 kref_put(&ip->ref, qib_release_mmap_info);
76}
77
78static struct vm_operations_struct qib_vm_ops = {
79 .open = qib_vma_open,
80 .close = qib_vma_close,
81};
82
83/**
84 * qib_mmap - create a new mmap region
85 * @context: the IB user context of the process making the mmap() call
86 * @vma: the VMA to be initialized
87 * Return zero if the mmap is OK. Otherwise, return an errno.
88 */
89int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
90{
91 struct qib_ibdev *dev = to_idev(context->device);
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
93 unsigned long size = vma->vm_end - vma->vm_start;
94 struct qib_mmap_info *ip, *pp;
95 int ret = -EINVAL;
96
97 /*
98 * Search the device's list of objects waiting for a mmap call.
99 * Normally, this list is very short since a call to create a
100 * CQ, QP, or SRQ is soon followed by a call to mmap().
101 */
102 spin_lock_irq(&dev->pending_lock);
103 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
104 pending_mmaps) {
105 /* Only the creator is allowed to mmap the object */
106 if (context != ip->context || (__u64) offset != ip->offset)
107 continue;
108 /* Don't allow a mmap larger than the object. */
109 if (size > ip->size)
110 break;
111
112 list_del_init(&ip->pending_mmaps);
113 spin_unlock_irq(&dev->pending_lock);
114
115 ret = remap_vmalloc_range(vma, ip->obj, 0);
116 if (ret)
117 goto done;
118 vma->vm_ops = &qib_vm_ops;
119 vma->vm_private_data = ip;
120 qib_vma_open(vma);
121 goto done;
122 }
123 spin_unlock_irq(&dev->pending_lock);
124done:
125 return ret;
126}
127
128/*
129 * Allocate information for qib_mmap
130 */
131struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
132 u32 size,
133 struct ib_ucontext *context,
134 void *obj) {
135 struct qib_mmap_info *ip;
136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL);
138 if (!ip)
139 goto bail;
140
141 size = PAGE_ALIGN(size);
142
143 spin_lock_irq(&dev->mmap_offset_lock);
144 if (dev->mmap_offset == 0)
145 dev->mmap_offset = PAGE_SIZE;
146 ip->offset = dev->mmap_offset;
147 dev->mmap_offset += size;
148 spin_unlock_irq(&dev->mmap_offset_lock);
149
150 INIT_LIST_HEAD(&ip->pending_mmaps);
151 ip->size = size;
152 ip->context = context;
153 ip->obj = obj;
154 kref_init(&ip->ref);
155
156bail:
157 return ip;
158}
159
160void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
161 u32 size, void *obj)
162{
163 size = PAGE_ALIGN(size);
164
165 spin_lock_irq(&dev->mmap_offset_lock);
166 if (dev->mmap_offset == 0)
167 dev->mmap_offset = PAGE_SIZE;
168 ip->offset = dev->mmap_offset;
169 dev->mmap_offset += size;
170 spin_unlock_irq(&dev->mmap_offset_lock);
171
172 ip->size = size;
173 ip->obj = obj;
174}
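The mmap support above works by handing user space an opaque, page-aligned offset cookie when a CQ, QP, or SRQ is created, then matching that cookie again in qib_mmap(). A stripped-down sketch of the cookie scheme, using hypothetical names and omitting the locking and pending-list handling:

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096UL
#define EXAMPLE_PAGE_ALIGN(x) \
	(((x) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))

struct mmap_cookie {
	uint64_t offset;	/* value user space later passes to mmap() */
	size_t size;		/* page-aligned size of the exposed object */
	void *obj;		/* kernel object backing the mapping */
};

/* Hand out non-overlapping, page-aligned offsets, starting at one page. */
static uint64_t next_offset = EXAMPLE_PAGE_SIZE;

static void cookie_init(struct mmap_cookie *c, size_t size, void *obj)
{
	size = EXAMPLE_PAGE_ALIGN(size);
	c->offset = next_offset;
	next_offset += size;
	c->size = size;
	c->obj = obj;
}

/* On mmap(): accept only an exact offset match and an in-bounds length. */
static int cookie_matches(const struct mmap_cookie *c,
			  uint64_t offset, size_t length)
{
	return offset == c->offset && length <= c->size;
}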
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
new file mode 100644
index 000000000000..5f95f0f6385d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -0,0 +1,503 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_umem.h>
35#include <rdma/ib_smi.h>
36
37#include "qib.h"
38
39/* Fast memory region */
40struct qib_fmr {
41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct qib_mregion mr; /* must be last */
44};
45
46static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
47{
48 return container_of(ibfmr, struct qib_fmr, ibfmr);
49}
50
51/**
52 * qib_get_dma_mr - get a DMA memory region
53 * @pd: protection domain for this memory region
54 * @acc: access flags
55 *
56 * Returns the memory region on success, otherwise returns an errno.
57 * Note that all DMA addresses should be created via the
58 * struct ib_dma_mapping_ops functions (see qib_dma.c).
59 */
60struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
61{
62 struct qib_ibdev *dev = to_idev(pd->device);
63 struct qib_mr *mr;
64 struct ib_mr *ret;
65 unsigned long flags;
66
67 if (to_ipd(pd)->user) {
68 ret = ERR_PTR(-EPERM);
69 goto bail;
70 }
71
72 mr = kzalloc(sizeof *mr, GFP_KERNEL);
73 if (!mr) {
74 ret = ERR_PTR(-ENOMEM);
75 goto bail;
76 }
77
78 mr->mr.access_flags = acc;
79 atomic_set(&mr->mr.refcount, 0);
80
81 spin_lock_irqsave(&dev->lk_table.lock, flags);
82 if (!dev->dma_mr)
83 dev->dma_mr = &mr->mr;
84 spin_unlock_irqrestore(&dev->lk_table.lock, flags);
85
86 ret = &mr->ibmr;
87
88bail:
89 return ret;
90}
91
92static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
93{
94 struct qib_mr *mr;
95 int m, i = 0;
96
97 /* Allocate struct plus pointers to first level page tables. */
98 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
99 mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
100 if (!mr)
101 goto done;
102
103 /* Allocate first level page tables. */
104 for (; i < m; i++) {
105 mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
106 if (!mr->mr.map[i])
107 goto bail;
108 }
109 mr->mr.mapsz = m;
110 mr->mr.max_segs = count;
111
112 /*
113 * ib_reg_phys_mr() will initialize mr->ibmr except for
114 * lkey and rkey.
115 */
116 if (!qib_alloc_lkey(lk_table, &mr->mr))
117 goto bail;
118 mr->ibmr.lkey = mr->mr.lkey;
119 mr->ibmr.rkey = mr->mr.lkey;
120
121 atomic_set(&mr->mr.refcount, 0);
122 goto done;
123
124bail:
125 while (i)
126 kfree(mr->mr.map[--i]);
127 kfree(mr);
128 mr = NULL;
129
130done:
131 return mr;
132}
133
134/**
135 * qib_reg_phys_mr - register a physical memory region
136 * @pd: protection domain for this memory region
137 * @buffer_list: pointer to the list of physical buffers to register
138 * @num_phys_buf: the number of physical buffers to register
139 * @iova_start: the starting address passed over IB which maps to this MR
140 *
141 * Returns the memory region on success, otherwise returns an errno.
142 */
143struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
144 struct ib_phys_buf *buffer_list,
145 int num_phys_buf, int acc, u64 *iova_start)
146{
147 struct qib_mr *mr;
148 int n, m, i;
149 struct ib_mr *ret;
150
151 mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
152 if (mr == NULL) {
153 ret = ERR_PTR(-ENOMEM);
154 goto bail;
155 }
156
157 mr->mr.pd = pd;
158 mr->mr.user_base = *iova_start;
159 mr->mr.iova = *iova_start;
160 mr->mr.length = 0;
161 mr->mr.offset = 0;
162 mr->mr.access_flags = acc;
163 mr->umem = NULL;
164
165 m = 0;
166 n = 0;
167 for (i = 0; i < num_phys_buf; i++) {
168 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
169 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
170 mr->mr.length += buffer_list[i].size;
171 n++;
172 if (n == QIB_SEGSZ) {
173 m++;
174 n = 0;
175 }
176 }
177
178 ret = &mr->ibmr;
179
180bail:
181 return ret;
182}
183
184/**
185 * qib_reg_user_mr - register a userspace memory region
186 * @pd: protection domain for this memory region
187 * @start: starting userspace address
188 * @length: length of region to register
189 * @virt_addr: virtual address to use (from HCA's point of view)
190 * @mr_access_flags: access flags for this memory region
191 * @udata: unused by the QLogic_IB driver
192 *
193 * Returns the memory region on success, otherwise returns an errno.
194 */
195struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
196 u64 virt_addr, int mr_access_flags,
197 struct ib_udata *udata)
198{
199 struct qib_mr *mr;
200 struct ib_umem *umem;
201 struct ib_umem_chunk *chunk;
202 int n, m, i;
203 struct ib_mr *ret;
204
205 if (length == 0) {
206 ret = ERR_PTR(-EINVAL);
207 goto bail;
208 }
209
210 umem = ib_umem_get(pd->uobject->context, start, length,
211 mr_access_flags, 0);
212 if (IS_ERR(umem))
213 return (void *) umem;
214
215 n = 0;
216 list_for_each_entry(chunk, &umem->chunk_list, list)
217 n += chunk->nents;
218
219 mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
220 if (!mr) {
221 ret = ERR_PTR(-ENOMEM);
222 ib_umem_release(umem);
223 goto bail;
224 }
225
226 mr->mr.pd = pd;
227 mr->mr.user_base = start;
228 mr->mr.iova = virt_addr;
229 mr->mr.length = length;
230 mr->mr.offset = umem->offset;
231 mr->mr.access_flags = mr_access_flags;
232 mr->umem = umem;
233
234 m = 0;
235 n = 0;
236 list_for_each_entry(chunk, &umem->chunk_list, list) {
237 for (i = 0; i < chunk->nents; i++) {
238 void *vaddr;
239
240 vaddr = page_address(sg_page(&chunk->page_list[i]));
241 if (!vaddr) {
242 ret = ERR_PTR(-EINVAL);
243 goto bail;
244 }
245 mr->mr.map[m]->segs[n].vaddr = vaddr;
246 mr->mr.map[m]->segs[n].length = umem->page_size;
247 n++;
248 if (n == QIB_SEGSZ) {
249 m++;
250 n = 0;
251 }
252 }
253 }
254 ret = &mr->ibmr;
255
256bail:
257 return ret;
258}
259
260/**
261 * qib_dereg_mr - unregister and free a memory region
262 * @ibmr: the memory region to free
263 *
264 * Returns 0 on success.
265 *
266 * Note that this is called to free MRs created by qib_get_dma_mr()
267 * or qib_reg_user_mr().
268 */
269int qib_dereg_mr(struct ib_mr *ibmr)
270{
271 struct qib_mr *mr = to_imr(ibmr);
272 struct qib_ibdev *dev = to_idev(ibmr->device);
273 int ret;
274 int i;
275
276 ret = qib_free_lkey(dev, &mr->mr);
277 if (ret)
278 return ret;
279
280 i = mr->mr.mapsz;
281 while (i)
282 kfree(mr->mr.map[--i]);
283 if (mr->umem)
284 ib_umem_release(mr->umem);
285 kfree(mr);
286 return 0;
287}
288
289/*
290 * Allocate a memory region usable with the
291 * IB_WR_FAST_REG_MR send work request.
292 *
293 * Return the memory region on success, otherwise return an errno.
294 */
295struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
296{
297 struct qib_mr *mr;
298
299 mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
300 if (mr == NULL)
301 return ERR_PTR(-ENOMEM);
302
303 mr->mr.pd = pd;
304 mr->mr.user_base = 0;
305 mr->mr.iova = 0;
306 mr->mr.length = 0;
307 mr->mr.offset = 0;
308 mr->mr.access_flags = 0;
309 mr->umem = NULL;
310
311 return &mr->ibmr;
312}
313
314struct ib_fast_reg_page_list *
315qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
316{
317 unsigned size = page_list_len * sizeof(u64);
318 struct ib_fast_reg_page_list *pl;
319
320 if (size > PAGE_SIZE)
321 return ERR_PTR(-EINVAL);
322
323 pl = kmalloc(sizeof *pl, GFP_KERNEL);
324 if (!pl)
325 return ERR_PTR(-ENOMEM);
326
327 pl->page_list = kmalloc(size, GFP_KERNEL);
328 if (!pl->page_list)
329 goto err_free;
330
331 return pl;
332
333err_free:
334 kfree(pl);
335 return ERR_PTR(-ENOMEM);
336}
337
338void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
339{
340 kfree(pl->page_list);
341 kfree(pl);
342}
343
344/**
345 * qib_alloc_fmr - allocate a fast memory region
346 * @pd: the protection domain for this memory region
347 * @mr_access_flags: access flags for this memory region
348 * @fmr_attr: fast memory region attributes
349 *
350 * Returns the memory region on success, otherwise returns an errno.
351 */
352struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
353 struct ib_fmr_attr *fmr_attr)
354{
355 struct qib_fmr *fmr;
356 int m, i = 0;
357 struct ib_fmr *ret;
358
359 /* Allocate struct plus pointers to first level page tables. */
360 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
361 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
362 if (!fmr)
363 goto bail;
364
365 /* Allocate first level page tables. */
366 for (; i < m; i++) {
367 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
368 GFP_KERNEL);
369 if (!fmr->mr.map[i])
370 goto bail;
371 }
372 fmr->mr.mapsz = m;
373
374 /*
375 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
376 * rkey.
377 */
378 if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
379 goto bail;
380 fmr->ibfmr.rkey = fmr->mr.lkey;
381 fmr->ibfmr.lkey = fmr->mr.lkey;
382 /*
383 * Resources are allocated but no valid mapping (RKEY can't be
384 * used).
385 */
386 fmr->mr.pd = pd;
387 fmr->mr.user_base = 0;
388 fmr->mr.iova = 0;
389 fmr->mr.length = 0;
390 fmr->mr.offset = 0;
391 fmr->mr.access_flags = mr_access_flags;
392 fmr->mr.max_segs = fmr_attr->max_pages;
393 fmr->page_shift = fmr_attr->page_shift;
394
395 atomic_set(&fmr->mr.refcount, 0);
396 ret = &fmr->ibfmr;
397 goto done;
398
399bail:
400 while (i)
401 kfree(fmr->mr.map[--i]);
402 kfree(fmr);
403 ret = ERR_PTR(-ENOMEM);
404
405done:
406 return ret;
407}
408
409/**
410 * qib_map_phys_fmr - set up a fast memory region
411 * @ibfmr: the fast memory region to set up
412 * @page_list: the list of pages to associate with the fast memory region
413 * @list_len: the number of pages to associate with the fast memory region
414 * @iova: the virtual address of the start of the fast memory region
415 *
416 * This may be called from interrupt context.
417 */
418
419int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
420 int list_len, u64 iova)
421{
422 struct qib_fmr *fmr = to_ifmr(ibfmr);
423 struct qib_lkey_table *rkt;
424 unsigned long flags;
425 int m, n, i;
426 u32 ps;
427 int ret;
428
429 if (atomic_read(&fmr->mr.refcount))
430 return -EBUSY;
431
432 if (list_len > fmr->mr.max_segs) {
433 ret = -EINVAL;
434 goto bail;
435 }
436 rkt = &to_idev(ibfmr->device)->lk_table;
437 spin_lock_irqsave(&rkt->lock, flags);
438 fmr->mr.user_base = iova;
439 fmr->mr.iova = iova;
440 ps = 1 << fmr->page_shift;
441 fmr->mr.length = list_len * ps;
442 m = 0;
443 n = 0;
444 for (i = 0; i < list_len; i++) {
445 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
446 fmr->mr.map[m]->segs[n].length = ps;
447 if (++n == QIB_SEGSZ) {
448 m++;
449 n = 0;
450 }
451 }
452 spin_unlock_irqrestore(&rkt->lock, flags);
453 ret = 0;
454
455bail:
456 return ret;
457}
458
459/**
460 * qib_unmap_fmr - unmap fast memory regions
461 * @fmr_list: the list of fast memory regions to unmap
462 *
463 * Returns 0 on success.
464 */
465int qib_unmap_fmr(struct list_head *fmr_list)
466{
467 struct qib_fmr *fmr;
468 struct qib_lkey_table *rkt;
469 unsigned long flags;
470
471 list_for_each_entry(fmr, fmr_list, ibfmr.list) {
472 rkt = &to_idev(fmr->ibfmr.device)->lk_table;
473 spin_lock_irqsave(&rkt->lock, flags);
474 fmr->mr.user_base = 0;
475 fmr->mr.iova = 0;
476 fmr->mr.length = 0;
477 spin_unlock_irqrestore(&rkt->lock, flags);
478 }
479 return 0;
480}
481
482/**
483 * qib_dealloc_fmr - deallocate a fast memory region
484 * @ibfmr: the fast memory region to deallocate
485 *
486 * Returns 0 on success.
487 */
488int qib_dealloc_fmr(struct ib_fmr *ibfmr)
489{
490 struct qib_fmr *fmr = to_ifmr(ibfmr);
491 int ret;
492 int i;
493
494 ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
495 if (ret)
496 return ret;
497
498 i = fmr->mr.mapsz;
499 while (i)
500 kfree(fmr->mr.map[--i]);
501 kfree(fmr);
502 return 0;
503}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
new file mode 100644
index 000000000000..c926bf4541df
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -0,0 +1,738 @@
1/*
2 * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/pci.h>
34#include <linux/io.h>
35#include <linux/delay.h>
36#include <linux/vmalloc.h>
37#include <linux/aer.h>
38
39#include "qib.h"
40
41/*
42 * This file contains PCIe utility routines that are common to the
43 * various QLogic InfiniPath adapters
44 */
45
46/*
47 * Code to adjust PCIe capabilities.
48 * To minimize the change footprint, we call it
49 * from qib_pcie_params, which every chip-specific
50 * file calls, even though this violates some
51 * expectations of harmlessness.
52 */
53static int qib_tune_pcie_caps(struct qib_devdata *);
54static int qib_tune_pcie_coalesce(struct qib_devdata *);
55
56/*
57 * Do all the common PCIe setup and initialization.
58 * devdata is not yet allocated, and is not allocated until after this
59 * routine returns success. Therefore qib_dev_err() can't be used for error
60 * printing.
61 */
62int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
63{
64 int ret;
65
66 ret = pci_enable_device(pdev);
67 if (ret) {
68 /*
69 * This can happen (in theory) iff:
70 * We did a chip reset, and then failed to reprogram the
71 * BAR, or the chip reset due to an internal error. We then
72 * unloaded the driver and reloaded it.
73 *
74 * Both reset cases set the BAR back to initial state. For
75 * the latter case, the AER sticky error bit at offset 0x718
76 * should be set, but the Linux kernel doesn't yet know
77 * about that, it appears. If the original BAR was retained
78 * in the kernel data structures, this may be OK.
79 */
80 qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
81 -ret);
82 goto done;
83 }
84
85 ret = pci_request_regions(pdev, QIB_DRV_NAME);
86 if (ret) {
87 qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
88 goto bail;
89 }
90
91 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
92 if (ret) {
93 /*
94		 * If the 64 bit setup fails, try 32 bit. Some systems
95		 * do not set up 64 bit maps when only 2GB or less memory
96		 * is installed.
97 */
98 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
99 if (ret) {
100 qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
101 goto bail;
102 }
103 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
104 } else
105 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
106 if (ret)
107 qib_early_err(&pdev->dev,
108 "Unable to set DMA consistent mask: %d\n", ret);
109
110 pci_set_master(pdev);
111 ret = pci_enable_pcie_error_reporting(pdev);
112 if (ret)
113 qib_early_err(&pdev->dev,
114 "Unable to enable pcie error reporting: %d\n",
115 ret);
116 goto done;
117
118bail:
119 pci_disable_device(pdev);
120 pci_release_regions(pdev);
121done:
122 return ret;
123}
124
125/*
126 * Do remaining PCIe setup, once dd is allocated, and save away
127 * fields required to re-initialize after a chip reset, or for
128 * various other purposes
129 */
130int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
131 const struct pci_device_id *ent)
132{
133 unsigned long len;
134 resource_size_t addr;
135
136 dd->pcidev = pdev;
137 pci_set_drvdata(pdev, dd);
138
139 addr = pci_resource_start(pdev, 0);
140 len = pci_resource_len(pdev, 0);
141
142#if defined(__powerpc__)
143 /* There isn't a generic way to specify writethrough mappings */
144 dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
145#else
146 dd->kregbase = ioremap_nocache(addr, len);
147#endif
148
149 if (!dd->kregbase)
150 return -ENOMEM;
151
152 dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
153 dd->physaddr = addr; /* used for io_remap, etc. */
154
155 /*
156 * Save BARs to rewrite after device reset. Save all 64 bits of
157 * BAR, just in case.
158 */
159 dd->pcibar0 = addr;
160 dd->pcibar1 = addr >> 32;
161 dd->deviceid = ent->device; /* save for later use */
162 dd->vendorid = ent->vendor;
163
164 return 0;
165}
166
167/*
168 * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
169 * to releasing the dd memory.
170 * void because none of the core pcie cleanup routines returns anything useful
171 */
172void qib_pcie_ddcleanup(struct qib_devdata *dd)
173{
174 u64 __iomem *base = (void __iomem *) dd->kregbase;
175
176 dd->kregbase = NULL;
177 iounmap(base);
178 if (dd->piobase)
179 iounmap(dd->piobase);
180 if (dd->userbase)
181 iounmap(dd->userbase);
182
183 pci_disable_device(dd->pcidev);
184 pci_release_regions(dd->pcidev);
185
186 pci_set_drvdata(dd->pcidev, NULL);
187}
188
189static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
190 struct msix_entry *msix_entry)
191{
192 int ret;
193 u32 tabsize = 0;
194 u16 msix_flags;
195
196 pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
197 tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
198 if (tabsize > *msixcnt)
199 tabsize = *msixcnt;
200 ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
201 if (ret > 0) {
202 tabsize = ret;
203 ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
204 }
205 if (ret) {
206 qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
207 "falling back to INTx\n", tabsize, ret);
208 tabsize = 0;
209 }
210 *msixcnt = tabsize;
211
212 if (ret)
213 qib_enable_intx(dd->pcidev);
214
215}
216
217/**
218 * We save the msi lo and hi values, so we can restore them after
219 * chip reset (the kernel PCI infrastructure doesn't yet handle that
220 * correctly).
221 */
222static int qib_msi_setup(struct qib_devdata *dd, int pos)
223{
224 struct pci_dev *pdev = dd->pcidev;
225 u16 control;
226 int ret;
227
228 ret = pci_enable_msi(pdev);
229 if (ret)
230 qib_dev_err(dd, "pci_enable_msi failed: %d, "
231 "interrupts may not work\n", ret);
232 /* continue even if it fails, we may still be OK... */
233
234 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
235 &dd->msi_lo);
236 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
237 &dd->msi_hi);
238 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
239 /* now save the data (vector) info */
240 pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
241 ? 12 : 8),
242 &dd->msi_data);
243 return ret;
244}
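
/*
 * Sketch of the offsets involved (an editorial illustration based on the
 * PCI spec, not part of the original code): for a 64-bit MSI capability
 * the message data word sits at cap + 12, for a 32-bit one at cap + 8,
 * which is what the PCI_MSI_FLAGS_64BIT test above selects.  The values
 * saved here are written back verbatim by qib_reinit_intr() after a
 * chip reset.
 */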
245
246int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
247 struct msix_entry *entry)
248{
249 u16 linkstat, speed;
250 int pos = 0, pose, ret = 1;
251
252 pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
253 if (!pose) {
254 qib_dev_err(dd, "Can't find PCI Express capability!\n");
255 /* set up something... */
256 dd->lbus_width = 1;
257 dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
258 goto bail;
259 }
260
261 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
262 if (nent && *nent && pos) {
263 qib_msix_setup(dd, pos, nent, entry);
264 ret = 0; /* did it, either MSIx or INTx */
265 } else {
266 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
267 if (pos)
268 ret = qib_msi_setup(dd, pos);
269 else
270 qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
271 }
272 if (!pos)
273 qib_enable_intx(dd->pcidev);
274
275 pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
276 /*
277 * speed is bits 0-3, linkwidth is bits 4-8
278 * no defines for them in headers
279 */
280 speed = linkstat & 0xf;
281 linkstat >>= 4;
282 linkstat &= 0x1f;
283 dd->lbus_width = linkstat;
284
285 switch (speed) {
286 case 1:
287 dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
288 break;
289 case 2:
290		dd->lbus_speed = 5000; /* Gen2, 5GHz */
291 break;
292 default: /* not defined, assume gen1 */
293 dd->lbus_speed = 2500;
294 break;
295 }
296
297 /*
298 * Check against expected pcie width and complain if "wrong"
299 * on first initialization, not afterwards (i.e., reset).
300 */
301 if (minw && linkstat < minw)
302 qib_dev_err(dd,
303 "PCIe width %u (x%u HCA), performance reduced\n",
304 linkstat, minw);
305
306 qib_tune_pcie_caps(dd);
307
308 qib_tune_pcie_coalesce(dd);
309
310bail:
311 /* fill in string, even on errors */
312 snprintf(dd->lbus_info, sizeof(dd->lbus_info),
313 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
314 return ret;
315}
316
317/*
318 * Setup pcie interrupt stuff again after a reset. I'd like to just call
319 * pci_enable_msi() again for msi, but when I do that,
320 * the MSI enable bit doesn't get set in the command word, and
321 * we switch to a different interrupt vector, which is confusing,
322 * so I instead just do it all inline. Perhaps somehow can tie this
323 * into the PCIe hotplug support at some point
324 */
325int qib_reinit_intr(struct qib_devdata *dd)
326{
327 int pos;
328 u16 control;
329 int ret = 0;
330
331 /* If we aren't using MSI, don't restore it */
332 if (!dd->msi_lo)
333 goto bail;
334
335 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
336 if (!pos) {
337 qib_dev_err(dd, "Can't find MSI capability, "
338 "can't restore MSI settings\n");
339 ret = 0;
340 /* nothing special for MSIx, just MSI */
341 goto bail;
342 }
343 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
344 dd->msi_lo);
345 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
346 dd->msi_hi);
347 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
348 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
349 control |= PCI_MSI_FLAGS_ENABLE;
350 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
351 control);
352 }
353 /* now rewrite the data (vector) info */
354 pci_write_config_word(dd->pcidev, pos +
355 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
356 dd->msi_data);
357 ret = 1;
358bail:
359 if (!ret && (dd->flags & QIB_HAS_INTX)) {
360 qib_enable_intx(dd->pcidev);
361 ret = 1;
362 }
363
364 /* and now set the pci master bit again */
365 pci_set_master(dd->pcidev);
366
367 return ret;
368}
369
370/*
371 * Disable msi interrupt if enabled, and clear msi_lo.
372 * This is used primarily for the fallback to INTx, but
373 * is also used in reinit after reset, and during cleanup.
374 */
375void qib_nomsi(struct qib_devdata *dd)
376{
377 dd->msi_lo = 0;
378 pci_disable_msi(dd->pcidev);
379}
380
381/*
382 * Same as qib_nomsi, but for MSIx.
383 */
384void qib_nomsix(struct qib_devdata *dd)
385{
386 pci_disable_msix(dd->pcidev);
387}
388
389/*
390 * Similar to pci_intx(pdev, 1), except that we make sure
391 * msi(x) is off.
392 */
393void qib_enable_intx(struct pci_dev *pdev)
394{
395 u16 cw, new;
396 int pos;
397
398 /* first, turn on INTx */
399 pci_read_config_word(pdev, PCI_COMMAND, &cw);
400 new = cw & ~PCI_COMMAND_INTX_DISABLE;
401 if (new != cw)
402 pci_write_config_word(pdev, PCI_COMMAND, new);
403
404 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
405 if (pos) {
406 /* then turn off MSI */
407 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
408 new = cw & ~PCI_MSI_FLAGS_ENABLE;
409 if (new != cw)
410 pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
411 }
412 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
413 if (pos) {
414 /* then turn off MSIx */
415 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
416 new = cw & ~PCI_MSIX_FLAGS_ENABLE;
417 if (new != cw)
418 pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
419 }
420}
421
422/*
423 * These two routines are helper routines for the device reset code
424 * to move all the pcie code out of the chip-specific driver code.
425 */
426void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
427{
428 pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
429 pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
430 pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
431}
432
433void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
434{
435 int r;
436 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
437 dd->pcibar0);
438 if (r)
439 qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
440 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
441 dd->pcibar1);
442 if (r)
443 qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
444 /* now re-enable memory access, and restore cosmetic settings */
445 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
446 pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
447 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
448 r = pci_enable_device(dd->pcidev);
449 if (r)
450 qib_dev_err(dd, "pci_enable_device failed after "
451 "reset: %d\n", r);
452}
453
454/* code to adjust PCIe capabilities. */
455
456static int fld2val(int wd, int mask)
457{
458 int lsbmask;
459
460 if (!mask)
461 return 0;
462 wd &= mask;
463 lsbmask = mask ^ (mask & (mask - 1));
464 wd /= lsbmask;
465 return wd;
466}
467
468static int val2fld(int wd, int mask)
469{
470 int lsbmask;
471
472 if (!mask)
473 return 0;
474 lsbmask = mask ^ (mask & (mask - 1));
475 wd *= lsbmask;
476 return wd;
477}
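
/*
 * Worked example (an editorial illustration, not from the original code):
 * fld2val() pulls a field out of a register word given its mask, and
 * val2fld() shifts a value back into field position.  Assuming
 * PCI_EXP_DEVCTL_PAYLOAD is 0x00e0 as in the kernel headers, a DEVCTL
 * value of 0x2040 gives fld2val(0x2040, 0x00e0) == 2 (a 512-byte max
 * payload), and val2fld(2, 0x00e0) == 0x0040 puts that code back into
 * the payload field.
 */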
478
479static int qib_pcie_coalesce;
480module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
481MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
482
483/*
484 * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
485 * chipsets. This is known to be unsafe for some revisions of some
486 * of these chipsets, with some BIOS settings, and enabling it on those
487 * systems may result in the system crashing, and/or data corruption.
488 */
489static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
490{
491 int r;
492 struct pci_dev *parent;
493 int ppos;
494 u16 devid;
495 u32 mask, bits, val;
496
497 if (!qib_pcie_coalesce)
498 return 0;
499
500 /* Find out supported and configured values for parent (root) */
501 parent = dd->pcidev->bus->self;
502 if (parent->bus->parent) {
503 qib_devinfo(dd->pcidev, "Parent not root\n");
504 return 1;
505 }
506 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
507 if (!ppos)
508 return 1;
509 if (parent->vendor != 0x8086)
510 return 1;
511
512 /*
513 * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
514 * - bit 11: COALESCE_FORCE: need to set to 0
515 * - bit 10: COALESCE_EN: need to set to 1
516	 * (but with limitations on some chipsets)
517 *
518 * On the Intel 5000, 5100, and 7300 chipsets, there is
519	 * also: - bits 25:24: COALESCE_MODE, need to set to 0
520 */
521 devid = parent->device;
522 if (devid >= 0x25e2 && devid <= 0x25fa) {
523 u8 rev;
524
525 /* 5000 P/V/X/Z */
526 pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
527 if (rev <= 0xb2)
528 bits = 1U << 10;
529 else
530 bits = 7U << 10;
531 mask = (3U << 24) | (7U << 10);
532 } else if (devid >= 0x65e2 && devid <= 0x65fa) {
533 /* 5100 */
534 bits = 1U << 10;
535 mask = (3U << 24) | (7U << 10);
536 } else if (devid >= 0x4021 && devid <= 0x402e) {
537 /* 5400 */
538 bits = 7U << 10;
539 mask = 7U << 10;
540 } else if (devid >= 0x3604 && devid <= 0x360a) {
541 /* 7300 */
542 bits = 7U << 10;
543 mask = (3U << 24) | (7U << 10);
544 } else {
545 /* not one of the chipsets that we know about */
546 return 1;
547 }
548 pci_read_config_dword(parent, 0x48, &val);
549 val &= ~mask;
550 val |= bits;
551 r = pci_write_config_dword(parent, 0x48, val);
552 return 0;
553}
554
555/*
556 * BIOS may not set PCIe bus-utilization parameters for best performance.
557 * Check and optionally adjust them to maximize our throughput.
558 */
559static int qib_pcie_caps;
560module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
561MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
562
563static int qib_tune_pcie_caps(struct qib_devdata *dd)
564{
565 int ret = 1; /* Assume the worst */
566 struct pci_dev *parent;
567 int ppos, epos;
568 u16 pcaps, pctl, ecaps, ectl;
569 int rc_sup, ep_sup;
570 int rc_cur, ep_cur;
571
572 /* Find out supported and configured values for parent (root) */
573 parent = dd->pcidev->bus->self;
574 if (parent->bus->parent) {
575 qib_devinfo(dd->pcidev, "Parent not root\n");
576 goto bail;
577 }
578 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
579 if (ppos) {
580 pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
581 pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
582 } else
583 goto bail;
584 /* Find out supported and configured values for endpoint (us) */
585 epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
586 if (epos) {
587 pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
588 pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
589 } else
590 goto bail;
591 ret = 0;
592 /* Find max payload supported by root, endpoint */
593 rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
594 ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
595 if (rc_sup > ep_sup)
596 rc_sup = ep_sup;
597
598 rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
599 ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
600
601	/* If the supported value exceeds the module param limit, clamp it */
602 if (rc_sup > (qib_pcie_caps & 7))
603 rc_sup = qib_pcie_caps & 7;
604 /* If less than (allowed, supported), bump root payload */
605 if (rc_sup > rc_cur) {
606 rc_cur = rc_sup;
607 pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
608 val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
609 pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
610 }
611 /* If less than (allowed, supported), bump endpoint payload */
612 if (rc_sup > ep_cur) {
613 ep_cur = rc_sup;
614 ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
615 val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
616 pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
617 }
618
619 /*
620 * Now the Read Request size.
621 * No field for max supported, but PCIe spec limits it to 4096,
622 * which is code '5' (log2(4096) - 7)
623 */
624 rc_sup = 5;
625 if (rc_sup > ((qib_pcie_caps >> 4) & 7))
626 rc_sup = (qib_pcie_caps >> 4) & 7;
627 rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
628 ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
629
630 if (rc_sup > rc_cur) {
631 rc_cur = rc_sup;
632 pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
633 val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
634 pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
635 }
636 if (rc_sup > ep_cur) {
637 ep_cur = rc_sup;
638 ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
639 val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
640 pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
641 }
642bail:
643 return ret;
644}
645/* End of PCIe capability tuning */
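
/*
 * Encoding note (editorial, not from the original code): both the payload
 * and read-request fields encode sizes as 128 << code bytes, so code 0 is
 * 128 bytes, code 2 is 512 bytes, and code 5 is the 4096-byte maximum the
 * spec allows for read requests, which is why rc_sup starts at 5 above.
 */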
646
647/*
648 * From here through the qib_pci_err_handler definition, everything is
649 * invoked via the PCI error handling infrastructure (pci_driver err_handler)
650 */
651static pci_ers_result_t
652qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
653{
654 struct qib_devdata *dd = pci_get_drvdata(pdev);
655 pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
656
657 switch (state) {
658 case pci_channel_io_normal:
659 qib_devinfo(pdev, "State Normal, ignoring\n");
660 break;
661
662 case pci_channel_io_frozen:
663 qib_devinfo(pdev, "State Frozen, requesting reset\n");
664 pci_disable_device(pdev);
665 ret = PCI_ERS_RESULT_NEED_RESET;
666 break;
667
668 case pci_channel_io_perm_failure:
669 qib_devinfo(pdev, "State Permanent Failure, disabling\n");
670 if (dd) {
671 /* no more register accesses! */
672 dd->flags &= ~QIB_PRESENT;
673 qib_disable_after_error(dd);
674 }
675 /* else early, or other problem */
676 ret = PCI_ERS_RESULT_DISCONNECT;
677 break;
678
679 default: /* shouldn't happen */
680 qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
681 state);
682 break;
683 }
684 return ret;
685}
686
687static pci_ers_result_t
688qib_pci_mmio_enabled(struct pci_dev *pdev)
689{
690 u64 words = 0U;
691 struct qib_devdata *dd = pci_get_drvdata(pdev);
692 pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
693
694 if (dd && dd->pport) {
695 words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
696 if (words == ~0ULL)
697 ret = PCI_ERS_RESULT_NEED_RESET;
698 }
699 qib_devinfo(pdev, "QIB mmio_enabled function called, "
700 "read wordscntr %Lx, returning %d\n", words, ret);
701 return ret;
702}
703
704static pci_ers_result_t
705qib_pci_slot_reset(struct pci_dev *pdev)
706{
707	qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
708 return PCI_ERS_RESULT_CAN_RECOVER;
709}
710
711static pci_ers_result_t
712qib_pci_link_reset(struct pci_dev *pdev)
713{
714 qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
715 return PCI_ERS_RESULT_CAN_RECOVER;
716}
717
718static void
719qib_pci_resume(struct pci_dev *pdev)
720{
721 struct qib_devdata *dd = pci_get_drvdata(pdev);
722 qib_devinfo(pdev, "QIB resume function called\n");
723 pci_cleanup_aer_uncorrect_error_status(pdev);
724 /*
725	 * Running jobs will fail, since this reset is asynchronous,
726	 * unlike a sysfs-requested reset. Still better than
727	 * doing nothing.
728 */
729 qib_init(dd, 1); /* same as re-init after reset */
730}
731
732struct pci_error_handlers qib_pci_err_handler = {
733 .error_detected = qib_pci_error_detected,
734 .mmio_enabled = qib_pci_mmio_enabled,
735 .link_reset = qib_pci_link_reset,
736 .slot_reset = qib_pci_slot_reset,
737 .resume = qib_pci_resume,
738};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 000000000000..10b8c444dd31
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright (c) 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "qib.h"
34
35/**
36 * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
37 * @to: destination, in MMIO space (must be 64-bit aligned)
38 * @from: source (must be 64-bit aligned)
39 * @count: number of 32-bit quantities to copy
40 *
41 * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
42 * time. Order of access is not guaranteed, nor is a memory barrier
43 * performed afterwards.
44 */
45void qib_pio_copy(void __iomem *to, const void *from, size_t count)
46{
47#ifdef CONFIG_64BIT
48 u64 __iomem *dst = to;
49 const u64 *src = from;
50 const u64 *end = src + (count >> 1);
51
52 while (src < end)
53 __raw_writeq(*src++, dst++);
54 if (count & 1)
55 __raw_writel(*(const u32 *)src, dst);
56#else
57 u32 __iomem *dst = to;
58 const u32 *src = from;
59 const u32 *end = src + count;
60
61 while (src < end)
62 __raw_writel(*src++, dst++);
63#endif
64}
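
/*
 * Usage sketch (hypothetical names, not from the original code): a caller
 * with a 64-bit aligned PIO buffer "piobuf" and a 64-bit aligned 36-byte
 * header "hdr" would copy it with
 *
 *	qib_pio_copy(piobuf, &hdr, 36 >> 2);
 *
 * On a 64-bit kernel that becomes four 64-bit stores plus one trailing
 * 32-bit store for the odd word.
 */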
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
new file mode 100644
index 000000000000..e0f65e39076b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -0,0 +1,1255 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/err.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39
40#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
41#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
42
43static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
44 struct qpn_map *map, unsigned off)
45{
46 return (map - qpt->map) * BITS_PER_PAGE + off;
47}
48
49static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
50 struct qpn_map *map, unsigned off,
51 unsigned r)
52{
53 if (qpt->mask) {
54 off++;
55 if ((off & qpt->mask) >> 1 != r)
56 off = ((off & qpt->mask) ?
57 (off | qpt->mask) + 1 : off) | (r << 1);
58 } else
59 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 return off;
61}
62
63/*
64 * Convert the AETH credit code into the number of credits.
65 */
66static u32 credit_table[31] = {
67 0, /* 0 */
68 1, /* 1 */
69 2, /* 2 */
70 3, /* 3 */
71 4, /* 4 */
72 6, /* 5 */
73 8, /* 6 */
74 12, /* 7 */
75 16, /* 8 */
76 24, /* 9 */
77 32, /* A */
78 48, /* B */
79 64, /* C */
80 96, /* D */
81 128, /* E */
82 192, /* F */
83 256, /* 10 */
84 384, /* 11 */
85 512, /* 12 */
86 768, /* 13 */
87 1024, /* 14 */
88 1536, /* 15 */
89 2048, /* 16 */
90 3072, /* 17 */
91 4096, /* 18 */
92 6144, /* 19 */
93 8192, /* 1A */
94 12288, /* 1B */
95 16384, /* 1C */
96 24576, /* 1D */
97 32768 /* 1E */
98};
99
100static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
101{
102 unsigned long page = get_zeroed_page(GFP_KERNEL);
103
104 /*
105 * Free the page if someone raced with us installing it.
106 */
107
108 spin_lock(&qpt->lock);
109 if (map->page)
110 free_page(page);
111 else
112 map->page = (void *)page;
113 spin_unlock(&qpt->lock);
114}
115
116/*
117 * Allocate the next available QPN or
118 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
119 */
120static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
121 enum ib_qp_type type, u8 port)
122{
123 u32 i, offset, max_scan, qpn;
124 struct qpn_map *map;
125 u32 ret;
126 int r;
127
128 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
129 unsigned n;
130
131 ret = type == IB_QPT_GSI;
132 n = 1 << (ret + 2 * (port - 1));
133 spin_lock(&qpt->lock);
134 if (qpt->flags & n)
135 ret = -EINVAL;
136 else
137 qpt->flags |= n;
138 spin_unlock(&qpt->lock);
139 goto bail;
140 }
141
142 r = smp_processor_id();
143 if (r >= dd->n_krcv_queues)
144 r %= dd->n_krcv_queues;
145 qpn = qpt->last + 1;
146 if (qpn >= QPN_MAX)
147 qpn = 2;
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
149 qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
150 (r << 1);
151 offset = qpn & BITS_PER_PAGE_MASK;
152 map = &qpt->map[qpn / BITS_PER_PAGE];
153 max_scan = qpt->nmaps - !offset;
154 for (i = 0;;) {
155 if (unlikely(!map->page)) {
156 get_map_page(qpt, map);
157 if (unlikely(!map->page))
158 break;
159 }
160 do {
161 if (!test_and_set_bit(offset, map->page)) {
162 qpt->last = qpn;
163 ret = qpn;
164 goto bail;
165 }
166 offset = find_next_offset(qpt, map, offset, r);
167 qpn = mk_qpn(qpt, map, offset);
168 /*
169 * This test differs from alloc_pidmap().
170 * If find_next_offset() does find a zero
171 * bit, we don't need to check for QPN
172 * wrapping around past our starting QPN.
173 * We just need to be sure we don't loop
174 * forever.
175 */
176 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
177 /*
178 * In order to keep the number of pages allocated to a
179		 * minimum, we scan all the existing pages before increasing
180 * the size of the bitmap table.
181 */
182 if (++i > max_scan) {
183 if (qpt->nmaps == QPNMAP_ENTRIES)
184 break;
185 map = &qpt->map[qpt->nmaps++];
186 offset = qpt->mask ? (r << 1) : 0;
187 } else if (map < &qpt->map[qpt->nmaps]) {
188 ++map;
189 offset = qpt->mask ? (r << 1) : 0;
190 } else {
191 map = &qpt->map[0];
192 offset = qpt->mask ? (r << 1) : 2;
193 }
194 qpn = mk_qpn(qpt, map, offset);
195 }
196
197 ret = -ENOMEM;
198
199bail:
200 return ret;
201}
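
/*
 * Worked example (editorial, values are illustrative): when qpt->mask is
 * nonzero, the masked QPN bits select a kernel receive context.  With a
 * mask of 0x1e and r == 3 the allocator only hands out QPNs for which
 * ((qpn & 0x1e) >> 1) == 3, e.g. 0x26, 0x46, 0x66, ...; the intent is
 * that packets for a QP land on the receive context matching the CPU
 * that allocated it.
 */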
202
203static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
204{
205 struct qpn_map *map;
206
207 map = qpt->map + qpn / BITS_PER_PAGE;
208 if (map->page)
209 clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
210}
211
212/*
213 * Put the QP into the hash table.
214 * The hash table holds a reference to the QP.
215 */
216static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
217{
218 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
219 unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
220 unsigned long flags;
221
222 spin_lock_irqsave(&dev->qpt_lock, flags);
223
224 if (qp->ibqp.qp_num == 0)
225 ibp->qp0 = qp;
226 else if (qp->ibqp.qp_num == 1)
227 ibp->qp1 = qp;
228 else {
229 qp->next = dev->qp_table[n];
230 dev->qp_table[n] = qp;
231 }
232 atomic_inc(&qp->refcount);
233
234 spin_unlock_irqrestore(&dev->qpt_lock, flags);
235}
236
237/*
238 * Remove the QP from the table so it can't be found asynchronously by
239 * the receive interrupt routine.
240 */
241static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
242{
243 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
244 struct qib_qp *q, **qpp;
245 unsigned long flags;
246
247 qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
248
249 spin_lock_irqsave(&dev->qpt_lock, flags);
250
251 if (ibp->qp0 == qp) {
252 ibp->qp0 = NULL;
253 atomic_dec(&qp->refcount);
254 } else if (ibp->qp1 == qp) {
255 ibp->qp1 = NULL;
256 atomic_dec(&qp->refcount);
257 } else
258 for (; (q = *qpp) != NULL; qpp = &q->next)
259 if (q == qp) {
260 *qpp = qp->next;
261 qp->next = NULL;
262 atomic_dec(&qp->refcount);
263 break;
264 }
265
266 spin_unlock_irqrestore(&dev->qpt_lock, flags);
267}
268
269/**
270 * qib_free_all_qps - check for QPs still in use
271 * @dd: the device data structure
272 *
273 * There should not be any QPs still in use.
274 * Returns the number of QPs still found to be in use.
275 */
276unsigned qib_free_all_qps(struct qib_devdata *dd)
277{
278 struct qib_ibdev *dev = &dd->verbs_dev;
279 unsigned long flags;
280 struct qib_qp *qp;
281 unsigned n, qp_inuse = 0;
282
283 for (n = 0; n < dd->num_pports; n++) {
284 struct qib_ibport *ibp = &dd->pport[n].ibport_data;
285
286 if (!qib_mcast_tree_empty(ibp))
287 qp_inuse++;
288 if (ibp->qp0)
289 qp_inuse++;
290 if (ibp->qp1)
291 qp_inuse++;
292 }
293
294 spin_lock_irqsave(&dev->qpt_lock, flags);
295 for (n = 0; n < dev->qp_table_size; n++) {
296 qp = dev->qp_table[n];
297 dev->qp_table[n] = NULL;
298
299 for (; qp; qp = qp->next)
300 qp_inuse++;
301 }
302 spin_unlock_irqrestore(&dev->qpt_lock, flags);
303
304 return qp_inuse;
305}
306
307/**
308 * qib_lookup_qpn - return the QP with the given QPN
309 * @ibp: the IB port
310 * @qpn: the QP number to look up
311 *
312 * The caller is responsible for decrementing the QP reference count
313 * when done.
314 */
315struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
316{
317 struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
318 unsigned long flags;
319 struct qib_qp *qp;
320
321 spin_lock_irqsave(&dev->qpt_lock, flags);
322
323 if (qpn == 0)
324 qp = ibp->qp0;
325 else if (qpn == 1)
326 qp = ibp->qp1;
327 else
328 for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
329 qp = qp->next)
330 if (qp->ibqp.qp_num == qpn)
331 break;
332 if (qp)
333 atomic_inc(&qp->refcount);
334
335 spin_unlock_irqrestore(&dev->qpt_lock, flags);
336 return qp;
337}
338
339/**
340 * qib_reset_qp - initialize the QP state to the reset state
341 * @qp: the QP to reset
342 * @type: the QP type
343 */
344static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
345{
346 qp->remote_qpn = 0;
347 qp->qkey = 0;
348 qp->qp_access_flags = 0;
349 atomic_set(&qp->s_dma_busy, 0);
350 qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
351 qp->s_hdrwords = 0;
352 qp->s_wqe = NULL;
353 qp->s_draining = 0;
354 qp->s_next_psn = 0;
355 qp->s_last_psn = 0;
356 qp->s_sending_psn = 0;
357 qp->s_sending_hpsn = 0;
358 qp->s_psn = 0;
359 qp->r_psn = 0;
360 qp->r_msn = 0;
361 if (type == IB_QPT_RC) {
362 qp->s_state = IB_OPCODE_RC_SEND_LAST;
363 qp->r_state = IB_OPCODE_RC_SEND_LAST;
364 } else {
365 qp->s_state = IB_OPCODE_UC_SEND_LAST;
366 qp->r_state = IB_OPCODE_UC_SEND_LAST;
367 }
368 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
369 qp->r_nak_state = 0;
370 qp->r_aflags = 0;
371 qp->r_flags = 0;
372 qp->s_head = 0;
373 qp->s_tail = 0;
374 qp->s_cur = 0;
375 qp->s_acked = 0;
376 qp->s_last = 0;
377 qp->s_ssn = 1;
378 qp->s_lsn = 0;
379 qp->s_mig_state = IB_MIG_MIGRATED;
380 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
381 qp->r_head_ack_queue = 0;
382 qp->s_tail_ack_queue = 0;
383 qp->s_num_rd_atomic = 0;
384 if (qp->r_rq.wq) {
385 qp->r_rq.wq->head = 0;
386 qp->r_rq.wq->tail = 0;
387 }
388 qp->r_sge.num_sge = 0;
389}
390
391static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
392{
393 unsigned n;
394
395 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
396 while (qp->s_rdma_read_sge.num_sge) {
397 atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
398 if (--qp->s_rdma_read_sge.num_sge)
399 qp->s_rdma_read_sge.sge =
400 *qp->s_rdma_read_sge.sg_list++;
401 }
402
403 while (qp->r_sge.num_sge) {
404 atomic_dec(&qp->r_sge.sge.mr->refcount);
405 if (--qp->r_sge.num_sge)
406 qp->r_sge.sge = *qp->r_sge.sg_list++;
407 }
408
409 if (clr_sends) {
410 while (qp->s_last != qp->s_head) {
411 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
412 unsigned i;
413
414 for (i = 0; i < wqe->wr.num_sge; i++) {
415 struct qib_sge *sge = &wqe->sg_list[i];
416
417 atomic_dec(&sge->mr->refcount);
418 }
419 if (qp->ibqp.qp_type == IB_QPT_UD ||
420 qp->ibqp.qp_type == IB_QPT_SMI ||
421 qp->ibqp.qp_type == IB_QPT_GSI)
422 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
423 if (++qp->s_last >= qp->s_size)
424 qp->s_last = 0;
425 }
426 if (qp->s_rdma_mr) {
427 atomic_dec(&qp->s_rdma_mr->refcount);
428 qp->s_rdma_mr = NULL;
429 }
430 }
431
432 if (qp->ibqp.qp_type != IB_QPT_RC)
433 return;
434
435 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
436 struct qib_ack_entry *e = &qp->s_ack_queue[n];
437
438 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
439 e->rdma_sge.mr) {
440 atomic_dec(&e->rdma_sge.mr->refcount);
441 e->rdma_sge.mr = NULL;
442 }
443 }
444}
445
446/**
447 * qib_error_qp - put a QP into the error state
448 * @qp: the QP to put into the error state
449 * @err: the receive completion error to signal if a RWQE is active
450 *
451 * Flushes both send and receive work queues.
452 * Returns true if last WQE event should be generated.
453 * The QP s_lock should be held and interrupts disabled.
454 * If we are already in error state, just return.
455 */
456int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
457{
458 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
459 struct ib_wc wc;
460 int ret = 0;
461
462 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
463 goto bail;
464
465 qp->state = IB_QPS_ERR;
466
467 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
468 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
469 del_timer(&qp->s_timer);
470 }
471 spin_lock(&dev->pending_lock);
472 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
473 qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
474 list_del_init(&qp->iowait);
475 }
476 spin_unlock(&dev->pending_lock);
477
478 if (!(qp->s_flags & QIB_S_BUSY)) {
479 qp->s_hdrwords = 0;
480 if (qp->s_rdma_mr) {
481 atomic_dec(&qp->s_rdma_mr->refcount);
482 qp->s_rdma_mr = NULL;
483 }
484 if (qp->s_tx) {
485 qib_put_txreq(qp->s_tx);
486 qp->s_tx = NULL;
487 }
488 }
489
490 /* Schedule the sending tasklet to drain the send work queue. */
491 if (qp->s_last != qp->s_head)
492 qib_schedule_send(qp);
493
494 clear_mr_refs(qp, 0);
495
496 memset(&wc, 0, sizeof(wc));
497 wc.qp = &qp->ibqp;
498 wc.opcode = IB_WC_RECV;
499
500 if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
501 wc.wr_id = qp->r_wr_id;
502 wc.status = err;
503 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
504 }
505 wc.status = IB_WC_WR_FLUSH_ERR;
506
507 if (qp->r_rq.wq) {
508 struct qib_rwq *wq;
509 u32 head;
510 u32 tail;
511
512 spin_lock(&qp->r_rq.lock);
513
514 /* sanity check pointers before trusting them */
515 wq = qp->r_rq.wq;
516 head = wq->head;
517 if (head >= qp->r_rq.size)
518 head = 0;
519 tail = wq->tail;
520 if (tail >= qp->r_rq.size)
521 tail = 0;
522 while (tail != head) {
523 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
524 if (++tail >= qp->r_rq.size)
525 tail = 0;
526 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
527 }
528 wq->tail = tail;
529
530 spin_unlock(&qp->r_rq.lock);
531 } else if (qp->ibqp.event_handler)
532 ret = 1;
533
534bail:
535 return ret;
536}
537
538/**
539 * qib_modify_qp - modify the attributes of a queue pair
540 * @ibqp: the queue pair whose attributes we're modifying
541 * @attr: the new attributes
542 * @attr_mask: the mask of attributes to modify
543 * @udata: user data for libibverbs.so
544 *
545 * Returns 0 on success, otherwise returns an errno.
546 */
547int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
548 int attr_mask, struct ib_udata *udata)
549{
550 struct qib_ibdev *dev = to_idev(ibqp->device);
551 struct qib_qp *qp = to_iqp(ibqp);
552 enum ib_qp_state cur_state, new_state;
553 struct ib_event ev;
554 int lastwqe = 0;
555 int mig = 0;
556 int ret;
557 u32 pmtu = 0; /* for gcc warning only */
558
559 spin_lock_irq(&qp->r_lock);
560 spin_lock(&qp->s_lock);
561
562 cur_state = attr_mask & IB_QP_CUR_STATE ?
563 attr->cur_qp_state : qp->state;
564 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
565
566 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
567 attr_mask))
568 goto inval;
569
570 if (attr_mask & IB_QP_AV) {
571 if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
572 goto inval;
573 if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
574 goto inval;
575 }
576
577 if (attr_mask & IB_QP_ALT_PATH) {
578 if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
579 goto inval;
580 if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
581 goto inval;
582 if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
583 goto inval;
584 }
585
586 if (attr_mask & IB_QP_PKEY_INDEX)
587 if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
588 goto inval;
589
590 if (attr_mask & IB_QP_MIN_RNR_TIMER)
591 if (attr->min_rnr_timer > 31)
592 goto inval;
593
594 if (attr_mask & IB_QP_PORT)
595 if (qp->ibqp.qp_type == IB_QPT_SMI ||
596 qp->ibqp.qp_type == IB_QPT_GSI ||
597 attr->port_num == 0 ||
598 attr->port_num > ibqp->device->phys_port_cnt)
599 goto inval;
600
601 if (attr_mask & IB_QP_DEST_QPN)
602 if (attr->dest_qp_num > QIB_QPN_MASK)
603 goto inval;
604
605 if (attr_mask & IB_QP_RETRY_CNT)
606 if (attr->retry_cnt > 7)
607 goto inval;
608
609 if (attr_mask & IB_QP_RNR_RETRY)
610 if (attr->rnr_retry > 7)
611 goto inval;
612
613 /*
614	 * Don't allow invalid path_mtu values. It is OK to set it greater
615	 * than the active mtu (or even the max_cap, if we have tuned that
616	 * to a small mtu); we'll set qp->path_mtu to the lesser of the
617	 * requested attribute mtu and the active mtu, for packetizing
618	 * messages.
619	 * Note that the QP port has to be set in INIT and MTU in RTR.
620 */
621 if (attr_mask & IB_QP_PATH_MTU) {
622 struct qib_devdata *dd = dd_from_dev(dev);
623 int mtu, pidx = qp->port_num - 1;
624
625 mtu = ib_mtu_enum_to_int(attr->path_mtu);
626 if (mtu == -1)
627 goto inval;
628 if (mtu > dd->pport[pidx].ibmtu) {
629 switch (dd->pport[pidx].ibmtu) {
630 case 4096:
631 pmtu = IB_MTU_4096;
632 break;
633 case 2048:
634 pmtu = IB_MTU_2048;
635 break;
636 case 1024:
637 pmtu = IB_MTU_1024;
638 break;
639 case 512:
640 pmtu = IB_MTU_512;
641 break;
642 case 256:
643 pmtu = IB_MTU_256;
644 break;
645 default:
646 pmtu = IB_MTU_2048;
647 }
648 } else
649 pmtu = attr->path_mtu;
650 }
651
652 if (attr_mask & IB_QP_PATH_MIG_STATE) {
653 if (attr->path_mig_state == IB_MIG_REARM) {
654 if (qp->s_mig_state == IB_MIG_ARMED)
655 goto inval;
656 if (new_state != IB_QPS_RTS)
657 goto inval;
658 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
659 if (qp->s_mig_state == IB_MIG_REARM)
660 goto inval;
661 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
662 goto inval;
663 if (qp->s_mig_state == IB_MIG_ARMED)
664 mig = 1;
665 } else
666 goto inval;
667 }
668
669 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
670 if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
671 goto inval;
672
673 switch (new_state) {
674 case IB_QPS_RESET:
675 if (qp->state != IB_QPS_RESET) {
676 qp->state = IB_QPS_RESET;
677 spin_lock(&dev->pending_lock);
678 if (!list_empty(&qp->iowait))
679 list_del_init(&qp->iowait);
680 spin_unlock(&dev->pending_lock);
681 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
682 spin_unlock(&qp->s_lock);
683 spin_unlock_irq(&qp->r_lock);
684 /* Stop the sending work queue and retry timer */
685 cancel_work_sync(&qp->s_work);
686 del_timer_sync(&qp->s_timer);
687 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
688 if (qp->s_tx) {
689 qib_put_txreq(qp->s_tx);
690 qp->s_tx = NULL;
691 }
692 remove_qp(dev, qp);
693 wait_event(qp->wait, !atomic_read(&qp->refcount));
694 spin_lock_irq(&qp->r_lock);
695 spin_lock(&qp->s_lock);
696 clear_mr_refs(qp, 1);
697 qib_reset_qp(qp, ibqp->qp_type);
698 }
699 break;
700
701 case IB_QPS_RTR:
702 /* Allow event to retrigger if QP set to RTR more than once */
703 qp->r_flags &= ~QIB_R_COMM_EST;
704 qp->state = new_state;
705 break;
706
707 case IB_QPS_SQD:
708 qp->s_draining = qp->s_last != qp->s_cur;
709 qp->state = new_state;
710 break;
711
712 case IB_QPS_SQE:
713 if (qp->ibqp.qp_type == IB_QPT_RC)
714 goto inval;
715 qp->state = new_state;
716 break;
717
718 case IB_QPS_ERR:
719 lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
720 break;
721
722 default:
723 qp->state = new_state;
724 break;
725 }
726
727 if (attr_mask & IB_QP_PKEY_INDEX)
728 qp->s_pkey_index = attr->pkey_index;
729
730 if (attr_mask & IB_QP_PORT)
731 qp->port_num = attr->port_num;
732
733 if (attr_mask & IB_QP_DEST_QPN)
734 qp->remote_qpn = attr->dest_qp_num;
735
736 if (attr_mask & IB_QP_SQ_PSN) {
737 qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
738 qp->s_psn = qp->s_next_psn;
739 qp->s_sending_psn = qp->s_next_psn;
740 qp->s_last_psn = qp->s_next_psn - 1;
741 qp->s_sending_hpsn = qp->s_last_psn;
742 }
743
744 if (attr_mask & IB_QP_RQ_PSN)
745 qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
746
747 if (attr_mask & IB_QP_ACCESS_FLAGS)
748 qp->qp_access_flags = attr->qp_access_flags;
749
750 if (attr_mask & IB_QP_AV) {
751 qp->remote_ah_attr = attr->ah_attr;
752 qp->s_srate = attr->ah_attr.static_rate;
753 }
754
755 if (attr_mask & IB_QP_ALT_PATH) {
756 qp->alt_ah_attr = attr->alt_ah_attr;
757 qp->s_alt_pkey_index = attr->alt_pkey_index;
758 }
759
760 if (attr_mask & IB_QP_PATH_MIG_STATE) {
761 qp->s_mig_state = attr->path_mig_state;
762 if (mig) {
763 qp->remote_ah_attr = qp->alt_ah_attr;
764 qp->port_num = qp->alt_ah_attr.port_num;
765 qp->s_pkey_index = qp->s_alt_pkey_index;
766 }
767 }
768
769 if (attr_mask & IB_QP_PATH_MTU)
770 qp->path_mtu = pmtu;
771
772 if (attr_mask & IB_QP_RETRY_CNT) {
773 qp->s_retry_cnt = attr->retry_cnt;
774 qp->s_retry = attr->retry_cnt;
775 }
776
777 if (attr_mask & IB_QP_RNR_RETRY) {
778 qp->s_rnr_retry_cnt = attr->rnr_retry;
779 qp->s_rnr_retry = attr->rnr_retry;
780 }
781
782 if (attr_mask & IB_QP_MIN_RNR_TIMER)
783 qp->r_min_rnr_timer = attr->min_rnr_timer;
784
785 if (attr_mask & IB_QP_TIMEOUT)
786 qp->timeout = attr->timeout;
787
788 if (attr_mask & IB_QP_QKEY)
789 qp->qkey = attr->qkey;
790
791 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
792 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
793
794 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
795 qp->s_max_rd_atomic = attr->max_rd_atomic;
796
797 spin_unlock(&qp->s_lock);
798 spin_unlock_irq(&qp->r_lock);
799
800 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
801 insert_qp(dev, qp);
802
803 if (lastwqe) {
804 ev.device = qp->ibqp.device;
805 ev.element.qp = &qp->ibqp;
806 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
807 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
808 }
809 if (mig) {
810 ev.device = qp->ibqp.device;
811 ev.element.qp = &qp->ibqp;
812 ev.event = IB_EVENT_PATH_MIG;
813 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
814 }
815 ret = 0;
816 goto bail;
817
818inval:
819 spin_unlock(&qp->s_lock);
820 spin_unlock_irq(&qp->r_lock);
821 ret = -EINVAL;
822
823bail:
824 return ret;
825}
826
827int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
828 int attr_mask, struct ib_qp_init_attr *init_attr)
829{
830 struct qib_qp *qp = to_iqp(ibqp);
831
832 attr->qp_state = qp->state;
833 attr->cur_qp_state = attr->qp_state;
834 attr->path_mtu = qp->path_mtu;
835 attr->path_mig_state = qp->s_mig_state;
836 attr->qkey = qp->qkey;
837 attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
838 attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
839 attr->dest_qp_num = qp->remote_qpn;
840 attr->qp_access_flags = qp->qp_access_flags;
841 attr->cap.max_send_wr = qp->s_size - 1;
842 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
843 attr->cap.max_send_sge = qp->s_max_sge;
844 attr->cap.max_recv_sge = qp->r_rq.max_sge;
845 attr->cap.max_inline_data = 0;
846 attr->ah_attr = qp->remote_ah_attr;
847 attr->alt_ah_attr = qp->alt_ah_attr;
848 attr->pkey_index = qp->s_pkey_index;
849 attr->alt_pkey_index = qp->s_alt_pkey_index;
850 attr->en_sqd_async_notify = 0;
851 attr->sq_draining = qp->s_draining;
852 attr->max_rd_atomic = qp->s_max_rd_atomic;
853 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
854 attr->min_rnr_timer = qp->r_min_rnr_timer;
855 attr->port_num = qp->port_num;
856 attr->timeout = qp->timeout;
857 attr->retry_cnt = qp->s_retry_cnt;
858 attr->rnr_retry = qp->s_rnr_retry_cnt;
859 attr->alt_port_num = qp->alt_ah_attr.port_num;
860 attr->alt_timeout = qp->alt_timeout;
861
862 init_attr->event_handler = qp->ibqp.event_handler;
863 init_attr->qp_context = qp->ibqp.qp_context;
864 init_attr->send_cq = qp->ibqp.send_cq;
865 init_attr->recv_cq = qp->ibqp.recv_cq;
866 init_attr->srq = qp->ibqp.srq;
867 init_attr->cap = attr->cap;
868 if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
869 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
870 else
871 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
872 init_attr->qp_type = qp->ibqp.qp_type;
873 init_attr->port_num = qp->port_num;
874 return 0;
875}
876
877/**
878 * qib_compute_aeth - compute the AETH (syndrome + MSN)
879 * @qp: the queue pair to compute the AETH for
880 *
881 * Returns the AETH.
882 */
883__be32 qib_compute_aeth(struct qib_qp *qp)
884{
885 u32 aeth = qp->r_msn & QIB_MSN_MASK;
886
887 if (qp->ibqp.srq) {
888 /*
889 * Shared receive queues don't generate credits.
890 * Set the credit field to the invalid value.
891 */
892 aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
893 } else {
894 u32 min, max, x;
895 u32 credits;
896 struct qib_rwq *wq = qp->r_rq.wq;
897 u32 head;
898 u32 tail;
899
900 /* sanity check pointers before trusting them */
901 head = wq->head;
902 if (head >= qp->r_rq.size)
903 head = 0;
904 tail = wq->tail;
905 if (tail >= qp->r_rq.size)
906 tail = 0;
907 /*
908 * Compute the number of credits available (RWQEs).
909 * XXX Not holding the r_rq.lock here so there is a small
910 * chance that the pair of reads are not atomic.
911 */
912 credits = head - tail;
913 if ((int)credits < 0)
914 credits += qp->r_rq.size;
915 /*
916 * Binary search the credit table to find the code to
917 * use.
918 */
919 min = 0;
920 max = 31;
921 for (;;) {
922 x = (min + max) / 2;
923 if (credit_table[x] == credits)
924 break;
925 if (credit_table[x] > credits)
926 max = x;
927 else if (min == x)
928 break;
929 else
930 min = x;
931 }
932 aeth |= x << QIB_AETH_CREDIT_SHIFT;
933 }
934 return cpu_to_be32(aeth);
935}
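
/*
 * Worked example (editorial, not from the original code): with 100 free
 * RWQEs the binary search above ends at credit code 0xD, since
 * credit_table[0xD] == 96 is the largest entry that does not exceed 100;
 * the receiver advertises slightly fewer credits than it has rather than
 * over-promising.
 */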
936
937/**
938 * qib_create_qp - create a queue pair for a device
939 * @ibpd: the protection domain whose device we create the queue pair for
940 * @init_attr: the attributes of the queue pair
941 * @udata: user data for libibverbs.so
942 *
943 * Returns the queue pair on success, otherwise returns an errno.
944 *
945 * Called by the ib_create_qp() core verbs function.
946 */
947struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
948 struct ib_qp_init_attr *init_attr,
949 struct ib_udata *udata)
950{
951 struct qib_qp *qp;
952 int err;
953 struct qib_swqe *swq = NULL;
954 struct qib_ibdev *dev;
955 struct qib_devdata *dd;
956 size_t sz;
957 size_t sg_list_sz;
958 struct ib_qp *ret;
959
960 if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
961 init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
962 ret = ERR_PTR(-EINVAL);
963 goto bail;
964 }
965
966 /* Check receive queue parameters if no SRQ is specified. */
967 if (!init_attr->srq) {
968 if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
969 init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
970 ret = ERR_PTR(-EINVAL);
971 goto bail;
972 }
973 if (init_attr->cap.max_send_sge +
974 init_attr->cap.max_send_wr +
975 init_attr->cap.max_recv_sge +
976 init_attr->cap.max_recv_wr == 0) {
977 ret = ERR_PTR(-EINVAL);
978 goto bail;
979 }
980 }
981
982 switch (init_attr->qp_type) {
983 case IB_QPT_SMI:
984 case IB_QPT_GSI:
985 if (init_attr->port_num == 0 ||
986 init_attr->port_num > ibpd->device->phys_port_cnt) {
987 ret = ERR_PTR(-EINVAL);
988 goto bail;
989 }
990 case IB_QPT_UC:
991 case IB_QPT_RC:
992 case IB_QPT_UD:
993 sz = sizeof(struct qib_sge) *
994 init_attr->cap.max_send_sge +
995 sizeof(struct qib_swqe);
996 swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
997 if (swq == NULL) {
998 ret = ERR_PTR(-ENOMEM);
999 goto bail;
1000 }
1001 sz = sizeof(*qp);
1002 sg_list_sz = 0;
1003 if (init_attr->srq) {
1004 struct qib_srq *srq = to_isrq(init_attr->srq);
1005
1006 if (srq->rq.max_sge > 1)
1007 sg_list_sz = sizeof(*qp->r_sg_list) *
1008 (srq->rq.max_sge - 1);
1009 } else if (init_attr->cap.max_recv_sge > 1)
1010 sg_list_sz = sizeof(*qp->r_sg_list) *
1011 (init_attr->cap.max_recv_sge - 1);
1012 qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
1013 if (!qp) {
1014 ret = ERR_PTR(-ENOMEM);
1015 goto bail_swq;
1016 }
1017 if (init_attr->srq)
1018 sz = 0;
1019 else {
1020 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1021 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1022 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1023 sizeof(struct qib_rwqe);
1024 qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
1025 qp->r_rq.size * sz);
1026 if (!qp->r_rq.wq) {
1027 ret = ERR_PTR(-ENOMEM);
1028 goto bail_qp;
1029 }
1030 }
1031
1032 /*
1033 * ib_create_qp() will initialize qp->ibqp
1034 * except for qp->ibqp.qp_num.
1035 */
1036 spin_lock_init(&qp->r_lock);
1037 spin_lock_init(&qp->s_lock);
1038 spin_lock_init(&qp->r_rq.lock);
1039 atomic_set(&qp->refcount, 0);
1040 init_waitqueue_head(&qp->wait);
1041 init_waitqueue_head(&qp->wait_dma);
1042 init_timer(&qp->s_timer);
1043 qp->s_timer.data = (unsigned long)qp;
1044 INIT_WORK(&qp->s_work, qib_do_send);
1045 INIT_LIST_HEAD(&qp->iowait);
1046 INIT_LIST_HEAD(&qp->rspwait);
1047 qp->state = IB_QPS_RESET;
1048 qp->s_wq = swq;
1049 qp->s_size = init_attr->cap.max_send_wr + 1;
1050 qp->s_max_sge = init_attr->cap.max_send_sge;
1051 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1052 qp->s_flags = QIB_S_SIGNAL_REQ_WR;
1053 dev = to_idev(ibpd->device);
1054 dd = dd_from_dev(dev);
1055 err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
1056 init_attr->port_num);
1057 if (err < 0) {
1058 ret = ERR_PTR(err);
1059 vfree(qp->r_rq.wq);
1060 goto bail_qp;
1061 }
1062 qp->ibqp.qp_num = err;
1063 qp->port_num = init_attr->port_num;
1064 qp->processor_id = smp_processor_id();
1065 qib_reset_qp(qp, init_attr->qp_type);
1066 break;
1067
1068 default:
1069 /* Don't support raw QPs */
1070 ret = ERR_PTR(-ENOSYS);
1071 goto bail;
1072 }
1073
1074 init_attr->cap.max_inline_data = 0;
1075
1076 /*
1077 * Return the address of the RWQ as the offset to mmap.
1078 * See qib_mmap() for details.
1079 */
1080 if (udata && udata->outlen >= sizeof(__u64)) {
1081 if (!qp->r_rq.wq) {
1082 __u64 offset = 0;
1083
1084 err = ib_copy_to_udata(udata, &offset,
1085 sizeof(offset));
1086 if (err) {
1087 ret = ERR_PTR(err);
1088 goto bail_ip;
1089 }
1090 } else {
1091 u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
1092
1093 qp->ip = qib_create_mmap_info(dev, s,
1094 ibpd->uobject->context,
1095 qp->r_rq.wq);
1096 if (!qp->ip) {
1097 ret = ERR_PTR(-ENOMEM);
1098 goto bail_ip;
1099 }
1100
1101 err = ib_copy_to_udata(udata, &(qp->ip->offset),
1102 sizeof(qp->ip->offset));
1103 if (err) {
1104 ret = ERR_PTR(err);
1105 goto bail_ip;
1106 }
1107 }
1108 }
1109
1110 spin_lock(&dev->n_qps_lock);
1111 if (dev->n_qps_allocated == ib_qib_max_qps) {
1112 spin_unlock(&dev->n_qps_lock);
1113 ret = ERR_PTR(-ENOMEM);
1114 goto bail_ip;
1115 }
1116
1117 dev->n_qps_allocated++;
1118 spin_unlock(&dev->n_qps_lock);
1119
1120 if (qp->ip) {
1121 spin_lock_irq(&dev->pending_lock);
1122 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
1123 spin_unlock_irq(&dev->pending_lock);
1124 }
1125
1126 ret = &qp->ibqp;
1127 goto bail;
1128
1129bail_ip:
1130 if (qp->ip)
1131 kref_put(&qp->ip->ref, qib_release_mmap_info);
1132 else
1133 vfree(qp->r_rq.wq);
1134 free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1135bail_qp:
1136 kfree(qp);
1137bail_swq:
1138 vfree(swq);
1139bail:
1140 return ret;
1141}
1142
1143/**
1144 * qib_destroy_qp - destroy a queue pair
1145 * @ibqp: the queue pair to destroy
1146 *
1147 * Returns 0 on success.
1148 *
1149 * Note that this can be called while the QP is actively sending or
1150 * receiving!
1151 */
1152int qib_destroy_qp(struct ib_qp *ibqp)
1153{
1154 struct qib_qp *qp = to_iqp(ibqp);
1155 struct qib_ibdev *dev = to_idev(ibqp->device);
1156
1157 /* Make sure HW and driver activity is stopped. */
1158 spin_lock_irq(&qp->s_lock);
1159 if (qp->state != IB_QPS_RESET) {
1160 qp->state = IB_QPS_RESET;
1161 spin_lock(&dev->pending_lock);
1162 if (!list_empty(&qp->iowait))
1163 list_del_init(&qp->iowait);
1164 spin_unlock(&dev->pending_lock);
1165 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
1166 spin_unlock_irq(&qp->s_lock);
1167 cancel_work_sync(&qp->s_work);
1168 del_timer_sync(&qp->s_timer);
1169 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
1170 if (qp->s_tx) {
1171 qib_put_txreq(qp->s_tx);
1172 qp->s_tx = NULL;
1173 }
1174 remove_qp(dev, qp);
1175 wait_event(qp->wait, !atomic_read(&qp->refcount));
1176 clear_mr_refs(qp, 1);
1177 } else
1178 spin_unlock_irq(&qp->s_lock);
1179
1180	/* all users cleaned up, mark the QPN available */
1181 free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1182 spin_lock(&dev->n_qps_lock);
1183 dev->n_qps_allocated--;
1184 spin_unlock(&dev->n_qps_lock);
1185
1186 if (qp->ip)
1187 kref_put(&qp->ip->ref, qib_release_mmap_info);
1188 else
1189 vfree(qp->r_rq.wq);
1190 vfree(qp->s_wq);
1191 kfree(qp);
1192 return 0;
1193}
1194
1195/**
1196 * qib_init_qpn_table - initialize the QP number table for a device
1197 * @qpt: the QPN table
1198 */
1199void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
1200{
1201 spin_lock_init(&qpt->lock);
1202 qpt->last = 1; /* start with QPN 2 */
1203 qpt->nmaps = 1;
1204 qpt->mask = dd->qpn_mask;
1205}
1206
1207/**
1208 * qib_free_qpn_table - free the QP number table for a device
1209 * @qpt: the QPN table
1210 */
1211void qib_free_qpn_table(struct qib_qpn_table *qpt)
1212{
1213 int i;
1214
1215 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
1216 if (qpt->map[i].page)
1217 free_page((unsigned long) qpt->map[i].page);
1218}
1219
1220/**
1221 * qib_get_credit - process the credit field of an incoming AETH
1222 * @qp: the QP whose send work queue may be unblocked
1223 * @aeth: the Acknowledge Extended Transport Header
1224 *
1225 * The QP s_lock should be held.
1226 */
1227void qib_get_credit(struct qib_qp *qp, u32 aeth)
1228{
1229 u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
1230
1231 /*
1232 * If the credit is invalid, we can send
1233 * as many packets as we like. Otherwise, we have to
1234 * honor the credit field.
1235 */
1236 if (credit == QIB_AETH_CREDIT_INVAL) {
1237 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1238 qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
1239 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1240 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1241 qib_schedule_send(qp);
1242 }
1243 }
1244 } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1245 /* Compute new LSN (i.e., MSN + credit) */
1246 credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
1247 if (qib_cmp24(credit, qp->s_lsn) > 0) {
1248 qp->s_lsn = credit;
1249 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1250 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1251 qib_schedule_send(qp);
1252 }
1253 }
1254 }
1255}
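
As a stand-alone illustration (not driver code), the AETH unpacking done by qib_get_credit() can be reproduced in user space; the field layout follows the IBA AETH format (5-bit credit code in bits 28..24, 24-bit MSN in bits 23..0), and the sample value is invented.

/*
 * Minimal sketch of AETH field unpacking, assuming the usual IBA layout.
 */
#include <stdio.h>
#include <stdint.h>

#define AETH_CREDIT_SHIFT 24
#define AETH_CREDIT_MASK  0x1F
#define AETH_CREDIT_INVAL 0x1F
#define MSN_MASK          0xFFFFFF

int main(void)
{
	uint32_t aeth = (7u << AETH_CREDIT_SHIFT) | 0x000123;	/* invented */
	uint32_t credit = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;
	uint32_t msn = aeth & MSN_MASK;

	printf("credit code %u, msn 0x%06x, unlimited credit: %s\n",
	       credit, msn,
	       credit == AETH_CREDIT_INVAL ? "yes" : "no");
	return 0;
}

In the driver, the 5-bit code is then translated through credit_table[] into an absolute count before being added to the MSN to form the new limit (LSN), as in the "Compute new LSN" branch above.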
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
new file mode 100644
index 000000000000..35b3604b691d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -0,0 +1,564 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39#include "qib_qsfp.h"
40
41/*
42 * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
43 * in qib_twsi.c
44 */
45#define QSFP_MAX_RETRY 4
46
47static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
48{
49 struct qib_devdata *dd = ppd->dd;
50 u32 out, mask;
51 int ret, cnt, pass = 0;
52 int stuck = 0;
53 u8 *buff = bp;
54
55 ret = mutex_lock_interruptible(&dd->eep_lock);
56 if (ret)
57 goto no_unlock;
58
59 if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
60 ret = -ENXIO;
61 goto bail;
62 }
63
64 /*
65 * We presume, if we are called at all, that this board has
66 * QSFP. This is on the same i2c chain as the legacy parts,
67 * but only responds if the module is selected via GPIO pins.
68 * Further, there are very long setup and hold requirements
69 * on MODSEL.
70 */
71 mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
72 out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
73 if (ppd->hw_pidx) {
74 mask <<= QSFP_GPIO_PORT2_SHIFT;
75 out <<= QSFP_GPIO_PORT2_SHIFT;
76 }
77
78 dd->f_gpio_mod(dd, out, mask, mask);
79
80 /*
81	 * Module could take up to 2 msec to respond to MOD_SEL, and there
82 * is no way to tell if it is ready, so we must wait.
83 */
84 msleep(2);
85
86 /* Make sure TWSI bus is in sane state. */
87 ret = qib_twsi_reset(dd);
88 if (ret) {
89 qib_dev_porterr(dd, ppd->port,
90 "QSFP interface Reset for read failed\n");
91 ret = -EIO;
92 stuck = 1;
93 goto deselect;
94 }
95
96 /* All QSFP modules are at A0 */
97
98 cnt = 0;
99 while (cnt < len) {
100 unsigned in_page;
101 int wlen = len - cnt;
102 in_page = addr % QSFP_PAGESIZE;
103 if ((in_page + wlen) > QSFP_PAGESIZE)
104 wlen = QSFP_PAGESIZE - in_page;
105 ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
106		/* Some QSFPs fail the first try, so retry a few times */
107 if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
108 continue;
109 if (ret) {
110			/* qib_twsi_blk_rd() returns 1 for error, else 0 */
111 ret = -EIO;
112 goto deselect;
113 }
114 addr += wlen;
115 cnt += wlen;
116 }
117 ret = cnt;
118
119deselect:
120 /*
121 * Module could take up to 10 uSec after transfer before
122 * ready to respond to MOD_SEL negation, and there is no way
123 * to tell if it is ready, so we must wait.
124 */
125 udelay(10);
126	/* set QSFP MODSEL, RST, LP all high */
127 dd->f_gpio_mod(dd, mask, mask, mask);
128
129 /*
130	 * Module could take up to 2 msec to respond to MOD_SEL
131	 * going away, and there is no way to tell if it is ready,
132	 * so we must wait.
133 */
134 if (stuck)
135 qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
136
137 if (pass >= QSFP_MAX_RETRY && ret)
138 qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
139 else if (pass)
140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
141
142 msleep(2);
143
144bail:
145 mutex_unlock(&dd->eep_lock);
146
147no_unlock:
148 return ret;
149}
150
151/*
152 * qsfp_write
153 * We do not ordinarily write the QSFP, but this is needed to select
154 * the page on non-flat QSFPs, and possibly later unusual cases
155 */
156static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
157 int len)
158{
159 struct qib_devdata *dd = ppd->dd;
160 u32 out, mask;
161 int ret, cnt;
162 u8 *buff = bp;
163
164 ret = mutex_lock_interruptible(&dd->eep_lock);
165 if (ret)
166 goto no_unlock;
167
168 if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
169 ret = -ENXIO;
170 goto bail;
171 }
172
173 /*
174 * We presume, if we are called at all, that this board has
175 * QSFP. This is on the same i2c chain as the legacy parts,
176 * but only responds if the module is selected via GPIO pins.
177 * Further, there are very long setup and hold requirements
178 * on MODSEL.
179 */
180 mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
181 out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
182 if (ppd->hw_pidx) {
183 mask <<= QSFP_GPIO_PORT2_SHIFT;
184 out <<= QSFP_GPIO_PORT2_SHIFT;
185 }
186 dd->f_gpio_mod(dd, out, mask, mask);
187
188 /*
189	 * Module could take up to 2 msec to respond to MOD_SEL,
190 * and there is no way to tell if it is ready, so we must wait.
191 */
192 msleep(2);
193
194 /* Make sure TWSI bus is in sane state. */
195 ret = qib_twsi_reset(dd);
196 if (ret) {
197 qib_dev_porterr(dd, ppd->port,
198 "QSFP interface Reset for write failed\n");
199 ret = -EIO;
200 goto deselect;
201 }
202
203 /* All QSFP modules are at A0 */
204
205 cnt = 0;
206 while (cnt < len) {
207 unsigned in_page;
208 int wlen = len - cnt;
209 in_page = addr % QSFP_PAGESIZE;
210 if ((in_page + wlen) > QSFP_PAGESIZE)
211 wlen = QSFP_PAGESIZE - in_page;
212 ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
213 if (ret) {
214			/* qib_twsi_blk_wr() returns 1 for error, else 0 */
215 ret = -EIO;
216 goto deselect;
217 }
218 addr += wlen;
219 cnt += wlen;
220 }
221 ret = cnt;
222
223deselect:
224 /*
225 * Module could take up to 10 uSec after transfer before
226 * ready to respond to MOD_SEL negation, and there is no way
227 * to tell if it is ready, so we must wait.
228 */
229 udelay(10);
230 /* set QSFP MODSEL, RST, LP high */
231 dd->f_gpio_mod(dd, mask, mask, mask);
232 /*
233	 * Module could take up to 2 msec to respond to MOD_SEL
234	 * going away, and there is no way to tell if it is ready,
235	 * so we must wait.
236 */
237 msleep(2);
238
239bail:
240 mutex_unlock(&dd->eep_lock);
241
242no_unlock:
243 return ret;
244}
245
246/*
247 * For validation, we want to check the checksums, even of the
248 * fields we do not otherwise use. This function reads the bytes from
249 * <first> to <next-1> and returns the 8 LSBs of the sum, or <0 for errors
250 */
251static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
252{
253 int ret;
254 u16 cks;
255 u8 bval;
256
257 cks = 0;
258 while (first < next) {
259 ret = qsfp_read(ppd, first, &bval, 1);
260 if (ret < 0)
261 goto bail;
262 cks += bval;
263 ++first;
264 }
265 ret = cks & 0xFF;
266bail:
267 return ret;
268
269}
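
The check bytes that qsfp_cks() feeds into the validation below follow a simple rule: each is the low 8 bits of the byte-wise sum of the region it covers (bytes 128..190 for cks1 at offset 191, bytes 192..222 for cks2 at offset 223). A small stand-alone sketch of that arithmetic, with an invented page buffer:

#include <stdio.h>
#include <stdint.h>

/* Low 8 bits of the sum of page bytes [first, next), as in qsfp_cks(). */
static uint8_t page_cksum(const uint8_t *page, int first, int next)
{
	uint16_t cks = 0;

	while (first < next)
		cks += page[first++];
	return cks & 0xFF;
}

int main(void)
{
	uint8_t page[256] = { 0 };

	page[128] = 0x0C;				/* pretend QSFP ID byte */
	page[191] = page_cksum(page, 128, 191);		/* cks1 covers 128..190 */
	page[223] = page_cksum(page, 192, 223);		/* cks2 covers 192..222 */

	printf("cks1=%02X cks2=%02X\n", page[191], page[223]);
	return 0;
}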
270
271int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
272{
273 int ret;
274 int idx;
275 u16 cks;
276 u32 mask;
277 u8 peek[4];
278
279 /* ensure sane contents on invalid reads, for cable swaps */
280 memset(cp, 0, sizeof(*cp));
281
282 mask = QSFP_GPIO_MOD_PRS_N;
283 if (ppd->hw_pidx)
284 mask <<= QSFP_GPIO_PORT2_SHIFT;
285
286 ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
287 if (ret & mask) {
288 ret = -ENODEV;
289 goto bail;
290 }
291
292 ret = qsfp_read(ppd, 0, peek, 3);
293 if (ret < 0)
294 goto bail;
295 if ((peek[0] & 0xFE) != 0x0C)
296 qib_dev_porterr(ppd->dd, ppd->port,
297 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
298
299 if ((peek[2] & 2) == 0) {
300 /*
301 * If cable is paged, rather than "flat memory", we need to
302		 * set the page to zero, even if it already appears to be zero.
303 */
304 u8 poke = 0;
305 ret = qib_qsfp_write(ppd, 127, &poke, 1);
306 udelay(50);
307 if (ret != 1) {
308 qib_dev_porterr(ppd->dd, ppd->port,
309 "Failed QSFP Page set\n");
310 goto bail;
311 }
312 }
313
314 ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
315 if (ret < 0)
316 goto bail;
317 if ((cp->id & 0xFE) != 0x0C)
318 qib_dev_porterr(ppd->dd, ppd->port,
319 "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
320 cks = cp->id;
321
322 ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
323 if (ret < 0)
324 goto bail;
325 cks += cp->pwr;
326
327 ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
328 if (ret < 0)
329 goto bail;
330 cks += ret;
331
332 ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
333 if (ret < 0)
334 goto bail;
335 cks += cp->len;
336
337 ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
338 if (ret < 0)
339 goto bail;
340 cks += cp->tech;
341
342 ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
343 if (ret < 0)
344 goto bail;
345 for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
346 cks += cp->vendor[idx];
347
348 ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
349 if (ret < 0)
350 goto bail;
351 cks += cp->xt_xcv;
352
353 ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
354 if (ret < 0)
355 goto bail;
356 for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
357 cks += cp->oui[idx];
358
359 ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
360 if (ret < 0)
361 goto bail;
362 for (idx = 0; idx < QSFP_PN_LEN; ++idx)
363 cks += cp->partnum[idx];
364
365 ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
366 if (ret < 0)
367 goto bail;
368 for (idx = 0; idx < QSFP_REV_LEN; ++idx)
369 cks += cp->rev[idx];
370
371 ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
372 if (ret < 0)
373 goto bail;
374 for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
375 cks += cp->atten[idx];
376
377 ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
378 if (ret < 0)
379 goto bail;
380 cks += ret;
381
382 cks &= 0xFF;
383 ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
384 if (ret < 0)
385 goto bail;
386 if (cks != cp->cks1)
387 qib_dev_porterr(ppd->dd, ppd->port,
388 "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
389 cks);
390
391	/* Second checksum covers bytes 192 up through (serial, date, lot) */
392 ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
393 if (ret < 0)
394 goto bail;
395 cks = ret;
396
397 ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
398 if (ret < 0)
399 goto bail;
400 for (idx = 0; idx < QSFP_SN_LEN; ++idx)
401 cks += cp->serial[idx];
402
403 ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
404 if (ret < 0)
405 goto bail;
406 for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
407 cks += cp->date[idx];
408
409 ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
410 if (ret < 0)
411 goto bail;
412 for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
413 cks += cp->lot[idx];
414
415 ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
416 if (ret < 0)
417 goto bail;
418 cks += ret;
419
420 ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
421 if (ret < 0)
422 goto bail;
423 cks &= 0xFF;
424 if (cks != cp->cks2)
425 qib_dev_porterr(ppd->dd, ppd->port,
426 "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
427 cks);
428 return 0;
429
430bail:
431 cp->id = 0;
432 return ret;
433}
434
435const char * const qib_qsfp_devtech[16] = {
436 "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
437 "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
438 "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
439 "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
440};
441
442#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
443#define QSFP_DEFAULT_HDR_CNT 224
444
445static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
446
447/*
448 * Initialize structures that control access to QSFP. Called once per port
449 * on cards that support QSFP.
450 */
451void qib_qsfp_init(struct qib_qsfp_data *qd,
452 void (*fevent)(struct work_struct *))
453{
454 u32 mask, highs;
455 int pins;
456
457 struct qib_devdata *dd = qd->ppd->dd;
458
459 /* Initialize work struct for later QSFP events */
460 INIT_WORK(&qd->work, fevent);
461
462 /*
463 * Later, we may want more validation. For now, just set up pins and
464 * blip reset. If module is present, call qib_refresh_qsfp_cache(),
465 * to do further init.
466 */
467 mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
468 highs = mask - QSFP_GPIO_MOD_RST_N;
469 if (qd->ppd->hw_pidx) {
470 mask <<= QSFP_GPIO_PORT2_SHIFT;
471 highs <<= QSFP_GPIO_PORT2_SHIFT;
472 }
473 dd->f_gpio_mod(dd, highs, mask, mask);
474 udelay(20); /* Generous RST dwell */
475
476 dd->f_gpio_mod(dd, mask, mask, mask);
477 /* Spec says module can take up to two seconds! */
478 mask = QSFP_GPIO_MOD_PRS_N;
479 if (qd->ppd->hw_pidx)
480 mask <<= QSFP_GPIO_PORT2_SHIFT;
481
482 /* Do not try to wait here. Better to let event handle it */
483 pins = dd->f_gpio_mod(dd, 0, 0, 0);
484 if (pins & mask)
485 goto bail;
486 /* We see a module, but it may be unwise to look yet. Just schedule */
487 qd->t_insert = get_jiffies_64();
488 schedule_work(&qd->work);
489bail:
490 return;
491}
492
493void qib_qsfp_deinit(struct qib_qsfp_data *qd)
494{
495 /*
496	 * There is nothing to do here for now. Our
497	 * work is scheduled with schedule_work(), and
498	 * flush_scheduled_work() from remove_one will
499	 * block until all work set up with schedule_work()
500	 * completes.
501 */
502}
503
504int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
505{
506 struct qib_qsfp_cache cd;
507 u8 bin_buff[QSFP_DUMP_CHUNK];
508 char lenstr[6];
509 int sofar, ret;
510 int bidx = 0;
511
512 sofar = 0;
513 ret = qib_refresh_qsfp_cache(ppd, &cd);
514 if (ret < 0)
515 goto bail;
516
517 lenstr[0] = ' ';
518 lenstr[1] = '\0';
519 if (QSFP_IS_CU(cd.tech))
520 sprintf(lenstr, "%dM ", cd.len);
521
522 sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
523 (QSFP_PWR(cd.pwr) * 4));
524
525 sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
526 qib_qsfp_devtech[cd.tech >> 4]);
527
528 sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
529 QSFP_VEND_LEN, cd.vendor);
530
531 sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
532 QSFP_OUI(cd.oui));
533
534 sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
535 QSFP_PN_LEN, cd.partnum);
536 sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
537 QSFP_REV_LEN, cd.rev);
538 if (QSFP_IS_CU(cd.tech))
539 sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
540 QSFP_ATTEN_SDR(cd.atten),
541 QSFP_ATTEN_DDR(cd.atten));
542 sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
543 QSFP_SN_LEN, cd.serial);
544 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
545 QSFP_DATE_LEN, cd.date);
546 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
547			   QSFP_LOT_LEN, cd.lot);
548
549 while (bidx < QSFP_DEFAULT_HDR_CNT) {
550 int iidx;
551 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
552 if (ret < 0)
553 goto bail;
554 for (iidx = 0; iidx < ret; ++iidx) {
555 sofar += scnprintf(buf + sofar, len-sofar, " %02X",
556 bin_buff[iidx]);
557 }
558 sofar += scnprintf(buf + sofar, len - sofar, "\n");
559 bidx += QSFP_DUMP_CHUNK;
560 }
561 ret = sofar;
562bail:
563 return ret;
564}
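
The pwr_codes string used by qib_qsfp_dump() above packs the four power-class labels into one 16-character array, four characters per class, indexed by bits 7:6 of QSFP byte 129. A stand-alone sketch of the same lookup; the byte value below is invented:

#include <stdio.h>
#include <stdint.h>

static const char *pwr_codes = "1.5W2.0W2.5W3.5W";

/* Power class lives in bits 7:6 of the "extended identifier" byte. */
#define PWR_CLASS(pbyte) (((pbyte) >> 6) & 3)

int main(void)
{
	uint8_t pwr = 0x80;	/* invented extended-identifier byte: class 2 */

	/* Print the 4-character label at the class's slot: "PWR:2.5W" */
	printf("PWR:%.4s\n", pwr_codes + PWR_CLASS(pwr) * 4);
	return 0;
}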
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 000000000000..19b527bafd57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/* QSFP support common definitions, for ib_qib driver */
34
35#define QSFP_DEV 0xA0
36#define QSFP_PWR_LAG_MSEC 2000
37
38/*
39 * Below are masks for various QSFP signals, for Port 1.
40 * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
41 * _N means asserted low
42 */
43#define QSFP_GPIO_MOD_SEL_N (4)
44#define QSFP_GPIO_MOD_PRS_N (8)
45#define QSFP_GPIO_INT_N (0x10)
46#define QSFP_GPIO_MOD_RST_N (0x20)
47#define QSFP_GPIO_LP_MODE (0x40)
48#define QSFP_GPIO_PORT2_SHIFT 5
49
50#define QSFP_PAGESIZE 128
51/* Defined fields that QLogic requires of qualified cables */
52/* Byte 0 is Identifier, not checked */
53/* Byte 1 is reserved "status MSB" */
54/* Byte 2 is "status LSB". We only care that D2 "Flat Mem" is set. */
55/*
56 * Rest of first 128 not used, although 127 is reserved for page select
57 * if module is not "Flat memory".
58 */
59/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
60#define QSFP_MOD_ID_OFFS 128
61/*
62 * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
63 * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
64 */
65#define QSFP_MOD_PWR_OFFS 129
66/* Byte 130 is Connector type. Not QLogic req'd */
67/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
68/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
69/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */
70/* Byte 141 is Extended Rate Select. Not QLogic req'd */
71/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
72/* Byte 146 is length for Copper. Units of 1 meter */
73#define QSFP_MOD_LEN_OFFS 146
74/*
75 * Byte 147 is Device technology. D0..3 not QLogic req'd
76 * D4..7 select from 15 choices, translated by table:
77 */
78#define QSFP_MOD_TECH_OFFS 147
79extern const char *const qib_qsfp_devtech[16];
80/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
81#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
82/* Attenuation should be valid for copper other than full/near Eq */
83#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
84/* Length is only valid if technology is "copper" */
85#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
86#define QSFP_TECH_1490 9
87
88#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
89 oui[2])
90#define QSFP_OUI_AMPHENOL 0x415048
91#define QSFP_OUI_FINISAR 0x009065
92#define QSFP_OUI_GORE 0x002177
93
94/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
95#define QSFP_VEND_OFFS 148
96#define QSFP_VEND_LEN 16
97/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
98#define QSFP_IBXCV_OFFS 164
99/* Bytes 165..167 are Vendor OUI number */
100#define QSFP_VOUI_OFFS 165
101#define QSFP_VOUI_LEN 3
102/* Bytes 168..183 are Vendor Part Number, string */
103#define QSFP_PN_OFFS 168
104#define QSFP_PN_LEN 16
105/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
106#define QSFP_REV_OFFS 184
107#define QSFP_REV_LEN 2
108/*
109 * Bytes 186,187 are Wavelength, if Optical. Not Qlogic req'd
110 * If copper, they are attenuation in dB:
111 * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
112 */
113#define QSFP_ATTEN_OFFS 186
114#define QSFP_ATTEN_LEN 2
115/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
116/* Byte 190 is Max Case Temp. Not QLogic req'd */
117/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
118#define QSFP_CC_OFFS 191
119/* Bytes 192..195 are Options implemented in qsfp. Not Qlogic req'd */
120/* Bytes 196..211 are Serial Number, String */
121#define QSFP_SN_OFFS 196
122#define QSFP_SN_LEN 16
123/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
124#define QSFP_DATE_OFFS 212
125#define QSFP_DATE_LEN 6
126/* Bytes 218,219 are optional lot-code, string */
127#define QSFP_LOT_OFFS 218
128#define QSFP_LOT_LEN 2
129/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
130/* Byte 223 is LSB of sum of bytes 192..222 */
131#define QSFP_CC_EXT_OFFS 223
132
133/*
134 * struct qib_qsfp_data encapsulates state of QSFP device for one port.
135 * It will be part of port-chip-specific data if a board supports QSFP.
136 *
137 * Since multiple board-types use QSFP, and their pport_data structs
138 * differ (in the chip-specific section), we need a pointer to its head.
139 *
140 * Avoiding premature optimization, we will have one work_struct per port,
141 * and let the (increasingly inaccurately named) eep_lock arbitrate
142 * access to common resources.
143 *
144 */
145
146/*
147 * Hold the parts of the onboard EEPROM that we care about, so we aren't
148 * constantly bit-banging the TWSI bus
149 */
150struct qib_qsfp_cache {
151 u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
152 u8 pwr; /* in D6,7 */
153 u8 len; /* in meters, Cu only */
154 u8 tech;
155 char vendor[QSFP_VEND_LEN];
156	u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
157 u8 oui[QSFP_VOUI_LEN];
158 u8 partnum[QSFP_PN_LEN];
159 u8 rev[QSFP_REV_LEN];
160 u8 atten[QSFP_ATTEN_LEN];
161 u8 cks1; /* Checksum of bytes 128..190 */
162 u8 serial[QSFP_SN_LEN];
163 u8 date[QSFP_DATE_LEN];
164 u8 lot[QSFP_LOT_LEN];
165	u8 cks2; /* Checksum of bytes 192..222 */
166};
167
168#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
169#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
170#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
171
172struct qib_qsfp_data {
173 /* Helps to find our way */
174 struct qib_pportdata *ppd;
175 struct work_struct work;
176 struct qib_qsfp_cache cache;
177 u64 t_insert;
178};
179
180extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
181 struct qib_qsfp_cache *cp);
182extern void qib_qsfp_init(struct qib_qsfp_data *qd,
183 void (*fevent)(struct work_struct *));
184extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
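
The QSFP_IS_ACTIVE/QSFP_HAS_ATTEN/QSFP_IS_CU macros above each fold a 16-entry boolean table into a single constant: bit N of the constant answers the question for technology code N (the upper nibble of byte 147). A stand-alone sketch showing the QSFP_IS_CU mask against the qib_qsfp_devtech names:

#include <stdio.h>

static const char *const devtech[16] = {
	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

/* Bit N of 0xED00 says whether technology code N is copper (cf. QSFP_IS_CU). */
#define IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)

int main(void)
{
	int code;

	for (code = 0; code < 16; code++)
		printf("%-17s copper=%d\n", devtech[code], IS_CU(code << 4));
	return 0;
}

Running this marks exactly the "Cu ..." entries as copper, which is how the single mask value reproduces the table.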
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 000000000000..40c0a373719c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/io.h>
35
36#include "qib.h"
37
38/* cut down ridiculously long IB macro names */
39#define OP(x) IB_OPCODE_RC_##x
40
41static void rc_timeout(unsigned long arg);
42
43static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
44 u32 psn, u32 pmtu)
45{
46 u32 len;
47
48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
49 ss->sge = wqe->sg_list[0];
50 ss->sg_list = wqe->sg_list + 1;
51 ss->num_sge = wqe->wr.num_sge;
52 ss->total_len = wqe->length;
53 qib_skip_sge(ss, len, 0);
54 return wqe->length - len;
55}
56
57static void start_timer(struct qib_qp *qp)
58{
59 qp->s_flags |= QIB_S_TIMER;
60 qp->s_timer.function = rc_timeout;
61 /* 4.096 usec. * (1 << qp->timeout) */
62 qp->s_timer.expires = jiffies +
63 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
64 add_timer(&qp->s_timer);
65}
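
start_timer() above implements the InfiniBand Local ACK Timeout encoding: the retry interval is 4.096 usec * 2^timeout, then converted to jiffies. A stand-alone sketch that just evaluates the formula for a few representative encodings:

#include <stdio.h>

int main(void)
{
	unsigned timeout;

	/* 4.096 usec * 2^timeout, printed in milliseconds. */
	for (timeout = 10; timeout <= 20; timeout += 2) {
		unsigned long long usec =
			4096ULL * (1ULL << timeout) / 1000ULL;

		printf("timeout=%2u -> %llu.%03llu ms\n",
		       timeout, usec / 1000, usec % 1000);
	}
	return 0;
}

For example, the common encoding 14 works out to roughly 67 ms per retry.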
66
67/**
68 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
69 * @dev: the device for this QP
70 * @qp: a pointer to the QP
71 * @ohdr: a pointer to the IB header being constructed
72 * @pmtu: the path MTU
73 *
74 * Return 1 if constructed; otherwise, return 0.
75 * Note that we are in the responder's side of the QP context.
76 * Note the QP s_lock must be held.
77 */
78static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
79 struct qib_other_headers *ohdr, u32 pmtu)
80{
81 struct qib_ack_entry *e;
82 u32 hwords;
83 u32 len;
84 u32 bth0;
85 u32 bth2;
86
87 /* Don't send an ACK if we aren't supposed to. */
88 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
89 goto bail;
90
91 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
92 hwords = 5;
93
94 switch (qp->s_ack_state) {
95 case OP(RDMA_READ_RESPONSE_LAST):
96 case OP(RDMA_READ_RESPONSE_ONLY):
97 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
98 if (e->rdma_sge.mr) {
99 atomic_dec(&e->rdma_sge.mr->refcount);
100 e->rdma_sge.mr = NULL;
101 }
102 /* FALLTHROUGH */
103 case OP(ATOMIC_ACKNOWLEDGE):
104 /*
105 * We can increment the tail pointer now that the last
106 * response has been sent instead of only being
107 * constructed.
108 */
109 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
110 qp->s_tail_ack_queue = 0;
111 /* FALLTHROUGH */
112 case OP(SEND_ONLY):
113 case OP(ACKNOWLEDGE):
114 /* Check for no next entry in the queue. */
115 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
116 if (qp->s_flags & QIB_S_ACK_PENDING)
117 goto normal;
118 goto bail;
119 }
120
121 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
122 if (e->opcode == OP(RDMA_READ_REQUEST)) {
123 /*
124 * If a RDMA read response is being resent and
125 * we haven't seen the duplicate request yet,
126 * then stop sending the remaining responses the
127 * responder has seen until the requester resends it.
128 */
129 len = e->rdma_sge.sge_length;
130 if (len && !e->rdma_sge.mr) {
131 qp->s_tail_ack_queue = qp->r_head_ack_queue;
132 goto bail;
133 }
134 /* Copy SGE state in case we need to resend */
135 qp->s_rdma_mr = e->rdma_sge.mr;
136 if (qp->s_rdma_mr)
137 atomic_inc(&qp->s_rdma_mr->refcount);
138 qp->s_ack_rdma_sge.sge = e->rdma_sge;
139 qp->s_ack_rdma_sge.num_sge = 1;
140 qp->s_cur_sge = &qp->s_ack_rdma_sge;
141 if (len > pmtu) {
142 len = pmtu;
143 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
144 } else {
145 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
146 e->sent = 1;
147 }
148 ohdr->u.aeth = qib_compute_aeth(qp);
149 hwords++;
150 qp->s_ack_rdma_psn = e->psn;
151 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
152 } else {
153 /* COMPARE_SWAP or FETCH_ADD */
154 qp->s_cur_sge = NULL;
155 len = 0;
156 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
157 ohdr->u.at.aeth = qib_compute_aeth(qp);
158 ohdr->u.at.atomic_ack_eth[0] =
159 cpu_to_be32(e->atomic_data >> 32);
160 ohdr->u.at.atomic_ack_eth[1] =
161 cpu_to_be32(e->atomic_data);
162 hwords += sizeof(ohdr->u.at) / sizeof(u32);
163 bth2 = e->psn & QIB_PSN_MASK;
164 e->sent = 1;
165 }
166 bth0 = qp->s_ack_state << 24;
167 break;
168
169 case OP(RDMA_READ_RESPONSE_FIRST):
170 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
171 /* FALLTHROUGH */
172 case OP(RDMA_READ_RESPONSE_MIDDLE):
173 qp->s_cur_sge = &qp->s_ack_rdma_sge;
174 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
175 if (qp->s_rdma_mr)
176 atomic_inc(&qp->s_rdma_mr->refcount);
177 len = qp->s_ack_rdma_sge.sge.sge_length;
178 if (len > pmtu)
179 len = pmtu;
180 else {
181 ohdr->u.aeth = qib_compute_aeth(qp);
182 hwords++;
183 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
184 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
185 e->sent = 1;
186 }
187 bth0 = qp->s_ack_state << 24;
188 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
189 break;
190
191 default:
192normal:
193 /*
194 * Send a regular ACK.
195 * Set the s_ack_state so we wait until after sending
196 * the ACK before setting s_ack_state to ACKNOWLEDGE
197 * (see above).
198 */
199 qp->s_ack_state = OP(SEND_ONLY);
200 qp->s_flags &= ~QIB_S_ACK_PENDING;
201 qp->s_cur_sge = NULL;
202 if (qp->s_nak_state)
203 ohdr->u.aeth =
204 cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
205 (qp->s_nak_state <<
206 QIB_AETH_CREDIT_SHIFT));
207 else
208 ohdr->u.aeth = qib_compute_aeth(qp);
209 hwords++;
210 len = 0;
211 bth0 = OP(ACKNOWLEDGE) << 24;
212 bth2 = qp->s_ack_psn & QIB_PSN_MASK;
213 }
214 qp->s_rdma_ack_cnt++;
215 qp->s_hdrwords = hwords;
216 qp->s_cur_size = len;
217 qib_make_ruc_header(qp, ohdr, bth0, bth2);
218 return 1;
219
220bail:
221 qp->s_ack_state = OP(ACKNOWLEDGE);
222 qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
223 return 0;
224}
225
226/**
227 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
228 * @qp: a pointer to the QP
229 *
230 * Return 1 if constructed; otherwise, return 0.
231 */
232int qib_make_rc_req(struct qib_qp *qp)
233{
234 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
235 struct qib_other_headers *ohdr;
236 struct qib_sge_state *ss;
237 struct qib_swqe *wqe;
238 u32 hwords;
239 u32 len;
240 u32 bth0;
241 u32 bth2;
242 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
243 char newreq;
244 unsigned long flags;
245 int ret = 0;
246 int delta;
247
248 ohdr = &qp->s_hdr.u.oth;
249 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
250 ohdr = &qp->s_hdr.u.l.oth;
251
252 /*
253 * The lock is needed to synchronize between the sending tasklet,
254 * the receive interrupt handler, and timeout resends.
255 */
256 spin_lock_irqsave(&qp->s_lock, flags);
257
258 /* Sending responses has higher priority over sending requests. */
259 if ((qp->s_flags & QIB_S_RESP_PENDING) &&
260 qib_make_rc_ack(dev, qp, ohdr, pmtu))
261 goto done;
262
263 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
264 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
265 goto bail;
266 /* We are in the error state, flush the work request. */
267 if (qp->s_last == qp->s_head)
268 goto bail;
269 /* If DMAs are in progress, we can't flush immediately. */
270 if (atomic_read(&qp->s_dma_busy)) {
271 qp->s_flags |= QIB_S_WAIT_DMA;
272 goto bail;
273 }
274 wqe = get_swqe_ptr(qp, qp->s_last);
275 while (qp->s_last != qp->s_acked) {
276 qib_send_complete(qp, wqe, IB_WC_SUCCESS);
277 if (++qp->s_last >= qp->s_size)
278 qp->s_last = 0;
279 wqe = get_swqe_ptr(qp, qp->s_last);
280 }
281 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
282 goto done;
283 }
284
285 if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
286 goto bail;
287
288 if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
289 if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
290 qp->s_flags |= QIB_S_WAIT_PSN;
291 goto bail;
292 }
293 qp->s_sending_psn = qp->s_psn;
294 qp->s_sending_hpsn = qp->s_psn - 1;
295 }
296
297 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
298 hwords = 5;
299 bth0 = 0;
300
301 /* Send a request. */
302 wqe = get_swqe_ptr(qp, qp->s_cur);
303 switch (qp->s_state) {
304 default:
305 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
306 goto bail;
307 /*
308 * Resend an old request or start a new one.
309 *
310 * We keep track of the current SWQE so that
311 * we don't reset the "furthest progress" state
312 * if we need to back up.
313 */
314 newreq = 0;
315 if (qp->s_cur == qp->s_tail) {
316 /* Check if send work queue is empty. */
317 if (qp->s_tail == qp->s_head)
318 goto bail;
319 /*
320 * If a fence is requested, wait for previous
321 * RDMA read and atomic operations to finish.
322 */
323 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
324 qp->s_num_rd_atomic) {
325 qp->s_flags |= QIB_S_WAIT_FENCE;
326 goto bail;
327 }
328 wqe->psn = qp->s_next_psn;
329 newreq = 1;
330 }
331 /*
332 * Note that we have to be careful not to modify the
333 * original work request since we may need to resend
334 * it.
335 */
336 len = wqe->length;
337 ss = &qp->s_sge;
338 bth2 = qp->s_psn & QIB_PSN_MASK;
339 switch (wqe->wr.opcode) {
340 case IB_WR_SEND:
341 case IB_WR_SEND_WITH_IMM:
342 /* If no credit, return. */
343 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
344 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
345 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
346 goto bail;
347 }
348 wqe->lpsn = wqe->psn;
349 if (len > pmtu) {
350 wqe->lpsn += (len - 1) / pmtu;
351 qp->s_state = OP(SEND_FIRST);
352 len = pmtu;
353 break;
354 }
355 if (wqe->wr.opcode == IB_WR_SEND)
356 qp->s_state = OP(SEND_ONLY);
357 else {
358 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
359 /* Immediate data comes after the BTH */
360 ohdr->u.imm_data = wqe->wr.ex.imm_data;
361 hwords += 1;
362 }
363 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
364 bth0 |= IB_BTH_SOLICITED;
365 bth2 |= IB_BTH_REQ_ACK;
366 if (++qp->s_cur == qp->s_size)
367 qp->s_cur = 0;
368 break;
369
370 case IB_WR_RDMA_WRITE:
371 if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
372 qp->s_lsn++;
373 /* FALLTHROUGH */
374 case IB_WR_RDMA_WRITE_WITH_IMM:
375 /* If no credit, return. */
376 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
377 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
378 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
379 goto bail;
380 }
381 ohdr->u.rc.reth.vaddr =
382 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
383 ohdr->u.rc.reth.rkey =
384 cpu_to_be32(wqe->wr.wr.rdma.rkey);
385 ohdr->u.rc.reth.length = cpu_to_be32(len);
386 hwords += sizeof(struct ib_reth) / sizeof(u32);
387 wqe->lpsn = wqe->psn;
388 if (len > pmtu) {
389 wqe->lpsn += (len - 1) / pmtu;
390 qp->s_state = OP(RDMA_WRITE_FIRST);
391 len = pmtu;
392 break;
393 }
394 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
395 qp->s_state = OP(RDMA_WRITE_ONLY);
396 else {
397 qp->s_state =
398 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
399 /* Immediate data comes after RETH */
400 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
401 hwords += 1;
402 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
403 bth0 |= IB_BTH_SOLICITED;
404 }
405 bth2 |= IB_BTH_REQ_ACK;
406 if (++qp->s_cur == qp->s_size)
407 qp->s_cur = 0;
408 break;
409
410 case IB_WR_RDMA_READ:
411 /*
412 * Don't allow more operations to be started
413 * than the QP limits allow.
414 */
415 if (newreq) {
416 if (qp->s_num_rd_atomic >=
417 qp->s_max_rd_atomic) {
418 qp->s_flags |= QIB_S_WAIT_RDMAR;
419 goto bail;
420 }
421 qp->s_num_rd_atomic++;
422 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
423 qp->s_lsn++;
424 /*
425 * Adjust s_next_psn to count the
426 * expected number of responses.
427 */
428 if (len > pmtu)
429 qp->s_next_psn += (len - 1) / pmtu;
430 wqe->lpsn = qp->s_next_psn++;
431 }
432 ohdr->u.rc.reth.vaddr =
433 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
434 ohdr->u.rc.reth.rkey =
435 cpu_to_be32(wqe->wr.wr.rdma.rkey);
436 ohdr->u.rc.reth.length = cpu_to_be32(len);
437 qp->s_state = OP(RDMA_READ_REQUEST);
438 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
439 ss = NULL;
440 len = 0;
441 bth2 |= IB_BTH_REQ_ACK;
442 if (++qp->s_cur == qp->s_size)
443 qp->s_cur = 0;
444 break;
445
446 case IB_WR_ATOMIC_CMP_AND_SWP:
447 case IB_WR_ATOMIC_FETCH_AND_ADD:
448 /*
449 * Don't allow more operations to be started
450 * than the QP limits allow.
451 */
452 if (newreq) {
453 if (qp->s_num_rd_atomic >=
454 qp->s_max_rd_atomic) {
455 qp->s_flags |= QIB_S_WAIT_RDMAR;
456 goto bail;
457 }
458 qp->s_num_rd_atomic++;
459 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
460 qp->s_lsn++;
461 wqe->lpsn = wqe->psn;
462 }
463 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
464 qp->s_state = OP(COMPARE_SWAP);
465 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
466 wqe->wr.wr.atomic.swap);
467 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
468 wqe->wr.wr.atomic.compare_add);
469 } else {
470 qp->s_state = OP(FETCH_ADD);
471 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
472 wqe->wr.wr.atomic.compare_add);
473 ohdr->u.atomic_eth.compare_data = 0;
474 }
475 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
476 wqe->wr.wr.atomic.remote_addr >> 32);
477 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
478 wqe->wr.wr.atomic.remote_addr);
479 ohdr->u.atomic_eth.rkey = cpu_to_be32(
480 wqe->wr.wr.atomic.rkey);
481 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
482 ss = NULL;
483 len = 0;
484 bth2 |= IB_BTH_REQ_ACK;
485 if (++qp->s_cur == qp->s_size)
486 qp->s_cur = 0;
487 break;
488
489 default:
490 goto bail;
491 }
492 qp->s_sge.sge = wqe->sg_list[0];
493 qp->s_sge.sg_list = wqe->sg_list + 1;
494 qp->s_sge.num_sge = wqe->wr.num_sge;
495 qp->s_sge.total_len = wqe->length;
496 qp->s_len = wqe->length;
497 if (newreq) {
498 qp->s_tail++;
499 if (qp->s_tail >= qp->s_size)
500 qp->s_tail = 0;
501 }
502 if (wqe->wr.opcode == IB_WR_RDMA_READ)
503 qp->s_psn = wqe->lpsn + 1;
504 else {
505 qp->s_psn++;
506 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
507 qp->s_next_psn = qp->s_psn;
508 }
509 break;
510
511 case OP(RDMA_READ_RESPONSE_FIRST):
512 /*
513 * qp->s_state is normally set to the opcode of the
514 * last packet constructed for new requests and therefore
515 * is never set to RDMA read response.
516 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
517 * thread to indicate a SEND needs to be restarted from an
518		 * earlier PSN without interfering with the sending thread.
519 * See qib_restart_rc().
520 */
521 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
522 /* FALLTHROUGH */
523 case OP(SEND_FIRST):
524 qp->s_state = OP(SEND_MIDDLE);
525 /* FALLTHROUGH */
526 case OP(SEND_MIDDLE):
527 bth2 = qp->s_psn++ & QIB_PSN_MASK;
528 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
529 qp->s_next_psn = qp->s_psn;
530 ss = &qp->s_sge;
531 len = qp->s_len;
532 if (len > pmtu) {
533 len = pmtu;
534 break;
535 }
536 if (wqe->wr.opcode == IB_WR_SEND)
537 qp->s_state = OP(SEND_LAST);
538 else {
539 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
540 /* Immediate data comes after the BTH */
541 ohdr->u.imm_data = wqe->wr.ex.imm_data;
542 hwords += 1;
543 }
544 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
545 bth0 |= IB_BTH_SOLICITED;
546 bth2 |= IB_BTH_REQ_ACK;
547 qp->s_cur++;
548 if (qp->s_cur >= qp->s_size)
549 qp->s_cur = 0;
550 break;
551
552 case OP(RDMA_READ_RESPONSE_LAST):
553 /*
554 * qp->s_state is normally set to the opcode of the
555 * last packet constructed for new requests and therefore
556 * is never set to RDMA read response.
557 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
558 * thread to indicate a RDMA write needs to be restarted from
559		 * an earlier PSN without interfering with the sending thread.
560 * See qib_restart_rc().
561 */
562 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
563 /* FALLTHROUGH */
564 case OP(RDMA_WRITE_FIRST):
565 qp->s_state = OP(RDMA_WRITE_MIDDLE);
566 /* FALLTHROUGH */
567 case OP(RDMA_WRITE_MIDDLE):
568 bth2 = qp->s_psn++ & QIB_PSN_MASK;
569 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
570 qp->s_next_psn = qp->s_psn;
571 ss = &qp->s_sge;
572 len = qp->s_len;
573 if (len > pmtu) {
574 len = pmtu;
575 break;
576 }
577 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
578 qp->s_state = OP(RDMA_WRITE_LAST);
579 else {
580 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
581 /* Immediate data comes after the BTH */
582 ohdr->u.imm_data = wqe->wr.ex.imm_data;
583 hwords += 1;
584 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
585 bth0 |= IB_BTH_SOLICITED;
586 }
587 bth2 |= IB_BTH_REQ_ACK;
588 qp->s_cur++;
589 if (qp->s_cur >= qp->s_size)
590 qp->s_cur = 0;
591 break;
592
593 case OP(RDMA_READ_RESPONSE_MIDDLE):
594 /*
595 * qp->s_state is normally set to the opcode of the
596 * last packet constructed for new requests and therefore
597 * is never set to RDMA read response.
598 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
599 * thread to indicate a RDMA read needs to be restarted from
600		 * an earlier PSN without interfering with the sending thread.
601 * See qib_restart_rc().
602 */
603 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
604 ohdr->u.rc.reth.vaddr =
605 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
606 ohdr->u.rc.reth.rkey =
607 cpu_to_be32(wqe->wr.wr.rdma.rkey);
608 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
609 qp->s_state = OP(RDMA_READ_REQUEST);
610 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
611 bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
612 qp->s_psn = wqe->lpsn + 1;
613 ss = NULL;
614 len = 0;
615 qp->s_cur++;
616 if (qp->s_cur == qp->s_size)
617 qp->s_cur = 0;
618 break;
619 }
620 qp->s_sending_hpsn = bth2;
621 delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
622 if (delta && delta % QIB_PSN_CREDIT == 0)
623 bth2 |= IB_BTH_REQ_ACK;
624 if (qp->s_flags & QIB_S_SEND_ONE) {
625 qp->s_flags &= ~QIB_S_SEND_ONE;
626 qp->s_flags |= QIB_S_WAIT_ACK;
627 bth2 |= IB_BTH_REQ_ACK;
628 }
629 qp->s_len -= len;
630 qp->s_hdrwords = hwords;
631 qp->s_cur_sge = ss;
632 qp->s_cur_size = len;
633 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
634done:
635 ret = 1;
636 goto unlock;
637
638bail:
639 qp->s_flags &= ~QIB_S_BUSY;
640unlock:
641 spin_unlock_irqrestore(&qp->s_lock, flags);
642 return ret;
643}
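
PSNs are 24-bit quantities, so the code above compares them with qib_cmp24() and the "(x << 8) >> 8" sign-extension idiom (see the delta computation near the end of qib_make_rc_req()) rather than plain subtraction. A stand-alone sketch of that comparison; the helper name below is invented:

#include <stdio.h>
#include <stdint.h>

/*
 * Compare two 24-bit PSNs: negative means a is before b, zero equal,
 * positive means a is after b, treating the sequence space as circular.
 * The difference is truncated to 24 bits and sign-extended, mirroring
 * the driver's qib_cmp24().
 */
static int cmp24(uint32_t a, uint32_t b)
{
	return ((int)((a - b) << 8)) >> 8;
}

int main(void)
{
	/* 0x000002 follows 0xFFFFFE once the 24-bit counter wraps. */
	printf("%d\n", cmp24(0x000002, 0xFFFFFE));	/* prints 4 */
	printf("%d\n", cmp24(0x000010, 0x000020));	/* prints -16 */
	return 0;
}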
644
645/**
646 * qib_send_rc_ack - Construct an ACK packet and send it
647 * @qp: a pointer to the QP
648 *
649 * This is called from qib_rc_rcv() and qib_kreceive().
650 * Note that RDMA reads and atomics are handled in the
651 * send side QP state and tasklet.
652 */
653void qib_send_rc_ack(struct qib_qp *qp)
654{
655 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
656 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
657 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
658 u64 pbc;
659 u16 lrh0;
660 u32 bth0;
661 u32 hwords;
662 u32 pbufn;
663 u32 __iomem *piobuf;
664 struct qib_ib_header hdr;
665 struct qib_other_headers *ohdr;
666 u32 control;
667 unsigned long flags;
668
669 spin_lock_irqsave(&qp->s_lock, flags);
670
671 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
672 goto unlock;
673
674 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
675 if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
676 goto queue_ack;
677
678 /* Construct the header with s_lock held so APM doesn't change it. */
679 ohdr = &hdr.u.oth;
680 lrh0 = QIB_LRH_BTH;
681 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
682 hwords = 6;
683 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
684 hwords += qib_make_grh(ibp, &hdr.u.l.grh,
685 &qp->remote_ah_attr.grh, hwords, 0);
686 ohdr = &hdr.u.l.oth;
687 lrh0 = QIB_LRH_GRH;
688 }
689	/* read pkey_index w/o lock (it's atomic) */
690 bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
691 if (qp->s_mig_state == IB_MIG_MIGRATED)
692 bth0 |= IB_BTH_MIG_REQ;
693 if (qp->r_nak_state)
694 ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
695 (qp->r_nak_state <<
696 QIB_AETH_CREDIT_SHIFT));
697 else
698 ohdr->u.aeth = qib_compute_aeth(qp);
699 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
700 qp->remote_ah_attr.sl << 4;
701 hdr.lrh[0] = cpu_to_be16(lrh0);
702 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
703 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
704 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
705 ohdr->bth[0] = cpu_to_be32(bth0);
706 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
707 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
708
709 spin_unlock_irqrestore(&qp->s_lock, flags);
710
711 /* Don't try to send ACKs if the link isn't ACTIVE */
712 if (!(ppd->lflags & QIBL_LINKACTIVE))
713 goto done;
714
715 control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
716 qp->s_srate, lrh0 >> 12);
717 /* length is + 1 for the control dword */
718 pbc = ((u64) control << 32) | (hwords + 1);
719
720 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
721 if (!piobuf) {
722 /*
723 * We are out of PIO buffers at the moment.
724 * Pass responsibility for sending the ACK to the
725 * send tasklet so that when a PIO buffer becomes
726 * available, the ACK is sent ahead of other outgoing
727 * packets.
728 */
729 spin_lock_irqsave(&qp->s_lock, flags);
730 goto queue_ack;
731 }
732
733 /*
734	 * Write the PBC.
735	 * We have to flush after the PBC for correctness
736	 * on some CPUs, or the WC buffer can be written out of order.
737 */
738 writeq(pbc, piobuf);
739
740 if (dd->flags & QIB_PIO_FLUSH_WC) {
741 u32 *hdrp = (u32 *) &hdr;
742
743 qib_flush_wc();
744 qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
745 qib_flush_wc();
746 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
747 } else
748 qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
749
750 if (dd->flags & QIB_USE_SPCL_TRIG) {
751 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
752
753 qib_flush_wc();
754 __raw_writel(0xaebecede, piobuf + spcl_off);
755 }
756
757 qib_flush_wc();
758 qib_sendbuf_done(dd, pbufn);
759
760 ibp->n_unicast_xmit++;
761 goto done;
762
763queue_ack:
764 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
765 ibp->n_rc_qacks++;
766 qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
767 qp->s_nak_state = qp->r_nak_state;
768 qp->s_ack_psn = qp->r_ack_psn;
769
770 /* Schedule the send tasklet. */
771 qib_schedule_send(qp);
772 }
773unlock:
774 spin_unlock_irqrestore(&qp->s_lock, flags);
775done:
776 return;
777}
778
779/**
780 * reset_psn - reset the QP state to send starting from PSN
781 * @qp: the QP
782 * @psn: the packet sequence number to restart at
783 *
784 * This is called from qib_rc_rcv() to process an incoming RC ACK
785 * for the given QP.
786 * Called at interrupt level with the QP s_lock held.
787 */
788static void reset_psn(struct qib_qp *qp, u32 psn)
789{
790 u32 n = qp->s_acked;
791 struct qib_swqe *wqe = get_swqe_ptr(qp, n);
792 u32 opcode;
793
794 qp->s_cur = n;
795
796 /*
797 * If we are starting the request from the beginning,
798 * let the normal send code handle initialization.
799 */
800 if (qib_cmp24(psn, wqe->psn) <= 0) {
801 qp->s_state = OP(SEND_LAST);
802 goto done;
803 }
804
805 /* Find the work request opcode corresponding to the given PSN. */
806 opcode = wqe->wr.opcode;
807 for (;;) {
808 int diff;
809
810 if (++n == qp->s_size)
811 n = 0;
812 if (n == qp->s_tail)
813 break;
814 wqe = get_swqe_ptr(qp, n);
815 diff = qib_cmp24(psn, wqe->psn);
816 if (diff < 0)
817 break;
818 qp->s_cur = n;
819 /*
820 * If we are starting the request from the beginning,
821 * let the normal send code handle initialization.
822 */
823 if (diff == 0) {
824 qp->s_state = OP(SEND_LAST);
825 goto done;
826 }
827 opcode = wqe->wr.opcode;
828 }
829
830 /*
831 * Set the state to restart in the middle of a request.
832 * Don't change the s_sge, s_cur_sge, or s_cur_size.
833 * See qib_make_rc_req().
834 */
835 switch (opcode) {
836 case IB_WR_SEND:
837 case IB_WR_SEND_WITH_IMM:
838 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
839 break;
840
841 case IB_WR_RDMA_WRITE:
842 case IB_WR_RDMA_WRITE_WITH_IMM:
843 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
844 break;
845
846 case IB_WR_RDMA_READ:
847 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
848 break;
849
850 default:
851 /*
852		 * This case shouldn't happen since there is only
853		 * one PSN per request.
854 */
855 qp->s_state = OP(SEND_LAST);
856 }
857done:
858 qp->s_psn = psn;
859 /*
860 * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
861 * asynchronously before the send tasklet can get scheduled.
862 * Doing it in qib_make_rc_req() is too late.
863 */
864 if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
865 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
866 qp->s_flags |= QIB_S_WAIT_PSN;
867}
868
869/*
870 * Back up requester to resend the last un-ACKed request.
871 * The QP s_lock should be held and interrupts disabled.
872 */
873static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
874{
875 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
876 struct qib_ibport *ibp;
877
878 if (qp->s_retry == 0) {
879 if (qp->s_mig_state == IB_MIG_ARMED) {
880 qib_migrate_qp(qp);
881 qp->s_retry = qp->s_retry_cnt;
882 } else if (qp->s_last == qp->s_acked) {
883 qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
884 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
885 return;
886 } else /* XXX need to handle delayed completion */
887 return;
888 } else
889 qp->s_retry--;
890
891 ibp = to_iport(qp->ibqp.device, qp->port_num);
892 if (wqe->wr.opcode == IB_WR_RDMA_READ)
893 ibp->n_rc_resends++;
894 else
895 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
896
897 qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
898 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
899 QIB_S_WAIT_ACK);
900 if (wait)
901 qp->s_flags |= QIB_S_SEND_ONE;
902 reset_psn(qp, psn);
903}
904
905/*
906 * This is called from s_timer for missing responses.
907 */
908static void rc_timeout(unsigned long arg)
909{
910 struct qib_qp *qp = (struct qib_qp *)arg;
911 struct qib_ibport *ibp;
912 unsigned long flags;
913
914 spin_lock_irqsave(&qp->s_lock, flags);
915 if (qp->s_flags & QIB_S_TIMER) {
916 ibp = to_iport(qp->ibqp.device, qp->port_num);
917 ibp->n_rc_timeouts++;
918 qp->s_flags &= ~QIB_S_TIMER;
919 del_timer(&qp->s_timer);
920 qib_restart_rc(qp, qp->s_last_psn + 1, 1);
921 qib_schedule_send(qp);
922 }
923 spin_unlock_irqrestore(&qp->s_lock, flags);
924}
925
926/*
927 * This is called from s_timer for RNR timeouts.
928 */
929void qib_rc_rnr_retry(unsigned long arg)
930{
931 struct qib_qp *qp = (struct qib_qp *)arg;
932 unsigned long flags;
933
934 spin_lock_irqsave(&qp->s_lock, flags);
935 if (qp->s_flags & QIB_S_WAIT_RNR) {
936 qp->s_flags &= ~QIB_S_WAIT_RNR;
937 del_timer(&qp->s_timer);
938 qib_schedule_send(qp);
939 }
940 spin_unlock_irqrestore(&qp->s_lock, flags);
941}
942
943/*
944 * Set qp->s_sending_psn to the next PSN after the given one.
945 * This would be psn+1 except when RDMA reads are present.
946 */
947static void reset_sending_psn(struct qib_qp *qp, u32 psn)
948{
949 struct qib_swqe *wqe;
950 u32 n = qp->s_last;
951
952 /* Find the work request corresponding to the given PSN. */
953 for (;;) {
954 wqe = get_swqe_ptr(qp, n);
955 if (qib_cmp24(psn, wqe->lpsn) <= 0) {
956 if (wqe->wr.opcode == IB_WR_RDMA_READ)
957 qp->s_sending_psn = wqe->lpsn + 1;
958 else
959 qp->s_sending_psn = psn + 1;
960 break;
961 }
962 if (++n == qp->s_size)
963 n = 0;
964 if (n == qp->s_tail)
965 break;
966 }
967}
968
969/*
970 * This should be called with the QP s_lock held and interrupts disabled.
971 */
972void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
973{
974 struct qib_other_headers *ohdr;
975 struct qib_swqe *wqe;
976 struct ib_wc wc;
977 unsigned i;
978 u32 opcode;
979 u32 psn;
980
981 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
982 return;
983
984 /* Find out where the BTH is */
985 if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
986 ohdr = &hdr->u.oth;
987 else
988 ohdr = &hdr->u.l.oth;
989
990 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
991 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
992 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
993 WARN_ON(!qp->s_rdma_ack_cnt);
994 qp->s_rdma_ack_cnt--;
995 return;
996 }
997
998 psn = be32_to_cpu(ohdr->bth[2]);
999 reset_sending_psn(qp, psn);
1000
1001 /*
1002 * Start timer after a packet requesting an ACK has been sent and
1003 * there are still requests that haven't been acked.
1004 */
1005 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1006 !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
1007 start_timer(qp);
1008
1009 while (qp->s_last != qp->s_acked) {
1010 wqe = get_swqe_ptr(qp, qp->s_last);
1011 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1012 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1013 break;
1014 for (i = 0; i < wqe->wr.num_sge; i++) {
1015 struct qib_sge *sge = &wqe->sg_list[i];
1016
1017 atomic_dec(&sge->mr->refcount);
1018 }
1019 /* Post a send completion queue entry if requested. */
1020 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1021 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1022 memset(&wc, 0, sizeof wc);
1023 wc.wr_id = wqe->wr.wr_id;
1024 wc.status = IB_WC_SUCCESS;
1025 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1026 wc.byte_len = wqe->length;
1027 wc.qp = &qp->ibqp;
1028 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1029 }
1030 if (++qp->s_last >= qp->s_size)
1031 qp->s_last = 0;
1032 }
1033 /*
1034 * If we were waiting for sends to complete before resending,
1035 * and they are now complete, restart sending.
1036 */
1037 if (qp->s_flags & QIB_S_WAIT_PSN &&
1038 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1039 qp->s_flags &= ~QIB_S_WAIT_PSN;
1040 qp->s_sending_psn = qp->s_psn;
1041 qp->s_sending_hpsn = qp->s_psn - 1;
1042 qib_schedule_send(qp);
1043 }
1044}
1045
1046static inline void update_last_psn(struct qib_qp *qp, u32 psn)
1047{
1048 qp->s_last_psn = psn;
1049}
1050
1051/*
1052 * Generate a SWQE completion.
1053 * This is similar to qib_send_complete but has to check to be sure
1054 * that the SGEs are not being referenced if the SWQE is being resent.
1055 */
1056static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1057 struct qib_swqe *wqe,
1058 struct qib_ibport *ibp)
1059{
1060 struct ib_wc wc;
1061 unsigned i;
1062
1063 /*
1064 * Don't decrement refcount and don't generate a
1065 * completion if the SWQE is being resent until the send
1066 * is finished.
1067 */
1068 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
1069 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1070 for (i = 0; i < wqe->wr.num_sge; i++) {
1071 struct qib_sge *sge = &wqe->sg_list[i];
1072
1073 atomic_dec(&sge->mr->refcount);
1074 }
1075 /* Post a send completion queue entry if requested. */
1076 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1077 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1078 memset(&wc, 0, sizeof wc);
1079 wc.wr_id = wqe->wr.wr_id;
1080 wc.status = IB_WC_SUCCESS;
1081 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1082 wc.byte_len = wqe->length;
1083 wc.qp = &qp->ibqp;
1084 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1085 }
1086 if (++qp->s_last >= qp->s_size)
1087 qp->s_last = 0;
1088 } else
1089 ibp->n_rc_delayed_comp++;
1090
1091 qp->s_retry = qp->s_retry_cnt;
1092 update_last_psn(qp, wqe->lpsn);
1093
1094 /*
1095 * If we are completing a request which is in the process of
1096 * being resent, we can stop resending it since we know the
1097 * responder has already seen it.
1098 */
1099 if (qp->s_acked == qp->s_cur) {
1100 if (++qp->s_cur >= qp->s_size)
1101 qp->s_cur = 0;
1102 qp->s_acked = qp->s_cur;
1103 wqe = get_swqe_ptr(qp, qp->s_cur);
1104 if (qp->s_acked != qp->s_tail) {
1105 qp->s_state = OP(SEND_LAST);
1106 qp->s_psn = wqe->psn;
1107 }
1108 } else {
1109 if (++qp->s_acked >= qp->s_size)
1110 qp->s_acked = 0;
1111 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1112 qp->s_draining = 0;
1113 wqe = get_swqe_ptr(qp, qp->s_acked);
1114 }
1115 return wqe;
1116}
1117
1118/**
1119 * do_rc_ack - process an incoming RC ACK
1120 * @qp: the QP the ACK came in on
1121 * @psn: the packet sequence number of the ACK
1122 * @opcode: the opcode of the request that resulted in the ACK
1123 *
1124 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
1125 * for the given QP.
1126 * Called at interrupt level with the QP s_lock held.
1127 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1128 */
1129static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
1130 u64 val, struct qib_ctxtdata *rcd)
1131{
1132 struct qib_ibport *ibp;
1133 enum ib_wc_status status;
1134 struct qib_swqe *wqe;
1135 int ret = 0;
1136 u32 ack_psn;
1137 int diff;
1138
1139 /* Remove QP from retry timer */
1140 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1141 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1142 del_timer(&qp->s_timer);
1143 }
1144
1145 /*
1146 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1147 * requests and implicitly NAK RDMA read and atomic requests issued
1148 * before the NAK'ed request. The MSN won't include the NAK'ed
1149	 * request but will include any ACK'ed requests.
1150 */
1151 ack_psn = psn;
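	/*
	 * A non-zero AETH code (NAK or RNR NAK) means the reported PSN
	 * itself was not ACK'ed, so only earlier PSNs are implicitly done.
	 */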
1152 if (aeth >> 29)
1153 ack_psn--;
1154 wqe = get_swqe_ptr(qp, qp->s_acked);
1155 ibp = to_iport(qp->ibqp.device, qp->port_num);
1156
1157 /*
1158 * The MSN might be for a later WQE than the PSN indicates so
1159 * only complete WQEs that the PSN finishes.
1160 */
1161 while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
1162 /*
1163 * RDMA_READ_RESPONSE_ONLY is a special case since
1164 * we want to generate completion events for everything
1165 * before the RDMA read, copy the data, then generate
1166 * the completion for the read.
1167 */
1168 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1169 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1170 diff == 0) {
1171 ret = 1;
1172 goto bail;
1173 }
1174	 * If this request is an RDMA read or atomic, and the ACK is
1175	 * for a later operation, this ACK NAKs the RDMA read or
1176	 * atomic.  In other words, only an RDMA_READ_LAST or ONLY
1177	 * can ACK an RDMA read and likewise for atomic ops.  Note
1178 * can ACK a RDMA read and likewise for atomic ops. Note
1179 * that the NAK case can only happen if relaxed ordering is
1180 * used and requests are sent after an RDMA read or atomic
1181 * is sent but before the response is received.
1182 */
1183 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1184 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1185 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1186 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1187 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1188 /* Retry this request. */
1189 if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
1190 qp->r_flags |= QIB_R_RDMAR_SEQ;
1191 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1192 if (list_empty(&qp->rspwait)) {
1193 qp->r_flags |= QIB_R_RSP_SEND;
1194 atomic_inc(&qp->refcount);
1195 list_add_tail(&qp->rspwait,
1196 &rcd->qp_wait_list);
1197 }
1198 }
1199 /*
1200 * No need to process the ACK/NAK since we are
1201 * restarting an earlier request.
1202 */
1203 goto bail;
1204 }
1205 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1206 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1207 u64 *vaddr = wqe->sg_list[0].vaddr;
1208 *vaddr = val;
1209 }
1210 if (qp->s_num_rd_atomic &&
1211 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1212 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1213 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1214 qp->s_num_rd_atomic--;
1215 /* Restart sending task if fence is complete */
1216 if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
1217 !qp->s_num_rd_atomic) {
1218 qp->s_flags &= ~(QIB_S_WAIT_FENCE |
1219 QIB_S_WAIT_ACK);
1220 qib_schedule_send(qp);
1221 } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
1222 qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
1223 QIB_S_WAIT_ACK);
1224 qib_schedule_send(qp);
1225 }
1226 }
1227 wqe = do_rc_completion(qp, wqe, ibp);
1228 if (qp->s_acked == qp->s_tail)
1229 break;
1230 }
1231
1232 switch (aeth >> 29) {
1233 case 0: /* ACK */
1234 ibp->n_rc_acks++;
1235 if (qp->s_acked != qp->s_tail) {
1236 /*
1237 * We are expecting more ACKs so
1238 * reset the retransmit timer.
1239 */
1240 start_timer(qp);
1241 /*
1242 * We can stop resending the earlier packets and
1243 * continue with the next packet the receiver wants.
1244 */
1245 if (qib_cmp24(qp->s_psn, psn) <= 0)
1246 reset_psn(qp, psn + 1);
1247 } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
1248 qp->s_state = OP(SEND_LAST);
1249 qp->s_psn = psn + 1;
1250 }
1251 if (qp->s_flags & QIB_S_WAIT_ACK) {
1252 qp->s_flags &= ~QIB_S_WAIT_ACK;
1253 qib_schedule_send(qp);
1254 }
1255 qib_get_credit(qp, aeth);
1256 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1257 qp->s_retry = qp->s_retry_cnt;
1258 update_last_psn(qp, psn);
1259 ret = 1;
1260 goto bail;
1261
1262 case 1: /* RNR NAK */
1263 ibp->n_rnr_naks++;
1264 if (qp->s_acked == qp->s_tail)
1265 goto bail;
1266 if (qp->s_flags & QIB_S_WAIT_RNR)
1267 goto bail;
1268 if (qp->s_rnr_retry == 0) {
1269 status = IB_WC_RNR_RETRY_EXC_ERR;
1270 goto class_b;
1271 }
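		/* A retry count of 7 means retry indefinitely, so leave it. */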
1272 if (qp->s_rnr_retry_cnt < 7)
1273 qp->s_rnr_retry--;
1274
1275 /* The last valid PSN is the previous PSN. */
1276 update_last_psn(qp, psn - 1);
1277
1278 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
1279
1280 reset_psn(qp, psn);
1281
1282 qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
1283 qp->s_flags |= QIB_S_WAIT_RNR;
1284 qp->s_timer.function = qib_rc_rnr_retry;
1285 qp->s_timer.expires = jiffies + usecs_to_jiffies(
1286 ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
1287 QIB_AETH_CREDIT_MASK]);
1288 add_timer(&qp->s_timer);
1289 goto bail;
1290
1291 case 3: /* NAK */
1292 if (qp->s_acked == qp->s_tail)
1293 goto bail;
1294 /* The last valid PSN is the previous PSN. */
1295 update_last_psn(qp, psn - 1);
1296 switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
1297 QIB_AETH_CREDIT_MASK) {
1298 case 0: /* PSN sequence error */
1299 ibp->n_seq_naks++;
1300 /*
1301 * Back up to the responder's expected PSN.
1302 * Note that we might get a NAK in the middle of an
1303 * RDMA READ response which terminates the RDMA
1304 * READ.
1305 */
1306 qib_restart_rc(qp, psn, 0);
1307 qib_schedule_send(qp);
1308 break;
1309
1310 case 1: /* Invalid Request */
1311 status = IB_WC_REM_INV_REQ_ERR;
1312 ibp->n_other_naks++;
1313 goto class_b;
1314
1315 case 2: /* Remote Access Error */
1316 status = IB_WC_REM_ACCESS_ERR;
1317 ibp->n_other_naks++;
1318 goto class_b;
1319
1320 case 3: /* Remote Operation Error */
1321 status = IB_WC_REM_OP_ERR;
1322 ibp->n_other_naks++;
1323class_b:
1324 if (qp->s_last == qp->s_acked) {
1325 qib_send_complete(qp, wqe, status);
1326 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1327 }
1328 break;
1329
1330 default:
1331 /* Ignore other reserved NAK error codes */
1332 goto reserved;
1333 }
1334 qp->s_retry = qp->s_retry_cnt;
1335 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1336 goto bail;
1337
1338 default: /* 2: reserved */
1339reserved:
1340 /* Ignore reserved NAK codes. */
1341 goto bail;
1342 }
1343
1344bail:
1345 return ret;
1346}
1347
1348/*
1349 * We have seen an out of sequence RDMA read middle or last packet.
1350 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1351 */
1352static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
1353 struct qib_ctxtdata *rcd)
1354{
1355 struct qib_swqe *wqe;
1356
1357 /* Remove QP from retry timer */
1358 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1359 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1360 del_timer(&qp->s_timer);
1361 }
1362
1363 wqe = get_swqe_ptr(qp, qp->s_acked);
1364
1365 while (qib_cmp24(psn, wqe->lpsn) > 0) {
1366 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1367 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1368 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1369 break;
1370 wqe = do_rc_completion(qp, wqe, ibp);
1371 }
1372
1373 ibp->n_rdma_seq++;
1374 qp->r_flags |= QIB_R_RDMAR_SEQ;
1375 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1376 if (list_empty(&qp->rspwait)) {
1377 qp->r_flags |= QIB_R_RSP_SEND;
1378 atomic_inc(&qp->refcount);
1379 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1380 }
1381}
1382
1383/**
1384 * qib_rc_rcv_resp - process an incoming RC response packet
1385 * @ibp: the port this packet came in on
1386 * @ohdr: the other headers for this packet
1387 * @data: the packet data
1388 * @tlen: the packet length
1389 * @qp: the QP for this packet
1390 * @opcode: the opcode for this packet
1391 * @psn: the packet sequence number for this packet
1392 * @hdrsize: the header length
1393 * @pmtu: the path MTU
1394 *
1395 * This is called from qib_rc_rcv() to process an incoming RC response
1396 * packet for the given QP.
1397 * Called at interrupt level.
1398 */
1399static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1400 struct qib_other_headers *ohdr,
1401 void *data, u32 tlen,
1402 struct qib_qp *qp,
1403 u32 opcode,
1404 u32 psn, u32 hdrsize, u32 pmtu,
1405 struct qib_ctxtdata *rcd)
1406{
1407 struct qib_swqe *wqe;
1408 enum ib_wc_status status;
1409 unsigned long flags;
1410 int diff;
1411 u32 pad;
1412 u32 aeth;
1413 u64 val;
1414
1415 spin_lock_irqsave(&qp->s_lock, flags);
1416
1417 /* Double check we can process this now that we hold the s_lock. */
1418 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1419 goto ack_done;
1420
1421 /* Ignore invalid responses. */
1422 if (qib_cmp24(psn, qp->s_next_psn) >= 0)
1423 goto ack_done;
1424
1425 /* Ignore duplicate responses. */
1426 diff = qib_cmp24(psn, qp->s_last_psn);
1427 if (unlikely(diff <= 0)) {
1428 /* Update credits for "ghost" ACKs */
1429 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1430 aeth = be32_to_cpu(ohdr->u.aeth);
1431 if ((aeth >> 29) == 0)
1432 qib_get_credit(qp, aeth);
1433 }
1434 goto ack_done;
1435 }
1436
1437 /*
1438 * Skip everything other than the PSN we expect, if we are waiting
1439 * for a reply to a restarted RDMA read or atomic op.
1440 */
1441 if (qp->r_flags & QIB_R_RDMAR_SEQ) {
1442 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1443 goto ack_done;
1444 qp->r_flags &= ~QIB_R_RDMAR_SEQ;
1445 }
1446
1447 if (unlikely(qp->s_acked == qp->s_tail))
1448 goto ack_done;
1449 wqe = get_swqe_ptr(qp, qp->s_acked);
1450 status = IB_WC_SUCCESS;
1451
1452 switch (opcode) {
1453 case OP(ACKNOWLEDGE):
1454 case OP(ATOMIC_ACKNOWLEDGE):
1455 case OP(RDMA_READ_RESPONSE_FIRST):
1456 aeth = be32_to_cpu(ohdr->u.aeth);
1457 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1458 __be32 *p = ohdr->u.at.atomic_ack_eth;
1459
1460 val = ((u64) be32_to_cpu(p[0]) << 32) |
1461 be32_to_cpu(p[1]);
1462 } else
1463 val = 0;
1464 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1465 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1466 goto ack_done;
1467 hdrsize += 4;
1468 wqe = get_swqe_ptr(qp, qp->s_acked);
1469 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1470 goto ack_op_err;
1471 /*
1472 * If this is a response to a resent RDMA read, we
1473 * have to be careful to copy the data to the right
1474 * location.
1475 */
1476 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1477 wqe, psn, pmtu);
1478 goto read_middle;
1479
1480 case OP(RDMA_READ_RESPONSE_MIDDLE):
1481 /* no AETH, no ACK */
1482 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1483 goto ack_seq_err;
1484 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1485 goto ack_op_err;
1486read_middle:
1487 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1488 goto ack_len_err;
1489 if (unlikely(pmtu >= qp->s_rdma_read_len))
1490 goto ack_len_err;
1491
1492 /*
1493 * We got a response so update the timeout.
1494 * 4.096 usec. * (1 << qp->timeout)
1495 */
1496 qp->s_flags |= QIB_S_TIMER;
1497 mod_timer(&qp->s_timer, jiffies +
1498 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1499 1000UL));
1500 if (qp->s_flags & QIB_S_WAIT_ACK) {
1501 qp->s_flags &= ~QIB_S_WAIT_ACK;
1502 qib_schedule_send(qp);
1503 }
1504
1505 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1506 qp->s_retry = qp->s_retry_cnt;
1507
1508 /*
1509 * Update the RDMA receive state but do the copy w/o
1510 * holding the locks and blocking interrupts.
1511 */
1512 qp->s_rdma_read_len -= pmtu;
1513 update_last_psn(qp, psn);
1514 spin_unlock_irqrestore(&qp->s_lock, flags);
1515 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1516 goto bail;
1517
1518 case OP(RDMA_READ_RESPONSE_ONLY):
1519 aeth = be32_to_cpu(ohdr->u.aeth);
1520 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1521 goto ack_done;
1522 /* Get the number of bytes the message was padded by. */
1523 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1524 /*
1525 * Check that the data size is >= 0 && <= pmtu.
1526 * Remember to account for the AETH header (4) and
1527 * ICRC (4).
1528 */
1529 if (unlikely(tlen < (hdrsize + pad + 8)))
1530 goto ack_len_err;
1531 /*
1532 * If this is a response to a resent RDMA read, we
1533 * have to be careful to copy the data to the right
1534 * location.
1535 */
1536 wqe = get_swqe_ptr(qp, qp->s_acked);
1537 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1538 wqe, psn, pmtu);
1539 goto read_last;
1540
1541 case OP(RDMA_READ_RESPONSE_LAST):
1542 /* ACKs READ req. */
1543 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1544 goto ack_seq_err;
1545 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1546 goto ack_op_err;
1547 /* Get the number of bytes the message was padded by. */
1548 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1549 /*
1550 * Check that the data size is >= 1 && <= pmtu.
1551 * Remember to account for the AETH header (4) and
1552 * ICRC (4).
1553 */
1554 if (unlikely(tlen <= (hdrsize + pad + 8)))
1555 goto ack_len_err;
1556read_last:
1557 tlen -= hdrsize + pad + 8;
1558 if (unlikely(tlen != qp->s_rdma_read_len))
1559 goto ack_len_err;
1560 aeth = be32_to_cpu(ohdr->u.aeth);
1561 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1562 WARN_ON(qp->s_rdma_read_sge.num_sge);
1563 (void) do_rc_ack(qp, aeth, psn,
1564 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1565 goto ack_done;
1566 }
1567
1568ack_op_err:
1569 status = IB_WC_LOC_QP_OP_ERR;
1570 goto ack_err;
1571
1572ack_seq_err:
1573 rdma_seq_err(qp, ibp, psn, rcd);
1574 goto ack_done;
1575
1576ack_len_err:
1577 status = IB_WC_LOC_LEN_ERR;
1578ack_err:
1579 if (qp->s_last == qp->s_acked) {
1580 qib_send_complete(qp, wqe, status);
1581 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1582 }
1583ack_done:
1584 spin_unlock_irqrestore(&qp->s_lock, flags);
1585bail:
1586 return;
1587}
1588
1589/**
1590 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1591 * @ohdr: the other headers for this packet
1592 * @data: the packet data
1593 * @qp: the QP for this packet
1594 * @opcode: the opcode for this packet
1595 * @psn: the packet sequence number for this packet
1596 * @diff: the difference between the PSN and the expected PSN
1597 *
1598 * This is called from qib_rc_rcv() to process an unexpected
1599 * incoming RC packet for the given QP.
1600 * Called at interrupt level.
1601 * Return 1 if no more processing is needed; otherwise return 0 to
1602 * schedule a response to be sent.
1603 */
1604static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
1605 void *data,
1606 struct qib_qp *qp,
1607 u32 opcode,
1608 u32 psn,
1609 int diff,
1610 struct qib_ctxtdata *rcd)
1611{
1612 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1613 struct qib_ack_entry *e;
1614 unsigned long flags;
1615 u8 i, prev;
1616 int old_req;
1617
1618 if (diff > 0) {
1619 /*
1620 * Packet sequence error.
1621 * A NAK will ACK earlier sends and RDMA writes.
1622 * Don't queue the NAK if we already sent one.
1623 */
1624 if (!qp->r_nak_state) {
1625 ibp->n_rc_seqnak++;
1626 qp->r_nak_state = IB_NAK_PSN_ERROR;
1627 /* Use the expected PSN. */
1628 qp->r_ack_psn = qp->r_psn;
1629 /*
1630 * Wait to send the sequence NAK until all packets
1631 * in the receive queue have been processed.
1632 * Otherwise, we end up propagating congestion.
1633 */
1634 if (list_empty(&qp->rspwait)) {
1635 qp->r_flags |= QIB_R_RSP_NAK;
1636 atomic_inc(&qp->refcount);
1637 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1638 }
1639 }
1640 goto done;
1641 }
1642
1643 /*
1644 * Handle a duplicate request. Don't re-execute SEND, RDMA
1645 * write or atomic op. Don't NAK errors, just silently drop
1646 * the duplicate request. Note that r_sge, r_len, and
1647 * r_rcv_len may be in use so don't modify them.
1648 *
1649 * We are supposed to ACK the earliest duplicate PSN but we
1650 * can coalesce an outstanding duplicate ACK. We have to
1651 * send the earliest so that RDMA reads can be restarted at
1652 * the requester's expected PSN.
1653 *
1654 * First, find where this duplicate PSN falls within the
1655 * ACKs previously sent.
1656 * old_req is true if there is an older response that is scheduled
1657 * to be sent before sending this one.
1658 */
1659 e = NULL;
1660 old_req = 1;
1661 ibp->n_rc_dupreq++;
1662
1663 spin_lock_irqsave(&qp->s_lock, flags);
1664 /* Double check we can process this now that we hold the s_lock. */
1665 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1666 goto unlock_done;
1667
1668 for (i = qp->r_head_ack_queue; ; i = prev) {
1669 if (i == qp->s_tail_ack_queue)
1670 old_req = 0;
1671 if (i)
1672 prev = i - 1;
1673 else
1674 prev = QIB_MAX_RDMA_ATOMIC;
1675 if (prev == qp->r_head_ack_queue) {
1676 e = NULL;
1677 break;
1678 }
1679 e = &qp->s_ack_queue[prev];
1680 if (!e->opcode) {
1681 e = NULL;
1682 break;
1683 }
1684 if (qib_cmp24(psn, e->psn) >= 0) {
1685 if (prev == qp->s_tail_ack_queue &&
1686 qib_cmp24(psn, e->lpsn) <= 0)
1687 old_req = 0;
1688 break;
1689 }
1690 }
1691 switch (opcode) {
1692 case OP(RDMA_READ_REQUEST): {
1693 struct ib_reth *reth;
1694 u32 offset;
1695 u32 len;
1696
1697 /*
1698 * If we didn't find the RDMA read request in the ack queue,
1699 * we can ignore this request.
1700 */
1701 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1702 goto unlock_done;
1703 /* RETH comes after BTH */
1704 reth = &ohdr->u.rc.reth;
1705 /*
1706 * Address range must be a subset of the original
1707 * request and start on pmtu boundaries.
1708 * We reuse the old ack_queue slot since the requester
1709 * should not back up and request an earlier PSN for the
1710 * same request.
1711 */
1712 offset = ((psn - e->psn) & QIB_PSN_MASK) *
1713 ib_mtu_enum_to_int(qp->path_mtu);
1714 len = be32_to_cpu(reth->length);
1715 if (unlikely(offset + len != e->rdma_sge.sge_length))
1716 goto unlock_done;
1717 if (e->rdma_sge.mr) {
1718 atomic_dec(&e->rdma_sge.mr->refcount);
1719 e->rdma_sge.mr = NULL;
1720 }
1721 if (len != 0) {
1722 u32 rkey = be32_to_cpu(reth->rkey);
1723 u64 vaddr = be64_to_cpu(reth->vaddr);
1724 int ok;
1725
1726 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1727 IB_ACCESS_REMOTE_READ);
1728 if (unlikely(!ok))
1729 goto unlock_done;
1730 } else {
1731 e->rdma_sge.vaddr = NULL;
1732 e->rdma_sge.length = 0;
1733 e->rdma_sge.sge_length = 0;
1734 }
1735 e->psn = psn;
1736 if (old_req)
1737 goto unlock_done;
1738 qp->s_tail_ack_queue = prev;
1739 break;
1740 }
1741
1742 case OP(COMPARE_SWAP):
1743 case OP(FETCH_ADD): {
1744 /*
1745 * If we didn't find the atomic request in the ack queue
1746 * or the send tasklet is already backed up to send an
1747 * earlier entry, we can ignore this request.
1748 */
1749 if (!e || e->opcode != (u8) opcode || old_req)
1750 goto unlock_done;
1751 qp->s_tail_ack_queue = prev;
1752 break;
1753 }
1754
1755 default:
1756 /*
1757 * Ignore this operation if it doesn't request an ACK
1758 * or an earlier RDMA read or atomic is going to be resent.
1759 */
1760 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1761 goto unlock_done;
1762 /*
1763 * Resend the most recent ACK if this request is
1764 * after all the previous RDMA reads and atomics.
1765 */
1766 if (i == qp->r_head_ack_queue) {
1767 spin_unlock_irqrestore(&qp->s_lock, flags);
1768 qp->r_nak_state = 0;
1769 qp->r_ack_psn = qp->r_psn - 1;
1770 goto send_ack;
1771 }
1772 /*
1773 * Try to send a simple ACK to work around a Mellanox bug
1774 * which doesn't accept a RDMA read response or atomic
1775 * response as an ACK for earlier SENDs or RDMA writes.
1776 */
1777 if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
1778 spin_unlock_irqrestore(&qp->s_lock, flags);
1779 qp->r_nak_state = 0;
1780 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1781 goto send_ack;
1782 }
1783 /*
1784 * Resend the RDMA read or atomic op which
1785 * ACKs this duplicate request.
1786 */
1787 qp->s_tail_ack_queue = i;
1788 break;
1789 }
1790 qp->s_ack_state = OP(ACKNOWLEDGE);
1791 qp->s_flags |= QIB_S_RESP_PENDING;
1792 qp->r_nak_state = 0;
1793 qib_schedule_send(qp);
1794
1795unlock_done:
1796 spin_unlock_irqrestore(&qp->s_lock, flags);
1797done:
1798 return 1;
1799
1800send_ack:
1801 return 0;
1802}
1803
1804void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
1805{
1806 unsigned long flags;
1807 int lastwqe;
1808
1809 spin_lock_irqsave(&qp->s_lock, flags);
1810 lastwqe = qib_error_qp(qp, err);
1811 spin_unlock_irqrestore(&qp->s_lock, flags);
1812
1813 if (lastwqe) {
1814 struct ib_event ev;
1815
1816 ev.device = qp->ibqp.device;
1817 ev.element.qp = &qp->ibqp;
1818 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1819 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1820 }
1821}
1822
1823static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
1824{
1825 unsigned next;
1826
1827 next = n + 1;
1828 if (next > QIB_MAX_RDMA_ATOMIC)
1829 next = 0;
1830 qp->s_tail_ack_queue = next;
1831 qp->s_ack_state = OP(ACKNOWLEDGE);
1832}
1833
1834/**
1835 * qib_rc_rcv - process an incoming RC packet
1836 * @rcd: the context pointer
1837 * @hdr: the header of this packet
1838 * @has_grh: true if the header has a GRH
1839 * @data: the packet data
1840 * @tlen: the packet length
1841 * @qp: the QP for this packet
1842 *
1843 * This is called from qib_qp_rcv() to process an incoming RC packet
1844 * for the given QP.
1845 * Called at interrupt level.
1846 */
1847void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
1848 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
1849{
1850 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1851 struct qib_other_headers *ohdr;
1852 u32 opcode;
1853 u32 hdrsize;
1854 u32 psn;
1855 u32 pad;
1856 struct ib_wc wc;
1857 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1858 int diff;
1859 struct ib_reth *reth;
1860 unsigned long flags;
1861 int ret;
1862
1863 /* Check for GRH */
1864 if (!has_grh) {
1865 ohdr = &hdr->u.oth;
1866 hdrsize = 8 + 12; /* LRH + BTH */
1867 } else {
1868 ohdr = &hdr->u.l.oth;
1869 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1870 }
1871
1872 opcode = be32_to_cpu(ohdr->bth[0]);
1873 spin_lock_irqsave(&qp->s_lock, flags);
1874 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
1875 goto sunlock;
1876 spin_unlock_irqrestore(&qp->s_lock, flags);
1877
1878 psn = be32_to_cpu(ohdr->bth[2]);
1879 opcode >>= 24;
1880
1881 /* Prevent simultaneous processing after APM on different CPUs */
1882 spin_lock(&qp->r_lock);
1883
1884 /*
1885 * Process responses (ACKs) before anything else. Note that the
1886 * packet sequence number will be for something in the send work
1887 * queue rather than the expected receive packet sequence number.
1888 * In other words, this QP is the requester.
1889 */
1890 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1891 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1892 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1893 hdrsize, pmtu, rcd);
1894 goto runlock;
1895 }
1896
1897 /* Compute 24 bits worth of difference. */
1898 diff = qib_cmp24(psn, qp->r_psn);
1899 if (unlikely(diff)) {
1900 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1901 goto runlock;
1902 goto send_ack;
1903 }
1904
1905 /* Check for opcode sequence errors. */
1906 switch (qp->r_state) {
1907 case OP(SEND_FIRST):
1908 case OP(SEND_MIDDLE):
1909 if (opcode == OP(SEND_MIDDLE) ||
1910 opcode == OP(SEND_LAST) ||
1911 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1912 break;
1913 goto nack_inv;
1914
1915 case OP(RDMA_WRITE_FIRST):
1916 case OP(RDMA_WRITE_MIDDLE):
1917 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1918 opcode == OP(RDMA_WRITE_LAST) ||
1919 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1920 break;
1921 goto nack_inv;
1922
1923 default:
1924 if (opcode == OP(SEND_MIDDLE) ||
1925 opcode == OP(SEND_LAST) ||
1926 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1927 opcode == OP(RDMA_WRITE_MIDDLE) ||
1928 opcode == OP(RDMA_WRITE_LAST) ||
1929 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1930 goto nack_inv;
1931 /*
1932 * Note that it is up to the requester to not send a new
1933 * RDMA read or atomic operation before receiving an ACK
1934 * for the previous operation.
1935 */
1936 break;
1937 }
1938
1939 memset(&wc, 0, sizeof wc);
1940
1941 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
1942 qp->r_flags |= QIB_R_COMM_EST;
1943 if (qp->ibqp.event_handler) {
1944 struct ib_event ev;
1945
1946 ev.device = qp->ibqp.device;
1947 ev.element.qp = &qp->ibqp;
1948 ev.event = IB_EVENT_COMM_EST;
1949 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1950 }
1951 }
1952
1953 /* OK, process the packet. */
1954 switch (opcode) {
1955 case OP(SEND_FIRST):
1956 ret = qib_get_rwqe(qp, 0);
1957 if (ret < 0)
1958 goto nack_op_err;
1959 if (!ret)
1960 goto rnr_nak;
1961 qp->r_rcv_len = 0;
1962 /* FALLTHROUGH */
1963 case OP(SEND_MIDDLE):
1964 case OP(RDMA_WRITE_MIDDLE):
1965send_middle:
1966 /* Check for invalid length PMTU or posted rwqe len. */
1967 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1968 goto nack_inv;
1969 qp->r_rcv_len += pmtu;
1970 if (unlikely(qp->r_rcv_len > qp->r_len))
1971 goto nack_inv;
1972 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1973 break;
1974
1975 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1976 /* consume RWQE */
1977 ret = qib_get_rwqe(qp, 1);
1978 if (ret < 0)
1979 goto nack_op_err;
1980 if (!ret)
1981 goto rnr_nak;
1982 goto send_last_imm;
1983
1984 case OP(SEND_ONLY):
1985 case OP(SEND_ONLY_WITH_IMMEDIATE):
1986 ret = qib_get_rwqe(qp, 0);
1987 if (ret < 0)
1988 goto nack_op_err;
1989 if (!ret)
1990 goto rnr_nak;
1991 qp->r_rcv_len = 0;
1992 if (opcode == OP(SEND_ONLY))
1993 goto send_last;
1994 /* FALLTHROUGH */
1995 case OP(SEND_LAST_WITH_IMMEDIATE):
1996send_last_imm:
1997 wc.ex.imm_data = ohdr->u.imm_data;
1998 hdrsize += 4;
1999 wc.wc_flags = IB_WC_WITH_IMM;
2000 /* FALLTHROUGH */
2001 case OP(SEND_LAST):
2002 case OP(RDMA_WRITE_LAST):
2003send_last:
2004 /* Get the number of bytes the message was padded by. */
2005 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
2006 /* Check for invalid length. */
2007 /* XXX LAST len should be >= 1 */
2008 if (unlikely(tlen < (hdrsize + pad + 4)))
2009 goto nack_inv;
2010 /* Don't count the CRC. */
2011 tlen -= (hdrsize + pad + 4);
2012 wc.byte_len = tlen + qp->r_rcv_len;
2013 if (unlikely(wc.byte_len > qp->r_len))
2014 goto nack_inv;
2015 qib_copy_sge(&qp->r_sge, data, tlen, 1);
2016 while (qp->r_sge.num_sge) {
2017 atomic_dec(&qp->r_sge.sge.mr->refcount);
2018 if (--qp->r_sge.num_sge)
2019 qp->r_sge.sge = *qp->r_sge.sg_list++;
2020 }
2021 qp->r_msn++;
2022 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
2023 break;
2024 wc.wr_id = qp->r_wr_id;
2025 wc.status = IB_WC_SUCCESS;
2026 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2027 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2028 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2029 else
2030 wc.opcode = IB_WC_RECV;
2031 wc.qp = &qp->ibqp;
2032 wc.src_qp = qp->remote_qpn;
2033 wc.slid = qp->remote_ah_attr.dlid;
2034 wc.sl = qp->remote_ah_attr.sl;
2035 /* Signal completion event if the solicited bit is set. */
2036 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
2037 (ohdr->bth[0] &
2038 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
2039 break;
2040
2041 case OP(RDMA_WRITE_FIRST):
2042 case OP(RDMA_WRITE_ONLY):
2043 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2044 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2045 goto nack_inv;
2046 /* consume RWQE */
2047 reth = &ohdr->u.rc.reth;
2048 hdrsize += sizeof(*reth);
2049 qp->r_len = be32_to_cpu(reth->length);
2050 qp->r_rcv_len = 0;
2051 qp->r_sge.sg_list = NULL;
2052 if (qp->r_len != 0) {
2053 u32 rkey = be32_to_cpu(reth->rkey);
2054 u64 vaddr = be64_to_cpu(reth->vaddr);
2055 int ok;
2056
2057 /* Check rkey & NAK */
2058 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2059 rkey, IB_ACCESS_REMOTE_WRITE);
2060 if (unlikely(!ok))
2061 goto nack_acc;
2062 qp->r_sge.num_sge = 1;
2063 } else {
2064 qp->r_sge.num_sge = 0;
2065 qp->r_sge.sge.mr = NULL;
2066 qp->r_sge.sge.vaddr = NULL;
2067 qp->r_sge.sge.length = 0;
2068 qp->r_sge.sge.sge_length = 0;
2069 }
2070 if (opcode == OP(RDMA_WRITE_FIRST))
2071 goto send_middle;
2072 else if (opcode == OP(RDMA_WRITE_ONLY))
2073 goto send_last;
2074 ret = qib_get_rwqe(qp, 1);
2075 if (ret < 0)
2076 goto nack_op_err;
2077 if (!ret)
2078 goto rnr_nak;
2079 goto send_last_imm;
2080
2081 case OP(RDMA_READ_REQUEST): {
2082 struct qib_ack_entry *e;
2083 u32 len;
2084 u8 next;
2085
2086 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2087 goto nack_inv;
2088 next = qp->r_head_ack_queue + 1;
2089 /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
2090 if (next > QIB_MAX_RDMA_ATOMIC)
2091 next = 0;
2092 spin_lock_irqsave(&qp->s_lock, flags);
2093 /* Double check we can process this while holding the s_lock. */
2094 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
2095 goto srunlock;
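		/*
		 * The ack queue is full when the new head would catch the
		 * tail; the tail slot can be reused only if its response
		 * has already been sent, otherwise NAK the request.
		 */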
2096 if (unlikely(next == qp->s_tail_ack_queue)) {
2097 if (!qp->s_ack_queue[next].sent)
2098 goto nack_inv_unlck;
2099 qib_update_ack_queue(qp, next);
2100 }
2101 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2102 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2103 atomic_dec(&e->rdma_sge.mr->refcount);
2104 e->rdma_sge.mr = NULL;
2105 }
2106 reth = &ohdr->u.rc.reth;
2107 len = be32_to_cpu(reth->length);
2108 if (len) {
2109 u32 rkey = be32_to_cpu(reth->rkey);
2110 u64 vaddr = be64_to_cpu(reth->vaddr);
2111 int ok;
2112
2113 /* Check rkey & NAK */
2114 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2115 rkey, IB_ACCESS_REMOTE_READ);
2116 if (unlikely(!ok))
2117 goto nack_acc_unlck;
2118 /*
2119 * Update the next expected PSN. We add 1 later
2120 * below, so only add the remainder here.
2121 */
2122 if (len > pmtu)
2123 qp->r_psn += (len - 1) / pmtu;
2124 } else {
2125 e->rdma_sge.mr = NULL;
2126 e->rdma_sge.vaddr = NULL;
2127 e->rdma_sge.length = 0;
2128 e->rdma_sge.sge_length = 0;
2129 }
2130 e->opcode = opcode;
2131 e->sent = 0;
2132 e->psn = psn;
2133 e->lpsn = qp->r_psn;
2134 /*
2135 * We need to increment the MSN here instead of when we
2136 * finish sending the result since a duplicate request would
2137 * increment it more than once.
2138 */
2139 qp->r_msn++;
2140 qp->r_psn++;
2141 qp->r_state = opcode;
2142 qp->r_nak_state = 0;
2143 qp->r_head_ack_queue = next;
2144
2145 /* Schedule the send tasklet. */
2146 qp->s_flags |= QIB_S_RESP_PENDING;
2147 qib_schedule_send(qp);
2148
2149 goto srunlock;
2150 }
2151
2152 case OP(COMPARE_SWAP):
2153 case OP(FETCH_ADD): {
2154 struct ib_atomic_eth *ateth;
2155 struct qib_ack_entry *e;
2156 u64 vaddr;
2157 atomic64_t *maddr;
2158 u64 sdata;
2159 u32 rkey;
2160 u8 next;
2161
2162 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2163 goto nack_inv;
2164 next = qp->r_head_ack_queue + 1;
2165 if (next > QIB_MAX_RDMA_ATOMIC)
2166 next = 0;
2167 spin_lock_irqsave(&qp->s_lock, flags);
2168 /* Double check we can process this while holding the s_lock. */
2169 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
2170 goto srunlock;
2171 if (unlikely(next == qp->s_tail_ack_queue)) {
2172 if (!qp->s_ack_queue[next].sent)
2173 goto nack_inv_unlck;
2174 qib_update_ack_queue(qp, next);
2175 }
2176 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2177 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2178 atomic_dec(&e->rdma_sge.mr->refcount);
2179 e->rdma_sge.mr = NULL;
2180 }
2181 ateth = &ohdr->u.atomic_eth;
2182 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
2183 be32_to_cpu(ateth->vaddr[1]);
2184 if (unlikely(vaddr & (sizeof(u64) - 1)))
2185 goto nack_inv_unlck;
2186 rkey = be32_to_cpu(ateth->rkey);
2187 /* Check rkey & NAK */
2188 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2189 vaddr, rkey,
2190 IB_ACCESS_REMOTE_ATOMIC)))
2191 goto nack_acc_unlck;
2192 /* Perform atomic OP and save result. */
2193 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2194 sdata = be64_to_cpu(ateth->swap_data);
2195 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2196 (u64) atomic64_add_return(sdata, maddr) - sdata :
2197 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2198 be64_to_cpu(ateth->compare_data),
2199 sdata);
2200 atomic_dec(&qp->r_sge.sge.mr->refcount);
2201 qp->r_sge.num_sge = 0;
2202 e->opcode = opcode;
2203 e->sent = 0;
2204 e->psn = psn;
2205 e->lpsn = psn;
2206 qp->r_msn++;
2207 qp->r_psn++;
2208 qp->r_state = opcode;
2209 qp->r_nak_state = 0;
2210 qp->r_head_ack_queue = next;
2211
2212 /* Schedule the send tasklet. */
2213 qp->s_flags |= QIB_S_RESP_PENDING;
2214 qib_schedule_send(qp);
2215
2216 goto srunlock;
2217 }
2218
2219 default:
2220 /* NAK unknown opcodes. */
2221 goto nack_inv;
2222 }
2223 qp->r_psn++;
2224 qp->r_state = opcode;
2225 qp->r_ack_psn = psn;
2226 qp->r_nak_state = 0;
2227 /* Send an ACK if requested or required. */
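	/* Bit 31 of bth[2] is the BTH AckReq bit (IB_BTH_REQ_ACK above). */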
2228 if (psn & (1 << 31))
2229 goto send_ack;
2230 goto runlock;
2231
2232rnr_nak:
2233 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2234 qp->r_ack_psn = qp->r_psn;
2235 /* Queue RNR NAK for later */
2236 if (list_empty(&qp->rspwait)) {
2237 qp->r_flags |= QIB_R_RSP_NAK;
2238 atomic_inc(&qp->refcount);
2239 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2240 }
2241 goto runlock;
2242
2243nack_op_err:
2244 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2245 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2246 qp->r_ack_psn = qp->r_psn;
2247 /* Queue NAK for later */
2248 if (list_empty(&qp->rspwait)) {
2249 qp->r_flags |= QIB_R_RSP_NAK;
2250 atomic_inc(&qp->refcount);
2251 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2252 }
2253 goto runlock;
2254
2255nack_inv_unlck:
2256 spin_unlock_irqrestore(&qp->s_lock, flags);
2257nack_inv:
2258 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2259 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2260 qp->r_ack_psn = qp->r_psn;
2261 /* Queue NAK for later */
2262 if (list_empty(&qp->rspwait)) {
2263 qp->r_flags |= QIB_R_RSP_NAK;
2264 atomic_inc(&qp->refcount);
2265 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2266 }
2267 goto runlock;
2268
2269nack_acc_unlck:
2270 spin_unlock_irqrestore(&qp->s_lock, flags);
2271nack_acc:
2272 qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2273 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2274 qp->r_ack_psn = qp->r_psn;
2275send_ack:
2276 qib_send_rc_ack(qp);
2277runlock:
2278 spin_unlock(&qp->r_lock);
2279 return;
2280
2281srunlock:
2282 spin_unlock_irqrestore(&qp->s_lock, flags);
2283 spin_unlock(&qp->r_lock);
2284 return;
2285
2286sunlock:
2287 spin_unlock_irqrestore(&qp->s_lock, flags);
2288}
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
new file mode 100644
index 000000000000..eb78d9367f06
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -0,0 +1,817 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/spinlock.h>
35
36#include "qib.h"
37#include "qib_mad.h"
38
39/*
40 * Convert the AETH RNR timeout code into the number of microseconds.
41 */
42const u32 ib_qib_rnr_table[32] = {
43 655360, /* 00: 655.36 */
44 10, /* 01: .01 */
45	 20,	/* 02: .02 */
46 30, /* 03: .03 */
47 40, /* 04: .04 */
48 60, /* 05: .06 */
49 80, /* 06: .08 */
50 120, /* 07: .12 */
51 160, /* 08: .16 */
52 240, /* 09: .24 */
53 320, /* 0A: .32 */
54 480, /* 0B: .48 */
55 640, /* 0C: .64 */
56 960, /* 0D: .96 */
57 1280, /* 0E: 1.28 */
58 1920, /* 0F: 1.92 */
59 2560, /* 10: 2.56 */
60 3840, /* 11: 3.84 */
61 5120, /* 12: 5.12 */
62 7680, /* 13: 7.68 */
63 10240, /* 14: 10.24 */
64 15360, /* 15: 15.36 */
65 20480, /* 16: 20.48 */
66 30720, /* 17: 30.72 */
67 40960, /* 18: 40.96 */
68 61440, /* 19: 61.44 */
69 81920, /* 1A: 81.92 */
70 122880, /* 1B: 122.88 */
71 163840, /* 1C: 163.84 */
72 245760, /* 1D: 245.76 */
73 327680, /* 1E: 327.68 */
74 491520 /* 1F: 491.52 */
75};
76
77/*
78 * Validate a RWQE and fill in the SGE state.
79 * Return 1 if OK.
80 */
81static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
82{
83 int i, j, ret;
84 struct ib_wc wc;
85 struct qib_lkey_table *rkt;
86 struct qib_pd *pd;
87 struct qib_sge_state *ss;
88
89 rkt = &to_idev(qp->ibqp.device)->lk_table;
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
91 ss = &qp->r_sge;
92 ss->sg_list = qp->r_sg_list;
93 qp->r_len = 0;
94 for (i = j = 0; i < wqe->num_sge; i++) {
95 if (wqe->sg_list[i].length == 0)
96 continue;
97 /* Check LKEY */
98 if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
99 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
100 goto bad_lkey;
101 qp->r_len += wqe->sg_list[i].length;
102 j++;
103 }
104 ss->num_sge = j;
105 ss->total_len = qp->r_len;
106 ret = 1;
107 goto bail;
108
109bad_lkey:
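	/* Release the MR references already taken before reporting the error. */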
110 while (j) {
111 struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
112
113 atomic_dec(&sge->mr->refcount);
114 }
115 ss->num_sge = 0;
116 memset(&wc, 0, sizeof(wc));
117 wc.wr_id = wqe->wr_id;
118 wc.status = IB_WC_LOC_PROT_ERR;
119 wc.opcode = IB_WC_RECV;
120 wc.qp = &qp->ibqp;
121 /* Signal solicited completion event. */
122 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
123 ret = 0;
124bail:
125 return ret;
126}
127
128/**
129 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
130 * @qp: the QP
131 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
132 *
133 * Return -1 if there is a local error, 0 if no RWQE is available,
134 * otherwise return 1.
135 *
136 * Can be called from interrupt level.
137 */
138int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
139{
140 unsigned long flags;
141 struct qib_rq *rq;
142 struct qib_rwq *wq;
143 struct qib_srq *srq;
144 struct qib_rwqe *wqe;
145 void (*handler)(struct ib_event *, void *);
146 u32 tail;
147 int ret;
148
149 if (qp->ibqp.srq) {
150 srq = to_isrq(qp->ibqp.srq);
151 handler = srq->ibsrq.event_handler;
152 rq = &srq->rq;
153 } else {
154 srq = NULL;
155 handler = NULL;
156 rq = &qp->r_rq;
157 }
158
159 spin_lock_irqsave(&rq->lock, flags);
160 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
161 ret = 0;
162 goto unlock;
163 }
164
165 wq = rq->wq;
166 tail = wq->tail;
167 /* Validate tail before using it since it is user writable. */
168 if (tail >= rq->size)
169 tail = 0;
170 if (unlikely(tail == wq->head)) {
171 ret = 0;
172 goto unlock;
173 }
174 /* Make sure entry is read after head index is read. */
175 smp_rmb();
176 wqe = get_rwqe_ptr(rq, tail);
177 /*
178 * Even though we update the tail index in memory, the verbs
179 * consumer is not supposed to post more entries until a
180 * completion is generated.
181 */
182 if (++tail >= rq->size)
183 tail = 0;
184 wq->tail = tail;
185 if (!wr_id_only && !qib_init_sge(qp, wqe)) {
186 ret = -1;
187 goto unlock;
188 }
189 qp->r_wr_id = wqe->wr_id;
190
191 ret = 1;
192 set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
193 if (handler) {
194 u32 n;
195
196 /*
197 * Validate head pointer value and compute
198 * the number of remaining WQEs.
199 */
200 n = wq->head;
201 if (n >= rq->size)
202 n = 0;
203 if (n < tail)
204 n += rq->size - tail;
205 else
206 n -= tail;
207 if (n < srq->limit) {
208 struct ib_event ev;
209
210 srq->limit = 0;
211 spin_unlock_irqrestore(&rq->lock, flags);
212 ev.device = qp->ibqp.device;
213 ev.element.srq = qp->ibqp.srq;
214 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
215 handler(&ev, srq->ibsrq.srq_context);
216 goto bail;
217 }
218 }
219unlock:
220 spin_unlock_irqrestore(&rq->lock, flags);
221bail:
222 return ret;
223}
224
225/*
226 * Switch to alternate path.
227 * The QP s_lock should be held and interrupts disabled.
228 */
229void qib_migrate_qp(struct qib_qp *qp)
230{
231 struct ib_event ev;
232
233 qp->s_mig_state = IB_MIG_MIGRATED;
234 qp->remote_ah_attr = qp->alt_ah_attr;
235 qp->port_num = qp->alt_ah_attr.port_num;
236 qp->s_pkey_index = qp->s_alt_pkey_index;
237
238 ev.device = qp->ibqp.device;
239 ev.element.qp = &qp->ibqp;
240 ev.event = IB_EVENT_PATH_MIG;
241 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
242}
243
244static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
245{
246 if (!index) {
247 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
248
249 return ppd->guid;
250 } else
251 return ibp->guids[index - 1];
252}
253
254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
255{
256 return (gid->global.interface_id == id &&
257 (gid->global.subnet_prefix == gid_prefix ||
258 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
259}
260
261/*
262 * qib_ruc_check_hdr - validate the incoming packet header against the QP
263 * This should be called with the QP s_lock held.
264 */
265int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
266 int has_grh, struct qib_qp *qp, u32 bth0)
267{
268 __be64 guid;
269
270 if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
271 if (!has_grh) {
272 if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
273 goto err;
274 } else {
275 if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
276 goto err;
277 guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
278 if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
279 goto err;
280 if (!gid_ok(&hdr->u.l.grh.sgid,
281 qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
282 qp->alt_ah_attr.grh.dgid.global.interface_id))
283 goto err;
284 }
285 if (!qib_pkey_ok((u16)bth0,
286 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
287 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
288 (u16)bth0,
289 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
290 0, qp->ibqp.qp_num,
291 hdr->lrh[3], hdr->lrh[1]);
292 goto err;
293 }
294 /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
295 if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
296 ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
297 goto err;
298 qib_migrate_qp(qp);
299 } else {
300 if (!has_grh) {
301 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
302 goto err;
303 } else {
304 if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
305 goto err;
306 guid = get_sguid(ibp,
307 qp->remote_ah_attr.grh.sgid_index);
308 if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
309 goto err;
310 if (!gid_ok(&hdr->u.l.grh.sgid,
311 qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
312 qp->remote_ah_attr.grh.dgid.global.interface_id))
313 goto err;
314 }
315 if (!qib_pkey_ok((u16)bth0,
316 qib_get_pkey(ibp, qp->s_pkey_index))) {
317 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
318 (u16)bth0,
319 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
320 0, qp->ibqp.qp_num,
321 hdr->lrh[3], hdr->lrh[1]);
322 goto err;
323 }
324 /* Validate the SLID. See Ch. 9.6.1.5 */
325 if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
326 ppd_from_ibp(ibp)->port != qp->port_num)
327 goto err;
328 if (qp->s_mig_state == IB_MIG_REARM &&
329 !(bth0 & IB_BTH_MIG_REQ))
330 qp->s_mig_state = IB_MIG_ARMED;
331 }
332
333 return 0;
334
335err:
336 return 1;
337}
338
339/**
340 * qib_ruc_loopback - handle UC and RC loopback requests
341 * @sqp: the sending QP
342 *
343 * This is called from qib_do_send() to
344 * forward a WQE addressed to the same HCA.
345 * Note that although we are single threaded due to the tasklet, we still
346 * have to protect against post_send(). We don't have to worry about
347 * receive interrupts since this is a connected protocol and all packets
348 * will pass through here.
349 */
350static void qib_ruc_loopback(struct qib_qp *sqp)
351{
352 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
353 struct qib_qp *qp;
354 struct qib_swqe *wqe;
355 struct qib_sge *sge;
356 unsigned long flags;
357 struct ib_wc wc;
358 u64 sdata;
359 atomic64_t *maddr;
360 enum ib_wc_status send_status;
361 int release;
362 int ret;
363
364 /*
365 * Note that we check the responder QP state after
366 * checking the requester's state.
367 */
368 qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
369
370 spin_lock_irqsave(&sqp->s_lock, flags);
371
372 /* Return if we are already busy processing a work request. */
373 if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
374 !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
375 goto unlock;
376
377 sqp->s_flags |= QIB_S_BUSY;
378
379again:
380 if (sqp->s_last == sqp->s_head)
381 goto clr_busy;
382 wqe = get_swqe_ptr(sqp, sqp->s_last);
383
384	/* Return if it is not OK to start a new work request. */
385 if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
386 if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
387 goto clr_busy;
388 /* We are in the error state, flush the work request. */
389 send_status = IB_WC_WR_FLUSH_ERR;
390 goto flush_send;
391 }
392
393 /*
394 * We can rely on the entry not changing without the s_lock
395 * being held until we update s_last.
396 * We increment s_cur to indicate s_last is in progress.
397 */
398 if (sqp->s_last == sqp->s_cur) {
399 if (++sqp->s_cur >= sqp->s_size)
400 sqp->s_cur = 0;
401 }
402 spin_unlock_irqrestore(&sqp->s_lock, flags);
403
404 if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
405 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
406 ibp->n_pkt_drops++;
407 /*
408 * For RC, the requester would timeout and retry so
409 * shortcut the timeouts and just signal too many retries.
410 */
411 if (sqp->ibqp.qp_type == IB_QPT_RC)
412 send_status = IB_WC_RETRY_EXC_ERR;
413 else
414 send_status = IB_WC_SUCCESS;
415 goto serr;
416 }
417
418 memset(&wc, 0, sizeof wc);
419 send_status = IB_WC_SUCCESS;
420
421 release = 1;
422 sqp->s_sge.sge = wqe->sg_list[0];
423 sqp->s_sge.sg_list = wqe->sg_list + 1;
424 sqp->s_sge.num_sge = wqe->wr.num_sge;
425 sqp->s_len = wqe->length;
426 switch (wqe->wr.opcode) {
427 case IB_WR_SEND_WITH_IMM:
428 wc.wc_flags = IB_WC_WITH_IMM;
429 wc.ex.imm_data = wqe->wr.ex.imm_data;
430 /* FALLTHROUGH */
431 case IB_WR_SEND:
432 ret = qib_get_rwqe(qp, 0);
433 if (ret < 0)
434 goto op_err;
435 if (!ret)
436 goto rnr_nak;
437 break;
438
439 case IB_WR_RDMA_WRITE_WITH_IMM:
440 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
441 goto inv_err;
442 wc.wc_flags = IB_WC_WITH_IMM;
443 wc.ex.imm_data = wqe->wr.ex.imm_data;
444 ret = qib_get_rwqe(qp, 1);
445 if (ret < 0)
446 goto op_err;
447 if (!ret)
448 goto rnr_nak;
449 /* FALLTHROUGH */
450 case IB_WR_RDMA_WRITE:
451 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
452 goto inv_err;
453 if (wqe->length == 0)
454 break;
455 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
456 wqe->wr.wr.rdma.remote_addr,
457 wqe->wr.wr.rdma.rkey,
458 IB_ACCESS_REMOTE_WRITE)))
459 goto acc_err;
460 qp->r_sge.sg_list = NULL;
461 qp->r_sge.num_sge = 1;
462 qp->r_sge.total_len = wqe->length;
463 break;
464
465 case IB_WR_RDMA_READ:
466 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
467 goto inv_err;
468 if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
469 wqe->wr.wr.rdma.remote_addr,
470 wqe->wr.wr.rdma.rkey,
471 IB_ACCESS_REMOTE_READ)))
472 goto acc_err;
473 release = 0;
474 sqp->s_sge.sg_list = NULL;
475 sqp->s_sge.num_sge = 1;
476 qp->r_sge.sge = wqe->sg_list[0];
477 qp->r_sge.sg_list = wqe->sg_list + 1;
478 qp->r_sge.num_sge = wqe->wr.num_sge;
479 qp->r_sge.total_len = wqe->length;
480 break;
481
482 case IB_WR_ATOMIC_CMP_AND_SWP:
483 case IB_WR_ATOMIC_FETCH_AND_ADD:
484 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
485 goto inv_err;
486 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
487 wqe->wr.wr.atomic.remote_addr,
488 wqe->wr.wr.atomic.rkey,
489 IB_ACCESS_REMOTE_ATOMIC)))
490 goto acc_err;
491 /* Perform atomic OP and save result. */
492 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
493 sdata = wqe->wr.wr.atomic.compare_add;
494 *(u64 *) sqp->s_sge.sge.vaddr =
495 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
496 (u64) atomic64_add_return(sdata, maddr) - sdata :
497 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
498 sdata, wqe->wr.wr.atomic.swap);
499 atomic_dec(&qp->r_sge.sge.mr->refcount);
500 qp->r_sge.num_sge = 0;
501 goto send_comp;
502
503 default:
504 send_status = IB_WC_LOC_QP_OP_ERR;
505 goto serr;
506 }
507
508 sge = &sqp->s_sge.sge;
509 while (sqp->s_len) {
510 u32 len = sqp->s_len;
511
512 if (len > sge->length)
513 len = sge->length;
514 if (len > sge->sge_length)
515 len = sge->sge_length;
516 BUG_ON(len == 0);
517 qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
518 sge->vaddr += len;
519 sge->length -= len;
520 sge->sge_length -= len;
521 if (sge->sge_length == 0) {
522 if (!release)
523 atomic_dec(&sge->mr->refcount);
524 if (--sqp->s_sge.num_sge)
525 *sge = *sqp->s_sge.sg_list++;
526 } else if (sge->length == 0 && sge->mr->lkey) {
527 if (++sge->n >= QIB_SEGSZ) {
528 if (++sge->m >= sge->mr->mapsz)
529 break;
530 sge->n = 0;
531 }
532 sge->vaddr =
533 sge->mr->map[sge->m]->segs[sge->n].vaddr;
534 sge->length =
535 sge->mr->map[sge->m]->segs[sge->n].length;
536 }
537 sqp->s_len -= len;
538 }
539 if (release)
540 while (qp->r_sge.num_sge) {
541 atomic_dec(&qp->r_sge.sge.mr->refcount);
542 if (--qp->r_sge.num_sge)
543 qp->r_sge.sge = *qp->r_sge.sg_list++;
544 }
545
546 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
547 goto send_comp;
548
549 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
550 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
551 else
552 wc.opcode = IB_WC_RECV;
553 wc.wr_id = qp->r_wr_id;
554 wc.status = IB_WC_SUCCESS;
555 wc.byte_len = wqe->length;
556 wc.qp = &qp->ibqp;
557 wc.src_qp = qp->remote_qpn;
558 wc.slid = qp->remote_ah_attr.dlid;
559 wc.sl = qp->remote_ah_attr.sl;
560 wc.port_num = 1;
561 /* Signal completion event if the solicited bit is set. */
562 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
563 wqe->wr.send_flags & IB_SEND_SOLICITED);
564
565send_comp:
566 spin_lock_irqsave(&sqp->s_lock, flags);
567 ibp->n_loop_pkts++;
568flush_send:
569 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
570 qib_send_complete(sqp, wqe, send_status);
571 goto again;
572
573rnr_nak:
574 /* Handle RNR NAK */
575 if (qp->ibqp.qp_type == IB_QPT_UC)
576 goto send_comp;
577 ibp->n_rnr_naks++;
578 /*
579 * Note: we don't need the s_lock held since the BUSY flag
580 * makes this single threaded.
581 */
582 if (sqp->s_rnr_retry == 0) {
583 send_status = IB_WC_RNR_RETRY_EXC_ERR;
584 goto serr;
585 }
586 if (sqp->s_rnr_retry_cnt < 7)
587 sqp->s_rnr_retry--;
588 spin_lock_irqsave(&sqp->s_lock, flags);
589 if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
590 goto clr_busy;
591 sqp->s_flags |= QIB_S_WAIT_RNR;
592 sqp->s_timer.function = qib_rc_rnr_retry;
593 sqp->s_timer.expires = jiffies +
594 usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
595 add_timer(&sqp->s_timer);
596 goto clr_busy;
597
598op_err:
599 send_status = IB_WC_REM_OP_ERR;
600 wc.status = IB_WC_LOC_QP_OP_ERR;
601 goto err;
602
603inv_err:
604 send_status = IB_WC_REM_INV_REQ_ERR;
605 wc.status = IB_WC_LOC_QP_OP_ERR;
606 goto err;
607
608acc_err:
609 send_status = IB_WC_REM_ACCESS_ERR;
610 wc.status = IB_WC_LOC_PROT_ERR;
611err:
612 /* responder goes to error state */
613 qib_rc_error(qp, wc.status);
614
615serr:
616 spin_lock_irqsave(&sqp->s_lock, flags);
617 qib_send_complete(sqp, wqe, send_status);
618 if (sqp->ibqp.qp_type == IB_QPT_RC) {
619 int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
620
621 sqp->s_flags &= ~QIB_S_BUSY;
622 spin_unlock_irqrestore(&sqp->s_lock, flags);
623 if (lastwqe) {
624 struct ib_event ev;
625
626 ev.device = sqp->ibqp.device;
627 ev.element.qp = &sqp->ibqp;
628 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
629 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
630 }
631 goto done;
632 }
633clr_busy:
634 sqp->s_flags &= ~QIB_S_BUSY;
635unlock:
636 spin_unlock_irqrestore(&sqp->s_lock, flags);
637done:
638 if (qp && atomic_dec_and_test(&qp->refcount))
639 wake_up(&qp->wait);
640}
641
642/**
643 * qib_make_grh - construct a GRH header
644 * @ibp: a pointer to the IB port
645 * @hdr: a pointer to the GRH header being constructed
646 * @grh: the global route address to send to
647 * @hwords: the number of 32 bit words of header being sent
648 * @nwords: the number of 32 bit words of data being sent
649 *
650 * Return the size of the header in 32 bit words.
651 */
652u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
653 struct ib_global_route *grh, u32 hwords, u32 nwords)
654{
655 hdr->version_tclass_flow =
656 cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
657 (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
658 (grh->flow_label << IB_GRH_FLOW_SHIFT));
659 hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
660 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
661 hdr->next_hdr = IB_GRH_NEXT_HDR;
662 hdr->hop_limit = grh->hop_limit;
663 /* The SGID is 32-bit aligned. */
664 hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
665 hdr->sgid.global.interface_id = grh->sgid_index ?
666 ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
667 hdr->dgid = grh->dgid;
668
669 /* GRH header size in 32-bit words. */
670 return sizeof(struct ib_grh) / sizeof(u32);
671}
672
673void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
674 u32 bth0, u32 bth2)
675{
676 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
677 u16 lrh0;
678 u32 nwords;
679 u32 extra_bytes;
680
681 /* Construct the header. */
682 extra_bytes = -qp->s_cur_size & 3;
683 nwords = (qp->s_cur_size + extra_bytes) >> 2;
684 lrh0 = QIB_LRH_BTH;
685 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
686 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
687 &qp->remote_ah_attr.grh,
688 qp->s_hdrwords, nwords);
689 lrh0 = QIB_LRH_GRH;
690 }
691 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
692 qp->remote_ah_attr.sl << 4;
693 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
694 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
695 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
696 qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
697 qp->remote_ah_attr.src_path_bits);
698 bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
699 bth0 |= extra_bytes << 20;
700 if (qp->s_mig_state == IB_MIG_MIGRATED)
701 bth0 |= IB_BTH_MIG_REQ;
702 ohdr->bth[0] = cpu_to_be32(bth0);
703 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
704 ohdr->bth[2] = cpu_to_be32(bth2);
705}
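/*
 * Worked example of the padding arithmetic above (an illustrative sketch,
 * not part of the driver; the helper name is hypothetical): "-size & 3"
 * gives the number of pad bytes needed to round the payload up to a
 * 32-bit boundary, and the padded size shifted right by 2 is the payload
 * length in dwords.  E.g. size 13 -> extra_bytes 3, nwords 4; size 16 ->
 * extra_bytes 0, nwords 4.
 */
static inline u32 example_payload_dwords(u32 size)
{
	u32 extra_bytes = -size & 3;	/* pad bytes to a 4-byte boundary */

	return (size + extra_bytes) >> 2;
}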
706
707/**
708 * qib_do_send - perform a send on a QP
709 * @work: contains a pointer to the QP
710 *
711 * Process entries in the send work queue until credit or queue is
712 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
713 * Otherwise, two threads could send packets out of order.
714 */
715void qib_do_send(struct work_struct *work)
716{
717 struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
718 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
719 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
720 int (*make_req)(struct qib_qp *qp);
721 unsigned long flags;
722
723 if ((qp->ibqp.qp_type == IB_QPT_RC ||
724 qp->ibqp.qp_type == IB_QPT_UC) &&
725 (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
726 qib_ruc_loopback(qp);
727 return;
728 }
729
730 if (qp->ibqp.qp_type == IB_QPT_RC)
731 make_req = qib_make_rc_req;
732 else if (qp->ibqp.qp_type == IB_QPT_UC)
733 make_req = qib_make_uc_req;
734 else
735 make_req = qib_make_ud_req;
736
737 spin_lock_irqsave(&qp->s_lock, flags);
738
739 /* Return if we are already busy processing a work request. */
740 if (!qib_send_ok(qp)) {
741 spin_unlock_irqrestore(&qp->s_lock, flags);
742 return;
743 }
744
745 qp->s_flags |= QIB_S_BUSY;
746
747 spin_unlock_irqrestore(&qp->s_lock, flags);
748
749 do {
750 /* Check for a constructed packet to be sent. */
751 if (qp->s_hdrwords != 0) {
752 /*
753 * If the packet cannot be sent now, return and
754 * the send tasklet will be woken up later.
755 */
756 if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
757 qp->s_cur_sge, qp->s_cur_size))
758 break;
759 /* Record that s_hdr is empty. */
760 qp->s_hdrwords = 0;
761 }
762 } while (make_req(qp));
763}
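/*
 * Sketch of the loopback test at the top of qib_do_send() (illustrative
 * only; the helper name is hypothetical): the low "lmc" bits of a DLID
 * are path bits, so masking them off and comparing with the port's base
 * LID tells whether the destination is this same port.  With lid 0x10
 * and lmc 2, for example, DLIDs 0x10..0x13 all loop back locally.
 */
static inline int example_dlid_is_local(u16 dlid, u16 lid, u8 lmc)
{
	return (dlid & ~((1 << lmc) - 1)) == lid;	/* strip path bits */
}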
764
765/*
766 * This should be called with s_lock held.
767 */
768void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
769 enum ib_wc_status status)
770{
771 u32 old_last, last;
772 unsigned i;
773
774 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
775 return;
776
777 for (i = 0; i < wqe->wr.num_sge; i++) {
778 struct qib_sge *sge = &wqe->sg_list[i];
779
780 atomic_dec(&sge->mr->refcount);
781 }
782 if (qp->ibqp.qp_type == IB_QPT_UD ||
783 qp->ibqp.qp_type == IB_QPT_SMI ||
784 qp->ibqp.qp_type == IB_QPT_GSI)
785 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
786
787 /* See ch. 11.2.4.1 and 10.7.3.1 */
788 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
789 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
790 status != IB_WC_SUCCESS) {
791 struct ib_wc wc;
792
793 memset(&wc, 0, sizeof wc);
794 wc.wr_id = wqe->wr.wr_id;
795 wc.status = status;
796 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
797 wc.qp = &qp->ibqp;
798 if (status == IB_WC_SUCCESS)
799 wc.byte_len = wqe->length;
800 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
801 status != IB_WC_SUCCESS);
802 }
803
804 last = qp->s_last;
805 old_last = last;
806 if (++last >= qp->s_size)
807 last = 0;
808 qp->s_last = last;
809 if (qp->s_acked == old_last)
810 qp->s_acked = last;
811 if (qp->s_cur == old_last)
812 qp->s_cur = last;
813 if (qp->s_tail == old_last)
814 qp->s_tail = last;
815 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
816 qp->s_draining = 0;
817}
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
new file mode 100644
index 000000000000..0aeed0e74cb6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -0,0 +1,1413 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the SerDes
35 * on the QLogic_IB 7220 chip.
36 */
37
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "qib.h"
42#include "qib_7220.h"
43
44/*
45 * Same as in qib_iba7220.c, but just the registers needed here.
46 * Could move whole set to qib_7220.h, but decided better to keep
47 * local.
48 */
49#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
50#define kr_hwerrclear KREG_IDX(HwErrClear)
51#define kr_hwerrmask KREG_IDX(HwErrMask)
52#define kr_hwerrstatus KREG_IDX(HwErrStatus)
53#define kr_ibcstatus KREG_IDX(IBCStatus)
54#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
55#define kr_scratch KREG_IDX(Scratch)
56#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
57/* these are used only here, not in qib_iba7220.c */
58#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
59#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
60#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
61#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
62#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
63
64/*
65 * The IBSerDesMappTable is a memory that holds values to be stored in
66 * various SerDes registers by IBC.
67 */
68#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
69
70/*
71 * Below used for sdnum parameter, selecting one of the two sections
72 * used for PCIe, or the single SerDes used for IB.
73 */
74#define PCIE_SERDES0 0
75#define PCIE_SERDES1 1
76
77/*
78 * The EPB requires addressing in a particular form. EPB_LOC() is intended
79 * to make #definitions a little more readable.
80 */
81#define EPB_ADDR_SHF 8
82#define EPB_LOC(chn, elt, reg) \
83 (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
84 EPB_ADDR_SHF)
85#define EPB_IB_QUAD0_CS_SHF (25)
86#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
87#define EPB_IB_UC_CS_SHF (26)
88#define EPB_PCIE_UC_CS_SHF (27)
89#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
90
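/*
 * Sketch of the EPB_LOC() packing (illustrative only; the helper name is
 * hypothetical): element in bits 0-3, channel in bits 4-6 and register in
 * bits 9-14 of the pre-shift value, all shifted up by EPB_ADDR_SHF.  For
 * example, EPB_LOC(6, 0, 0xF) = ((0x0 | (6 << 4) | (0xF << 9)) << 8)
 * = 0x1E6000.  The error paths later in this file decode locations the
 * same way.
 */
static inline void example_epb_unpack(u32 loc, u32 *chn, u32 *elt, u32 *reg)
{
	u32 sloc = loc >> EPB_ADDR_SHF;

	*elt = sloc & 0xF;		/* element within channel */
	*chn = (sloc >> 4) & 7;		/* channel number */
	*reg = (sloc >> 9) & 0x3F;	/* register within element */
}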
91/* Forward declarations. */
92static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
93 u32 data, u32 mask);
94static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
95 int mask);
96static int qib_sd_trimdone_poll(struct qib_devdata *dd);
97static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
98static int qib_sd_setvals(struct qib_devdata *dd);
99static int qib_sd_early(struct qib_devdata *dd);
100static int qib_sd_dactrim(struct qib_devdata *dd);
101static int qib_internal_presets(struct qib_devdata *dd);
102/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
103static int qib_sd_trimself(struct qib_devdata *dd, int val);
104static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
105
106/*
107 * Below keeps track of whether the "once per power-on" initialization has
108 * been done, because uC code Version 1.32.17 or higher allows the uC to
109 * be reset at will, and Automatic Equalization may require it. So the
110 * state of the reset "pin" is no longer valid. Instead, we check for the
111 * actual uC code having been loaded.
112 */
113static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd)
114{
115 struct qib_devdata *dd = ppd->dd;
116 if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0))
117 dd->cspec->serdes_first_init_done = 1;
118 return dd->cspec->serdes_first_init_done;
119}
120
121/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
122#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
123#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
124#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
125#define UC_PAR_CLR_D 8
126#define UC_PAR_CLR_M 0xC
127#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
128#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
129
130void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
131{
132 int ret;
133
134 /* clear, then re-enable parity errs */
135 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
136 UC_PAR_CLR_D, UC_PAR_CLR_M);
137 if (ret < 0) {
138 qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
139 goto bail;
140 }
141 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
142 UC_PAR_CLR_M);
143
144 qib_read_kreg32(dd, kr_scratch);
145 udelay(4);
146 qib_write_kreg(dd, kr_hwerrclear,
147 QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
148 qib_read_kreg32(dd, kr_scratch);
149bail:
150 return;
151}
152
153/*
154 * After a reset or other unusual event, the epb interface may need
155 * to be re-synchronized between the host and the uC.
156 * returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected)
157 */
158#define IBSD_RESYNC_TRIES 3
159#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
160#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
161
162static int qib_resync_ibepb(struct qib_devdata *dd)
163{
164 int ret, pat, tries, chn;
165 u32 loc;
166
167 ret = -1;
168 chn = 0;
169 for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
170 loc = IB_PGUDP(chn);
171 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
172 if (ret < 0) {
173 qib_dev_err(dd, "Failed read in resync\n");
174 continue;
175 }
176 if (ret != 0xF0 && ret != 0x55 && tries == 0)
177 qib_dev_err(dd, "unexpected pattern in resync\n");
178 pat = ret ^ 0xA5; /* alternate F0 and 55 */
179 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
180 if (ret < 0) {
181 qib_dev_err(dd, "Failed write in resync\n");
182 continue;
183 }
184 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
185 if (ret < 0) {
186 qib_dev_err(dd, "Failed re-read in resync\n");
187 continue;
188 }
189 if (ret != pat) {
190 qib_dev_err(dd, "Failed compare1 in resync\n");
191 continue;
192 }
193 loc = IB_CMUDONE(chn);
194 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
195 if (ret < 0) {
196 qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
197 continue;
198 }
199 if ((ret & 0x70) != ((chn << 4) | 0x40)) {
200 qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
201 ret, chn);
202 continue;
203 }
204 if (++chn == 4)
205 break; /* Success */
206 }
207 return (ret > 0) ? 0 : ret;
208}
209
210/*
211 * Localize the stuff that should be done to change IB uC reset
212 * returns <0 for errors.
213 */
214static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
215{
216 u64 rst_val;
217 int ret = 0;
218 unsigned long flags;
219
220 rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
221 if (assert_rst) {
222 /*
223 * Vendor recommends "interrupting" uC before reset, to
224 * minimize possible glitches.
225 */
226 spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
227 epb_access(dd, IB_7220_SERDES, 1);
228 rst_val |= 1ULL;
229 /* Squelch possible parity error from _asserting_ reset */
230 qib_write_kreg(dd, kr_hwerrmask,
231 dd->cspec->hwerrmask &
232 ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
233 qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
234 /* flush write, delay to ensure it took effect */
235 qib_read_kreg32(dd, kr_scratch);
236 udelay(2);
237 /* once it's reset, can remove interrupt */
238 epb_access(dd, IB_7220_SERDES, -1);
239 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
240 } else {
241 /*
242 * Before we de-assert reset, we need to deal with
243 * possible glitch on the Parity-error line.
244 * Suppress it around the reset, both in chip-level
245 * hwerrmask and in IB uC control reg. uC will allow
246 * it again during startup.
247 */
248 u64 val;
249 rst_val &= ~(1ULL);
250 qib_write_kreg(dd, kr_hwerrmask,
251 dd->cspec->hwerrmask &
252 ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
253
254 ret = qib_resync_ibepb(dd);
255 if (ret < 0)
256 qib_dev_err(dd, "unable to re-sync IB EPB\n");
257
258 /* set uC control regs to suppress parity errs */
259 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
260 if (ret < 0)
261 goto bail;
262 /* IB uC code past Version 1.32.17 allows suppression of wdog */
263 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
264 0x80);
265 if (ret < 0) {
266 qib_dev_err(dd, "Failed to set WDOG disable\n");
267 goto bail;
268 }
269 qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
270 /* flush write, delay for startup */
271 qib_read_kreg32(dd, kr_scratch);
272 udelay(1);
273 /* clear, then re-enable parity errs */
274 qib_sd7220_clr_ibpar(dd);
275 val = qib_read_kreg64(dd, kr_hwerrstatus);
276 if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
277 qib_dev_err(dd, "IBUC Parity still set after RST\n");
278 dd->cspec->hwerrmask &=
279 ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
280 }
281 qib_write_kreg(dd, kr_hwerrmask,
282 dd->cspec->hwerrmask);
283 }
284
285bail:
286 return ret;
287}
288
289static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
290 const char *where)
291{
292 int ret, chn, baduns;
293 u64 val;
294
295 if (!where)
296 where = "?";
297
298 /* give time for reset to settle out in EPB */
299 udelay(2);
300
301 ret = qib_resync_ibepb(dd);
302 if (ret < 0)
303 qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
304
305 /* Do "sacrificial read" to get EPB in sane state after reset */
306 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
307 if (ret < 0)
308 qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
309
310 /* Check/show "summary" Trim-done bit in IBCStatus */
311 val = qib_read_kreg64(dd, kr_ibcstatus);
312 if (!(val & (1ULL << 11)))
313 qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
314 /*
315 * Do "dummy read/mod/wr" to get EPB in sane state after reset.
316 * The default value for MPREG6 is 0.
317 */
318 udelay(2);
319
320 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
321 if (ret < 0)
322 qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
323 udelay(10);
324
325 baduns = 0;
326
327 for (chn = 3; chn >= 0; --chn) {
328 /* Read CTRL reg for each channel to check TRIMDONE */
329 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
330 IB_CTRL2(chn), 0, 0);
331 if (ret < 0)
332 qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
333 " (%s)\n", chn, where);
334
335 if (!(ret & 0x10)) {
336 int probe;
337
338 baduns |= (1 << chn);
339 qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
340 " (%s)\n", chn, ret, where);
341 probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
342 IB_PGUDP(0), 0, 0);
343 qib_dev_err(dd, "probe is %d (%02X)\n",
344 probe, probe);
345 probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
346 IB_CTRL2(chn), 0, 0);
347 qib_dev_err(dd, "re-read: %d (%02X)\n",
348 probe, probe);
349 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
350 IB_CTRL2(chn), 0x10, 0x10);
351 if (ret < 0)
352 qib_dev_err(dd,
353 "Err on TRIMDONE rewrite1\n");
354 }
355 }
356 for (chn = 3; chn >= 0; --chn) {
357 /* Read CTRL reg for each channel to check TRIMDONE */
358 if (baduns & (1 << chn)) {
359 qib_dev_err(dd,
360 "Resetting TRIMDONE on chn %d (%s)\n",
361 chn, where);
362 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
363 IB_CTRL2(chn), 0x10, 0x10);
364 if (ret < 0)
365 qib_dev_err(dd, "Failed re-setting "
366 "TRIMDONE, chn %d (%s)\n",
367 chn, where);
368 }
369 }
370}
371
372/*
373 * Below is portion of IBA7220-specific bringup_serdes() that actually
374 * deals with registers and memory within the SerDes itself.
375 * Post IB uC code version 1.32.17, was_reset being 1 is not really
376 * informative, so we double-check.
377 */
378int qib_sd7220_init(struct qib_devdata *dd)
379{
380 int ret = 1; /* default to failure */
381 int first_reset, was_reset;
382
383 /* SERDES MPU reset recorded in D0 */
384 was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
385 if (!was_reset) {
386 /* entered with reset not asserted, we need to do it */
387 qib_ibsd_reset(dd, 1);
388 qib_sd_trimdone_monitor(dd, "Driver-reload");
389 }
390 /* Substitute our deduced value for was_reset */
391 ret = qib_ibsd_ucode_loaded(dd->pport);
392 if (ret < 0)
393 goto bail;
394
395 first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
396 /*
397 * Alter some regs per vendor latest doc, reset-defaults
398 * are not right for IB.
399 */
400 ret = qib_sd_early(dd);
401 if (ret < 0) {
402 qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
403 goto bail;
404 }
405 /*
406 * Set DAC manual trim IB.
407 * We only do this once after chip has been reset (usually
408 * same as once per system boot).
409 */
410 if (first_reset) {
411 ret = qib_sd_dactrim(dd);
412 if (ret < 0) {
413 qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
414 goto bail;
415 }
416 }
417 /*
418 * Set various registers (DDS and RXEQ) that will be
419 * controlled by IBC (in 1.2 mode) to reasonable preset values.
420 * Calling the "internal" version avoids the "check for needed"
421 * and "trimdone monitor" that might be counter-productive.
422 */
423 ret = qib_internal_presets(dd);
424 if (ret < 0) {
425 qib_dev_err(dd, "Failed to set IB SERDES presets\n");
426 goto bail;
427 }
428 ret = qib_sd_trimself(dd, 0x80);
429 if (ret < 0) {
430 qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
431 goto bail;
432 }
433
434 /* Load image, then try to verify */
435 ret = 0; /* Assume success */
436 if (first_reset) {
437 int vfy;
438 int trim_done;
439
440 ret = qib_sd7220_ib_load(dd);
441 if (ret < 0) {
442 qib_dev_err(dd, "Failed to load IB SERDES image\n");
443 goto bail;
444 } else {
445 /* Loaded image, try to verify */
446 vfy = qib_sd7220_ib_vfy(dd);
447 if (vfy != ret) {
448 qib_dev_err(dd, "SERDES PRAM VFY failed\n");
449 goto bail;
450 } /* end if verified */
451 } /* end if loaded */
452
453 /*
454 * Loaded and verified. Almost good...
455 * hold "success" in ret
456 */
457 ret = 0;
458 /*
459 * Prev steps all worked, continue bringup
460 * De-assert RESET to uC, only in first reset, to allow
461 * trimming.
462 *
463 * Since our default setup sets START_EQ1 to
464 * PRESET, we need to clear that for this very first run.
465 */
466 ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
467 if (ret < 0) {
468 qib_dev_err(dd, "Failed clearing START_EQ1\n");
469 goto bail;
470 }
471
472 qib_ibsd_reset(dd, 0);
473 /*
474 * If this is not the first reset, trimdone should be set
475 * already. We may need to check about this.
476 */
477 trim_done = qib_sd_trimdone_poll(dd);
478 /*
479 * Whether or not trimdone succeeded, we need to put the
480 * uC back into reset to avoid a possible fight with the
481 * IBC state-machine.
482 */
483 qib_ibsd_reset(dd, 1);
484
485 if (!trim_done) {
486 qib_dev_err(dd, "No TRIMDONE seen\n");
487 goto bail;
488 }
489 /*
490 * DEBUG: check each time we reset if trimdone bits have
491 * gotten cleared, and re-set them.
492 */
493 qib_sd_trimdone_monitor(dd, "First-reset");
494 /* Remember so we do not re-do the load, dactrim, etc. */
495 dd->cspec->serdes_first_init_done = 1;
496 }
497 /*
498 * setup for channel training and load values for
499 * RxEq and DDS in tables used by IBC in IB1.2 mode
500 */
501 ret = 0;
502 if (qib_sd_setvals(dd) >= 0)
503 goto done;
504bail:
505 ret = 1;
506done:
507 /* start relock timer regardless, but start at 1 second */
508 set_7220_relock_poll(dd, -1);
509 return ret;
510}
511
512#define EPB_ACC_REQ 1
513#define EPB_ACC_GNT 0x100
514#define EPB_DATA_MASK 0xFF
515#define EPB_RD (1ULL << 24)
516#define EPB_TRANS_RDY (1ULL << 31)
517#define EPB_TRANS_ERR (1ULL << 30)
518#define EPB_TRANS_TRIES 5
519
520/*
521 * query, claim, release ownership of the EPB (External Parallel Bus)
522 * for a specified SERDES.
523 * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
524 * Returns <0 for errors, >0 if we had ownership, else 0.
525 */
526static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
527{
528 u16 acc;
529 u64 accval;
530 int owned = 0;
531 u64 oct_sel = 0;
532
533 switch (sdnum) {
534 case IB_7220_SERDES:
535 /*
536 * The IB SERDES "ownership" is fairly simple: a single
537 * request/grant pair.
538 */
539 acc = kr_ibsd_epb_access_ctrl;
540 break;
541
542 case PCIE_SERDES0:
543 case PCIE_SERDES1:
544 /* PCIe SERDES has two "octants", need to select which */
545 acc = kr_pciesd_epb_access_ctrl;
546 oct_sel = (2 << (sdnum - PCIE_SERDES0));
547 break;
548
549 default:
550 return 0;
551 }
552
553 /* Make sure any outstanding transaction was seen */
554 qib_read_kreg32(dd, kr_scratch);
555 udelay(15);
556
557 accval = qib_read_kreg32(dd, acc);
558
559 owned = !!(accval & EPB_ACC_GNT);
560 if (claim < 0) {
561 /* Need to release */
562 u64 pollval;
563 /*
564 * The only writeable bits are the request and CS.
565 * Both should be clear.
566 */
567 u64 newval = 0;
568 qib_write_kreg(dd, acc, newval);
569 /* First read after write is not trustworthy */
570 pollval = qib_read_kreg32(dd, acc);
571 udelay(5);
572 pollval = qib_read_kreg32(dd, acc);
573 if (pollval & EPB_ACC_GNT)
574 owned = -1;
575 } else if (claim > 0) {
576 /* Need to claim */
577 u64 pollval;
578 u64 newval = EPB_ACC_REQ | oct_sel;
579 qib_write_kreg(dd, acc, newval);
580 /* First read after write is not trustworthy */
581 pollval = qib_read_kreg32(dd, acc);
582 udelay(5);
583 pollval = qib_read_kreg32(dd, acc);
584 if (!(pollval & EPB_ACC_GNT))
585 owned = -1;
586 }
587 return owned;
588}
589
590/*
591 * Lemma to deal with race condition of write..read to epb regs
592 */
593static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
594{
595 int tries;
596 u64 transval;
597
598 qib_write_kreg(dd, reg, i_val);
599 /* Throw away first read, as RDY bit may be stale */
600 transval = qib_read_kreg64(dd, reg);
601
602 for (tries = EPB_TRANS_TRIES; tries; --tries) {
603 transval = qib_read_kreg32(dd, reg);
604 if (transval & EPB_TRANS_RDY)
605 break;
606 udelay(5);
607 }
608 if (transval & EPB_TRANS_ERR)
609 return -1;
610 if (tries > 0 && o_vp)
611 *o_vp = transval;
612 return tries;
613}
614
615/**
616 * qib_sd7220_reg_mod - modify SERDES register
617 * @dd: the qlogic_ib device
618 * @sdnum: which SERDES to access
619 * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
620 * @wd: Write Data - value to set in register
621 * @mask: ones where data should be spliced into reg.
622 *
623 * Basic register read/modify/write, with un-needed accesses elided. That is,
624 * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
625 * returns current (presumed, if a write was done) contents of selected
626 * register, or <0 if errors.
627 */
628static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
629 u32 wd, u32 mask)
630{
631 u16 trans;
632 u64 transval;
633 int owned;
634 int tries, ret;
635 unsigned long flags;
636
637 switch (sdnum) {
638 case IB_7220_SERDES:
639 trans = kr_ibsd_epb_transaction_reg;
640 break;
641
642 case PCIE_SERDES0:
643 case PCIE_SERDES1:
644 trans = kr_pciesd_epb_transaction_reg;
645 break;
646
647 default:
648 return -1;
649 }
650
651 /*
652 * All access is locked in software (vs other host threads) and
653 * hardware (vs uC access).
654 */
655 spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
656
657 owned = epb_access(dd, sdnum, 1);
658 if (owned < 0) {
659 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
660 return -1;
661 }
662 ret = 0;
663 for (tries = EPB_TRANS_TRIES; tries; --tries) {
664 transval = qib_read_kreg32(dd, trans);
665 if (transval & EPB_TRANS_RDY)
666 break;
667 udelay(5);
668 }
669
670 if (tries > 0) {
671 tries = 1; /* to make read-skip work */
672 if (mask != 0xFF) {
673 /*
674 * Not a pure write, so need to read.
675 * loc encodes chip-select as well as address
676 */
677 transval = loc | EPB_RD;
678 tries = epb_trans(dd, trans, transval, &transval);
679 }
680 if (tries > 0 && mask != 0) {
681 /*
682 * Not a pure read, so need to write.
683 */
684 wd = (wd & mask) | (transval & ~mask);
685 transval = loc | (wd & EPB_DATA_MASK);
686 tries = epb_trans(dd, trans, transval, &transval);
687 }
688 }
689 /* else, failed to see ready, what error-handling? */
690
691 /*
692 * Release bus. Failure is an error.
693 */
694 if (epb_access(dd, sdnum, -1) < 0)
695 ret = -1;
696 else
697 ret = transval & EPB_DATA_MASK;
698
699 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
700 if (tries <= 0)
701 ret = -1;
702 return ret;
703}
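/*
 * Sketch of the read-modify-write splice performed above (illustrative
 * only; the helper name is hypothetical): bits where "mask" is set come
 * from the caller's value, everything else is kept from the current
 * register contents.  A mask of 0 therefore degenerates to a pure read
 * and a mask of 0xFF to a pure write, which is why those cases skip the
 * write or the read respectively.
 */
static inline u32 example_epb_splice(u32 cur, u32 wd, u32 mask)
{
	return (wd & mask) | (cur & ~mask);	/* merge new and old bits */
}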
704
705#define EPB_ROM_R (2)
706#define EPB_ROM_W (1)
707/*
708 * Below, all uC-related, use appropriate UC_CS, depending
709 * on which SerDes is used.
710 */
711#define EPB_UC_CTL EPB_LOC(6, 0, 0)
712#define EPB_MADDRL EPB_LOC(6, 0, 2)
713#define EPB_MADDRH EPB_LOC(6, 0, 3)
714#define EPB_ROMDATA EPB_LOC(6, 0, 4)
715#define EPB_RAMDATA EPB_LOC(6, 0, 5)
716
717/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
718static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
719 u8 *buf, int cnt, int rd_notwr)
720{
721 u16 trans;
722 u64 transval;
723 u64 csbit;
724 int owned;
725 int tries;
726 int sofar;
727 int addr;
728 int ret;
729 unsigned long flags;
730 const char *op;
731
732 /* Pick appropriate transaction reg and "Chip select" for this serdes */
733 switch (sdnum) {
734 case IB_7220_SERDES:
735 csbit = 1ULL << EPB_IB_UC_CS_SHF;
736 trans = kr_ibsd_epb_transaction_reg;
737 break;
738
739 case PCIE_SERDES0:
740 case PCIE_SERDES1:
741 /* PCIe SERDES has uC "chip select" in different bit, too */
742 csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
743 trans = kr_pciesd_epb_transaction_reg;
744 break;
745
746 default:
747 return -1;
748 }
749
750 op = rd_notwr ? "Rd" : "Wr";
751 spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
752
753 owned = epb_access(dd, sdnum, 1);
754 if (owned < 0) {
755 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
756 return -1;
757 }
758
759 /*
760 * In future code, we may need to distinguish several address ranges,
761 * and select various memories based on this. For now, just trim
762 * "loc" (location including address and memory select) to
763 * "addr" (address within memory). We will only support PRAM.
764 * The memory is 8KB.
765 */
766 addr = loc & 0x1FFF;
767 for (tries = EPB_TRANS_TRIES; tries; --tries) {
768 transval = qib_read_kreg32(dd, trans);
769 if (transval & EPB_TRANS_RDY)
770 break;
771 udelay(5);
772 }
773
774 sofar = 0;
775 if (tries > 0) {
776 /*
777 * Every "memory" access is doubly-indirect.
778 * We set two bytes of address, then read/write
779 * one or more bytes of data.
780 */
781
782 /* First, we set control to "Read" or "Write" */
783 transval = csbit | EPB_UC_CTL |
784 (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
785 tries = epb_trans(dd, trans, transval, &transval);
786 while (tries > 0 && sofar < cnt) {
787 if (!sofar) {
788 /* Only set address at start of chunk */
789 int addrbyte = (addr + sofar) >> 8;
790 transval = csbit | EPB_MADDRH | addrbyte;
791 tries = epb_trans(dd, trans, transval,
792 &transval);
793 if (tries <= 0)
794 break;
795 addrbyte = (addr + sofar) & 0xFF;
796 transval = csbit | EPB_MADDRL | addrbyte;
797 tries = epb_trans(dd, trans, transval,
798 &transval);
799 if (tries <= 0)
800 break;
801 }
802
803 if (rd_notwr)
804 transval = csbit | EPB_ROMDATA | EPB_RD;
805 else
806 transval = csbit | EPB_ROMDATA | buf[sofar];
807 tries = epb_trans(dd, trans, transval, &transval);
808 if (tries <= 0)
809 break;
810 if (rd_notwr)
811 buf[sofar] = transval & EPB_DATA_MASK;
812 ++sofar;
813 }
814 /* Finally, clear control-bit for Read or Write */
815 transval = csbit | EPB_UC_CTL;
816 tries = epb_trans(dd, trans, transval, &transval);
817 }
818
819 ret = sofar;
820 /* Release bus. Failure is an error */
821 if (epb_access(dd, sdnum, -1) < 0)
822 ret = -1;
823
824 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
825 if (tries <= 0)
826 ret = -1;
827 return ret;
828}
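/*
 * Sketch of the doubly-indirect PRAM addressing used above (illustrative
 * only; the helper name is hypothetical): the 13-bit address is split
 * into high and low bytes and written via EPB_MADDRH/EPB_MADDRL once at
 * the start of each chunk, after which the data bytes stream through
 * EPB_ROMDATA.  Address 0x123, for example, is programmed as
 * MADDRH = 0x01, MADDRL = 0x23.
 */
static inline void example_pram_addr_split(u32 addr, u8 *hi, u8 *lo)
{
	*hi = (addr >> 8) & 0xFF;	/* goes out via EPB_MADDRH */
	*lo = addr & 0xFF;		/* goes out via EPB_MADDRL */
}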
829
830#define PROG_CHUNK 64
831
832int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
833 u8 *img, int len, int offset)
834{
835 int cnt, sofar, req;
836
837 sofar = 0;
838 while (sofar < len) {
839 req = len - sofar;
840 if (req > PROG_CHUNK)
841 req = PROG_CHUNK;
842 cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
843 img + sofar, req, 0);
844 if (cnt < req) {
845 sofar = -1;
846 break;
847 }
848 sofar += req;
849 }
850 return sofar;
851}
852
853#define VFY_CHUNK 64
854#define SD_PRAM_ERROR_LIMIT 42
855
856int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
857 const u8 *img, int len, int offset)
858{
859 int cnt, sofar, req, idx, errors;
860 unsigned char readback[VFY_CHUNK];
861
862 errors = 0;
863 sofar = 0;
864 while (sofar < len) {
865 req = len - sofar;
866 if (req > VFY_CHUNK)
867 req = VFY_CHUNK;
868 cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
869 readback, req, 1);
870 if (cnt < req) {
871 /* failed in read itself */
872 sofar = -1;
873 break;
874 }
875 for (idx = 0; idx < cnt; ++idx) {
876 if (readback[idx] != img[idx+sofar])
877 ++errors;
878 }
879 sofar += cnt;
880 }
881 return errors ? -errors : sofar;
882}
883
884/*
885 * IRQ not set up at this point in init, so we poll.
886 */
887#define IB_SERDES_TRIM_DONE (1ULL << 11)
888#define TRIM_TMO (30)
889
890static int qib_sd_trimdone_poll(struct qib_devdata *dd)
891{
892 int trim_tmo, ret;
893 uint64_t val;
894
895 /*
896 * Default to failure, so IBC will not start
897 * without IB_SERDES_TRIM_DONE.
898 */
899 ret = 0;
900 for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
901 val = qib_read_kreg64(dd, kr_ibcstatus);
902 if (val & IB_SERDES_TRIM_DONE) {
903 ret = 1;
904 break;
905 }
906 msleep(10);
907 }
908 if (trim_tmo >= TRIM_TMO) {
909 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
910 ret = 0;
911 }
912 return ret;
913}
914
915#define TX_FAST_ELT (9)
916
917/*
918 * Set the "negotiation" values for SERDES. These are used by the IB1.2
919 * link negotiation. Macros below are an attempt to keep the values a
920 * little more human-editable.
921 * First, values related to Drive De-emphasis Settings.
922 */
923
924#define NUM_DDS_REGS 6
925#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
926
927#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
928 { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
929 (main_d << 3) | 4 | (ipre_d >> 2), \
930 (main_s << 3) | 4 | (ipre_s >> 2), \
931 ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
932 ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
933
934static struct dds_init {
935 uint8_t reg_vals[NUM_DDS_REGS];
936} dds_init_vals[] = {
937 /* DDR(FDR) SDR(HDR) */
938 /* Vendor recommends below for 3m cable */
939#define DDS_3M 0
940 DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
941 DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
942 DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
943 DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
944 DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
945 DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
946 DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
947 DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
948 DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
949 DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
950 DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
951 DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
952 DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
953 /* Vendor recommends below for 1m cable */
954#define DDS_1M 13
955 DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
956 DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
957 DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
958};
959
960/*
961 * Now the RXEQ section of the table.
962 */
963/* Hardware packs an element number and register address thus: */
964#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
965#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
966 {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
967
968#define RXEQ_VAL_ALL(elt, adr, val) \
969 {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
970
971#define RXEQ_SDR_DFELTH 0
972#define RXEQ_SDR_TLTH 0
973#define RXEQ_SDR_G1CNT_Z1CNT 0x11
974#define RXEQ_SDR_ZCNT 23
975
976static struct rxeq_init {
977 u16 rdesc; /* in form used in SerDesDDSRXEQ */
978 u8 rdata[4];
979} rxeq_init_vals[] = {
980 /* Set Rcv Eq. to Preset node */
981 RXEQ_VAL_ALL(7, 0x27, 0x10),
982 /* Set DFELTHFDR/HDR thresholds */
983 RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
984 RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
985 /* Set TLTHFDR/HDR threshold */
986 RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
987 RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
988 /* Set Preamp setting 2 (ZFR/ZCNT) */
989 RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
990 RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
991 /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
992 RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
993 RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
994 /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
995 RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
996 RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
997};
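/*
 * Sketch of the rdesc packing used in the table above (illustrative only;
 * the helper name is hypothetical): RXEQ_INIT_RDESC() keeps the element
 * number in the low nibble and the register address in the remaining
 * bits, which is how set_rxeq_vals() below unpacks it.  For example,
 * RXEQ_VAL(7, 0x27, ...) gives rdesc = 7 | (0x27 << 4) = 0x277.
 */
static inline void example_rdesc_unpack(u16 rdesc, int *elt, int *reg)
{
	*elt = rdesc & 0xF;	/* element number */
	*reg = rdesc >> 4;	/* register address */
}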
998
999/* There are 17 values from vendor, but IBC only accesses the first 16 */
1000#define DDS_ROWS (16)
1001#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
1002
1003static int qib_sd_setvals(struct qib_devdata *dd)
1004{
1005 int idx, midx;
1006 int min_idx; /* Minimum index for this portion of table */
1007 uint32_t dds_reg_map;
1008 u64 __iomem *taddr, *iaddr;
1009 uint64_t data;
1010 uint64_t sdctl;
1011
1012 taddr = dd->kregbase + kr_serdes_maptable;
1013 iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
1014
1015 /*
1016 * Init the DDS section of the table.
1017 * Each "row" of the table provokes NUM_DDS_REGS writes to the
1018 * registers indicated in DDS_REG_MAP.
1019 */
1020 sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
1021 sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
1022 sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
1023 qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
1024
1025 /*
1026 * Iterate down table within loop for each register to store.
1027 */
1028 dds_reg_map = DDS_REG_MAP;
1029 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1030 data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
1031 writeq(data, iaddr + idx);
1032 mmiowb();
1033 qib_read_kreg32(dd, kr_scratch);
1034 dds_reg_map >>= 4;
1035 for (midx = 0; midx < DDS_ROWS; ++midx) {
1036 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1037 data = dds_init_vals[midx].reg_vals[idx];
1038 writeq(data, daddr);
1039 mmiowb();
1040 qib_read_kreg32(dd, kr_scratch);
1041 } /* End inner for (vals for this reg, each row) */
1042 } /* end outer for (regs to be stored) */
1043
1044 /*
1045 * Init the RXEQ section of the table.
1046 * This runs in a different order, as the pattern of
1047 * register references is more complex, but there are only
1048 * four "data" values per register.
1049 */
1050 min_idx = idx; /* RXEQ indices pick up where DDS left off */
1051 taddr += 0x100; /* RXEQ data is in second half of table */
1052 /* Iterate through RXEQ register addresses */
1053 for (idx = 0; idx < RXEQ_ROWS; ++idx) {
1054 int didx; /* "destination" */
1055 int vidx;
1056
1057 /* didx is offset by min_idx to address RXEQ range of regs */
1058 didx = idx + min_idx;
1059 /* Store the next RXEQ register address */
1060 writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
1061 mmiowb();
1062 qib_read_kreg32(dd, kr_scratch);
1063 /* Iterate through RXEQ values */
1064 for (vidx = 0; vidx < 4; vidx++) {
1065 data = rxeq_init_vals[idx].rdata[vidx];
1066 writeq(data, taddr + (vidx << 6) + idx);
1067 mmiowb();
1068 qib_read_kreg32(dd, kr_scratch);
1069 }
1070 } /* end outer for (Reg-writes for RXEQ) */
1071 return 0;
1072}
1073
1074#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
1075#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
1076#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
1077#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
1078#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
1079#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
1080
1081/*
1082 * Repeat a "store" across all channels of the IB SerDes.
1083 * Although nominally it inherits the "read value" of the last
1084 * channel it modified, the only really useful return is <0 for
1085 * failure, >= 0 for success. The parameter 'loc' is assumed to
1086 * be the location in some channel of the register to be modified.
1087 * The caller can specify use of the "gang write" option of EPB,
1088 * in which case we use the specified channel data for any fields
1089 * not explicitly written.
1090 */
1091static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
1092 int mask)
1093{
1094 int ret = -1;
1095 int chnl;
1096
1097 if (loc & EPB_GLOBAL_WR) {
1098 /*
1099 * Our caller has assured us that we can set all four
1100 * channels at once. Trust that. If mask is not 0xFF,
1101 * we will read the _specified_ channel for our starting
1102 * value.
1103 */
1104 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1105 chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
1106 if (mask != 0xFF) {
1107 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
1108 loc & ~EPB_GLOBAL_WR, 0, 0);
1109 if (ret < 0) {
1110 int sloc = loc >> EPB_ADDR_SHF;
1111
1112 qib_dev_err(dd, "pre-read failed: elt %d,"
1113 " addr 0x%X, chnl %d\n",
1114 (sloc & 0xF),
1115 (sloc >> 9) & 0x3f, chnl);
1116 return ret;
1117 }
1118 val = (ret & ~mask) | (val & mask);
1119 }
1120 loc &= ~(7 << (4+EPB_ADDR_SHF));
1121 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1122 if (ret < 0) {
1123 int sloc = loc >> EPB_ADDR_SHF;
1124
1125 qib_dev_err(dd, "Global WR failed: elt %d,"
1126 " addr 0x%X, val %02X\n",
1127 (sloc & 0xF), (sloc >> 9) & 0x3f, val);
1128 }
1129 return ret;
1130 }
1131 /* Clear "channel" and set CS so we can simply iterate */
1132 loc &= ~(7 << (4+EPB_ADDR_SHF));
1133 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1134 for (chnl = 0; chnl < 4; ++chnl) {
1135 int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
1136
1137 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
1138 if (ret < 0) {
1139 int sloc = loc >> EPB_ADDR_SHF;
1140
1141 qib_dev_err(dd, "Write failed: elt %d,"
1142 " addr 0x%X, chnl %d, val 0x%02X,"
1143 " mask 0x%02X\n",
1144 (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
1145 val & 0xFF, mask & 0xFF);
1146 break;
1147 }
1148 }
1149 return ret;
1150}
1151
1152/*
1153 * Set the Tx values normally modified by IBC in IB1.2 mode to default
1154 * values, as gotten from first row of init table.
1155 */
1156static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
1157{
1158 int ret;
1159 int idx, reg, data;
1160 uint32_t regmap;
1161
1162 regmap = DDS_REG_MAP;
1163 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1164 reg = (regmap & 0xF);
1165 regmap >>= 4;
1166 data = ddi->reg_vals[idx];
1167 /* Vendor says RMW not needed for these regs, use 0xFF mask */
1168 ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
1169 if (ret < 0)
1170 break;
1171 }
1172 return ret;
1173}
1174
1175/*
1176 * Set the Rx values normally modified by IBC in IB1.2 mode to default
1177 * values, as gotten from selected column of init table.
1178 */
1179static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
1180{
1181 int ret;
1182 int ridx;
1183 int cnt = ARRAY_SIZE(rxeq_init_vals);
1184
1185 for (ridx = 0; ridx < cnt; ++ridx) {
1186 int elt, reg, val, loc;
1187
1188 elt = rxeq_init_vals[ridx].rdesc & 0xF;
1189 reg = rxeq_init_vals[ridx].rdesc >> 4;
1190 loc = EPB_LOC(0, elt, reg);
1191 val = rxeq_init_vals[ridx].rdata[vsel];
1192 /* mask of 0xFF, because hardware does full-byte store. */
1193 ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
1194 if (ret < 0)
1195 break;
1196 }
1197 return ret;
1198}
1199
1200/*
1201 * Set the default values (row 0) for DDR Driver De-emphasis.
1202 * we do this initially and whenever we turn off IB-1.2
1203 *
1204 * The "default" values for Rx equalization are also stored to
1205 * SerDes registers. Formerly (and still default), we used set 2.
1206 * For experimenting with cables and link-partners, we allow changing
1207 * that via a module parameter.
1208 */
1209static unsigned qib_rxeq_set = 2;
1210module_param_named(rxeq_default_set, qib_rxeq_set, uint,
1211 S_IWUSR | S_IRUGO);
1212MODULE_PARM_DESC(rxeq_default_set,
1213 "Which set [0..3] of Rx Equalization values is default");
1214
1215static int qib_internal_presets(struct qib_devdata *dd)
1216{
1217 int ret = 0;
1218
1219 ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
1220
1221 if (ret < 0)
1222 qib_dev_err(dd, "Failed to set default DDS values\n");
1223 ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
1224 if (ret < 0)
1225 qib_dev_err(dd, "Failed to set default RXEQ values\n");
1226 return ret;
1227}
1228
1229int qib_sd7220_presets(struct qib_devdata *dd)
1230{
1231 int ret = 0;
1232
1233 if (!dd->cspec->presets_needed)
1234 return ret;
1235 dd->cspec->presets_needed = 0;
1236 /* Assert uC reset, so we don't clash with it. */
1237 qib_ibsd_reset(dd, 1);
1238 udelay(2);
1239 qib_sd_trimdone_monitor(dd, "link-down");
1240
1241 ret = qib_internal_presets(dd);
1242 return ret;
1243}
1244
1245static int qib_sd_trimself(struct qib_devdata *dd, int val)
1246{
1247 int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
1248
1249 return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1250}
1251
1252static int qib_sd_early(struct qib_devdata *dd)
1253{
1254 int ret;
1255
1256 ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
1257 if (ret < 0)
1258 goto bail;
1259 ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
1260 if (ret < 0)
1261 goto bail;
1262 ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
1263bail:
1264 return ret;
1265}
1266
1267#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
1268#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
1269#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
1270
1271static int qib_sd_dactrim(struct qib_devdata *dd)
1272{
1273 int ret;
1274
1275 ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
1276 if (ret < 0)
1277 goto bail;
1278
1279 /* more fine-tuning of what will be default */
1280 ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
1281 if (ret < 0)
1282 goto bail;
1283
1284 ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
1285 if (ret < 0)
1286 goto bail;
1287
1288 ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
1289 if (ret < 0)
1290 goto bail;
1291
1292 ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
1293 if (ret < 0)
1294 goto bail;
1295
1296 /*
1297 * Delay for max possible number of steps, with slop.
1298 * Each step is about 4usec.
1299 */
1300 udelay(415);
1301
1302 ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
1303
1304bail:
1305 return ret;
1306}
1307
1308#define RELOCK_FIRST_MS 3
1309#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
1310void toggle_7220_rclkrls(struct qib_devdata *dd)
1311{
1312 int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
1313 int ret;
1314
1315 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1316 if (ret < 0)
1317 qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
1318 else {
1319 udelay(1);
1320 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
1321 }
1322 /* And again for good measure */
1323 udelay(1);
1324 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1325 if (ret < 0)
1326 qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
1327 else {
1328 udelay(1);
1329 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
1330 }
1331 /* Now reset xgxs and IBC to complete the recovery */
1332 dd->f_xgxs_reset(dd->pport);
1333}
1334
1335/*
1336 * Shut down the timer that polls for relock occasions, if needed.
1337 * This is "hooked" from qib_7220_quiet_serdes(), which is called
1338 * just before qib_shutdown_device() in qib_driver.c shuts down all
1339 * the other timers
1340 */
1341void shutdown_7220_relock_poll(struct qib_devdata *dd)
1342{
1343 if (dd->cspec->relock_timer_active)
1344 del_timer_sync(&dd->cspec->relock_timer);
1345}
1346
1347static unsigned qib_relock_by_timer = 1;
1348module_param_named(relock_by_timer, qib_relock_by_timer, uint,
1349 S_IWUSR | S_IRUGO);
1350MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
1351
1352static void qib_run_relock(unsigned long opaque)
1353{
1354 struct qib_devdata *dd = (struct qib_devdata *)opaque;
1355 struct qib_pportdata *ppd = dd->pport;
1356 struct qib_chip_specific *cs = dd->cspec;
1357 int timeoff;
1358
1359 /*
1360 * Check link-training state for "stuck" state, when down.
1361 * If found, try relock and schedule another try at
1362 * exponentially growing delay, maxed at one second.
1363 * If not stuck, our work is done.
1364 */
1365 if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
1366 (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
1367 QIBL_LINKACTIVE))) {
1368 if (qib_relock_by_timer) {
1369 if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
1370 toggle_7220_rclkrls(dd);
1371 }
1372 /* re-set timer for next check */
1373 timeoff = cs->relock_interval << 1;
1374 if (timeoff > HZ)
1375 timeoff = HZ;
1376 cs->relock_interval = timeoff;
1377 } else
1378 timeoff = HZ;
1379 mod_timer(&cs->relock_timer, jiffies + timeoff);
1380}
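/*
 * Sketch of the relock back-off above (illustrative only; the helper name
 * is hypothetical): the interval starts at RELOCK_FIRST_MS (converted to
 * jiffies in set_7220_relock_poll() below) and doubles on every pass
 * while the link stays down, capped at HZ, i.e. roughly 3 ms, 6 ms,
 * 12 ms, ... up to one second.
 */
static inline int example_next_relock_interval(int cur)
{
	int next = cur << 1;		/* exponential back-off */

	return next > HZ ? HZ : next;	/* capped at one second */
}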
1381
1382void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
1383{
1384 struct qib_chip_specific *cs = dd->cspec;
1385
1386 if (ibup) {
1387 /* We are now up, relax timer to 1 second interval */
1388 if (cs->relock_timer_active) {
1389 cs->relock_interval = HZ;
1390 mod_timer(&cs->relock_timer, jiffies + HZ);
1391 }
1392 } else {
1393 /* Transition to down, (re-)set timer to short interval. */
1394 unsigned int timeout;
1395
1396 timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
1397 if (timeout == 0)
1398 timeout = 1;
1399 /* If timer has not yet been started, do so. */
1400 if (!cs->relock_timer_active) {
1401 cs->relock_timer_active = 1;
1402 init_timer(&cs->relock_timer);
1403 cs->relock_timer.function = qib_run_relock;
1404 cs->relock_timer.data = (unsigned long) dd;
1405 cs->relock_interval = timeout;
1406 cs->relock_timer.expires = jiffies + timeout;
1407 add_timer(&cs->relock_timer);
1408 } else {
1409 cs->relock_interval = timeout;
1410 mod_timer(&cs->relock_timer, jiffies + timeout);
1411 }
1412 }
1413}
diff --git a/drivers/infiniband/hw/qib/qib_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c
new file mode 100644
index 000000000000..a1118fbd2370
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c
@@ -0,0 +1,1081 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains the memory image from the vendor, to be copied into
35 * the IB SERDES of the IBA7220 during initialization.
36 * The file also includes the two functions which use this image.
37 */
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "qib.h"
42#include "qib_7220.h"
43
44static unsigned char qib_sd7220_ib_img[] = {
45/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
46 0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
47/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
48 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x08,
49/*0020*/0x53, 0xF9, 0xF7, 0xE4, 0xF5, 0xFE, 0x80, 0x08,
50 0x7F, 0x0A, 0x12, 0x17, 0x31, 0x12, 0x0E, 0xA2,
51/*0030*/0x75, 0xFC, 0x08, 0xE4, 0xF5, 0xFD, 0xE5, 0xE7,
52 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0x22, 0x00,
53/*0040*/0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x75,
54 0x51, 0x01, 0xE4, 0xF5, 0x52, 0xF5, 0x53, 0xF5,
55/*0050*/0x52, 0xF5, 0x7E, 0x7F, 0x04, 0x02, 0x04, 0x38,
56 0xC2, 0x36, 0x05, 0x52, 0xE5, 0x52, 0xD3, 0x94,
57/*0060*/0x0C, 0x40, 0x05, 0x75, 0x52, 0x01, 0xD2, 0x36,
58 0x90, 0x07, 0x0C, 0x74, 0x07, 0xF0, 0xA3, 0x74,
59/*0070*/0xFF, 0xF0, 0xE4, 0xF5, 0x0C, 0xA3, 0xF0, 0x90,
60 0x07, 0x14, 0xF0, 0xA3, 0xF0, 0x75, 0x0B, 0x20,
61/*0080*/0xF5, 0x09, 0xE4, 0xF5, 0x08, 0xE5, 0x08, 0xD3,
62 0x94, 0x30, 0x40, 0x03, 0x02, 0x04, 0x04, 0x12,
63/*0090*/0x00, 0x06, 0x15, 0x0B, 0xE5, 0x08, 0x70, 0x04,
64 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x09,
65/*00A0*/0x70, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00,
66 0xEE, 0x5F, 0x60, 0x05, 0x12, 0x18, 0x71, 0xD2,
67/*00B0*/0x35, 0x53, 0xE1, 0xF7, 0xE5, 0x08, 0x45, 0x09,
68 0xFF, 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24,
69/*00C0*/0x83, 0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83,
70 0xEF, 0xF0, 0x85, 0xE2, 0x20, 0xE5, 0x52, 0xD3,
71/*00D0*/0x94, 0x01, 0x40, 0x0D, 0x12, 0x19, 0xF3, 0xE0,
72 0x54, 0xA0, 0x64, 0x40, 0x70, 0x03, 0x02, 0x03,
73/*00E0*/0xFB, 0x53, 0xF9, 0xF8, 0x90, 0x94, 0x70, 0xE4,
74 0xF0, 0xE0, 0xF5, 0x10, 0xAF, 0x09, 0x12, 0x1E,
75/*00F0*/0xB3, 0xAF, 0x08, 0xEF, 0x44, 0x08, 0xF5, 0x82,
76 0x75, 0x83, 0x80, 0xE0, 0xF5, 0x29, 0xEF, 0x44,
77/*0100*/0x07, 0x12, 0x1A, 0x3C, 0xF5, 0x22, 0x54, 0x40,
78 0xD3, 0x94, 0x00, 0x40, 0x1E, 0xE5, 0x29, 0x54,
79/*0110*/0xF0, 0x70, 0x21, 0x12, 0x19, 0xF3, 0xE0, 0x44,
80 0x80, 0xF0, 0xE5, 0x22, 0x54, 0x30, 0x65, 0x08,
81/*0120*/0x70, 0x09, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xBF,
82 0xF0, 0x80, 0x09, 0x12, 0x19, 0xF3, 0x74, 0x40,
83/*0130*/0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12, 0x75,
84 0x83, 0xAE, 0x74, 0xFF, 0xF0, 0xAF, 0x08, 0x7E,
85/*0140*/0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0xE0, 0xFD,
86 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24, 0x81,
87/*0150*/0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83, 0xED,
88 0xF0, 0x90, 0x07, 0x0E, 0xE0, 0x04, 0xF0, 0xEF,
89/*0160*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0x98, 0xE0,
90 0xF5, 0x28, 0x12, 0x1A, 0x23, 0x40, 0x0C, 0x12,
91/*0170*/0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12, 0x1A, 0x32,
92 0x02, 0x03, 0xF6, 0xAF, 0x08, 0x7E, 0x00, 0x74,
93/*0180*/0x80, 0xCD, 0xEF, 0xCD, 0x8D, 0x82, 0xF5, 0x83,
94 0xE0, 0x30, 0xE0, 0x0A, 0x12, 0x19, 0xF3, 0xE0,
95/*0190*/0x44, 0x20, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x19,
96 0xF3, 0xE0, 0x54, 0xDF, 0xF0, 0xEE, 0x44, 0xAE,
97/*01A0*/0x12, 0x1A, 0x43, 0x30, 0xE4, 0x03, 0x02, 0x03,
98 0xFB, 0x74, 0x9E, 0x12, 0x1A, 0x05, 0x20, 0xE0,
99/*01B0*/0x03, 0x02, 0x03, 0xFB, 0x8F, 0x82, 0x8E, 0x83,
100 0xE0, 0x20, 0xE0, 0x03, 0x02, 0x03, 0xFB, 0x12,
101/*01C0*/0x19, 0xF3, 0xE0, 0x44, 0x10, 0xF0, 0xE5, 0xE3,
102 0x20, 0xE7, 0x08, 0xE5, 0x08, 0x12, 0x1A, 0x3A,
103/*01D0*/0x44, 0x04, 0xF0, 0xAF, 0x08, 0x7E, 0x00, 0xEF,
104 0x12, 0x1A, 0x3A, 0x20, 0xE2, 0x34, 0x12, 0x19,
105/*01E0*/0xF3, 0xE0, 0x44, 0x08, 0xF0, 0xE5, 0xE4, 0x30,
106 0xE6, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
107/*01F0*/0xE5, 0x7E, 0xC3, 0x94, 0x04, 0x50, 0x04, 0x7C,
108 0x01, 0x80, 0x02, 0x7C, 0x00, 0xEC, 0x4D, 0x60,
109/*0200*/0x05, 0xC2, 0x35, 0x02, 0x03, 0xFB, 0xEE, 0x44,
110 0xD2, 0x12, 0x1A, 0x43, 0x44, 0x40, 0xF0, 0x02,
111/*0210*/0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xF7,
112 0xF0, 0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0,
113/*0220*/0x54, 0xBF, 0xF0, 0x90, 0x07, 0x14, 0xE0, 0x04,
114 0xF0, 0xE5, 0x7E, 0x70, 0x03, 0x75, 0x7E, 0x01,
115/*0230*/0xAF, 0x08, 0x7E, 0x00, 0x12, 0x1A, 0x23, 0x40,
116 0x12, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12,
117/*0240*/0x19, 0xF2, 0xE0, 0x54, 0x02, 0x12, 0x1A, 0x32,
118 0x02, 0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x44,
119/*0250*/0x02, 0x12, 0x19, 0xF2, 0xE0, 0x54, 0xFE, 0xF0,
120 0xC2, 0x35, 0xEE, 0x44, 0x8A, 0x8F, 0x82, 0xF5,
121/*0260*/0x83, 0xE0, 0xF5, 0x17, 0x54, 0x8F, 0x44, 0x40,
122 0xF0, 0x74, 0x90, 0xFC, 0xE5, 0x08, 0x44, 0x07,
123/*0270*/0xFD, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0x54, 0x3F,
124 0x90, 0x07, 0x02, 0xF0, 0xE0, 0x54, 0xC0, 0x8D,
125/*0280*/0x82, 0x8C, 0x83, 0xF0, 0x74, 0x92, 0x12, 0x1A,
126 0x05, 0x90, 0x07, 0x03, 0x12, 0x1A, 0x19, 0x74,
127/*0290*/0x82, 0x12, 0x1A, 0x05, 0x90, 0x07, 0x04, 0x12,
128 0x1A, 0x19, 0x74, 0xB4, 0x12, 0x1A, 0x05, 0x90,
129/*02A0*/0x07, 0x05, 0x12, 0x1A, 0x19, 0x74, 0x94, 0xFE,
130 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A, 0xF5,
131/*02B0*/0x10, 0x30, 0xE0, 0x04, 0xD2, 0x37, 0x80, 0x02,
132 0xC2, 0x37, 0xE5, 0x10, 0x54, 0x7F, 0x8F, 0x82,
133/*02C0*/0x8E, 0x83, 0xF0, 0x30, 0x44, 0x30, 0x12, 0x1A,
134 0x03, 0x54, 0x80, 0xD3, 0x94, 0x00, 0x40, 0x04,
135/*02D0*/0xD2, 0x39, 0x80, 0x02, 0xC2, 0x39, 0x8F, 0x82,
136 0x8E, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x12, 0x1A,
137/*02E0*/0x03, 0x54, 0x40, 0xD3, 0x94, 0x00, 0x40, 0x04,
138 0xD2, 0x3A, 0x80, 0x02, 0xC2, 0x3A, 0x8F, 0x82,
139/*02F0*/0x8E, 0x83, 0xE0, 0x44, 0x40, 0xF0, 0x74, 0x92,
140 0xFE, 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A,
141/*0300*/0x30, 0xE7, 0x04, 0xD2, 0x38, 0x80, 0x02, 0xC2,
142 0x38, 0x8F, 0x82, 0x8E, 0x83, 0xE0, 0x54, 0x7F,
143/*0310*/0xF0, 0x12, 0x1E, 0x46, 0xE4, 0xF5, 0x0A, 0x20,
144 0x03, 0x02, 0x80, 0x03, 0x30, 0x43, 0x03, 0x12,
145/*0320*/0x19, 0x95, 0x20, 0x02, 0x02, 0x80, 0x03, 0x30,
146 0x42, 0x03, 0x12, 0x0C, 0x8F, 0x30, 0x30, 0x06,
147/*0330*/0x12, 0x19, 0x95, 0x12, 0x0C, 0x8F, 0x12, 0x0D,
148 0x47, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xFB, 0xF0,
149/*0340*/0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40, 0x46, 0x43,
150 0xE1, 0x08, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x04,
151/*0350*/0xF0, 0xE5, 0xE4, 0x20, 0xE7, 0x2A, 0x12, 0x1A,
152 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
153/*0360*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
154 0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
155/*0370*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
156 0x5E, 0x60, 0x05, 0x12, 0x1D, 0xD7, 0x80, 0x17,
157/*0380*/0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x44,
158 0x08, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12,
159/*0390*/0x75, 0x83, 0xD2, 0xE0, 0x54, 0xF7, 0xF0, 0x12,
160 0x1E, 0x46, 0x7F, 0x08, 0x12, 0x17, 0x31, 0x74,
161/*03A0*/0x8E, 0xFE, 0x12, 0x1A, 0x12, 0x8E, 0x83, 0xE0,
162 0xF5, 0x10, 0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44,
163/*03B0*/0x01, 0xFF, 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07,
164 0xF5, 0x82, 0xEF, 0xF0, 0xE5, 0x10, 0x54, 0xFE,
165/*03C0*/0xFF, 0xED, 0x44, 0x07, 0xF5, 0x82, 0xEF, 0x12,
166 0x1A, 0x11, 0x75, 0x83, 0x86, 0xE0, 0x44, 0x10,
167/*03D0*/0x12, 0x1A, 0x11, 0xE0, 0x44, 0x10, 0xF0, 0x12,
168 0x19, 0xF3, 0xE0, 0x54, 0xFD, 0x44, 0x01, 0xFF,
169/*03E0*/0x12, 0x19, 0xF3, 0xEF, 0x12, 0x1A, 0x32, 0x30,
170 0x32, 0x0C, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
171/*03F0*/0x75, 0x83, 0x82, 0x74, 0x05, 0xF0, 0xAF, 0x0B,
172 0x12, 0x18, 0xD7, 0x74, 0x10, 0x25, 0x08, 0xF5,
173/*0400*/0x08, 0x02, 0x00, 0x85, 0x05, 0x09, 0xE5, 0x09,
174 0xD3, 0x94, 0x07, 0x50, 0x03, 0x02, 0x00, 0x82,
175/*0410*/0xE5, 0x7E, 0xD3, 0x94, 0x00, 0x40, 0x04, 0x7F,
176 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x7E, 0xC3,
177/*0420*/0x94, 0xFA, 0x50, 0x04, 0x7E, 0x01, 0x80, 0x02,
178 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x02, 0x05, 0x7E,
179/*0430*/0x30, 0x35, 0x0B, 0x43, 0xE1, 0x01, 0x7F, 0x09,
180 0x12, 0x17, 0x31, 0x02, 0x00, 0x58, 0x53, 0xE1,
181/*0440*/0xFE, 0x02, 0x00, 0x58, 0x8E, 0x6A, 0x8F, 0x6B,
182 0x8C, 0x6C, 0x8D, 0x6D, 0x75, 0x6E, 0x01, 0x75,
183/*0450*/0x6F, 0x01, 0x75, 0x70, 0x01, 0xE4, 0xF5, 0x73,
184 0xF5, 0x74, 0xF5, 0x75, 0x90, 0x07, 0x2F, 0xF0,
185/*0460*/0xF5, 0x3C, 0xF5, 0x3E, 0xF5, 0x46, 0xF5, 0x47,
186 0xF5, 0x3D, 0xF5, 0x3F, 0xF5, 0x6F, 0xE5, 0x6F,
187/*0470*/0x70, 0x0F, 0xE5, 0x6B, 0x45, 0x6A, 0x12, 0x07,
188 0x2A, 0x75, 0x83, 0x80, 0x74, 0x3A, 0xF0, 0x80,
189/*0480*/0x09, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74,
190 0x1A, 0xF0, 0xE4, 0xF5, 0x6E, 0xC3, 0x74, 0x3F,
191/*0490*/0x95, 0x6E, 0xFF, 0x12, 0x08, 0x65, 0x75, 0x83,
192 0x82, 0xEF, 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x08,
193/*04A0*/0xC6, 0xE5, 0x33, 0xF0, 0x12, 0x08, 0xFA, 0x12,
194 0x08, 0xB1, 0x40, 0xE1, 0xE5, 0x6F, 0x70, 0x0B,
195/*04B0*/0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74, 0x36,
196 0xF0, 0x80, 0x09, 0x12, 0x07, 0x2A, 0x75, 0x83,
197/*04C0*/0x80, 0x74, 0x16, 0xF0, 0x75, 0x6E, 0x01, 0x12,
198 0x07, 0x2A, 0x75, 0x83, 0xB4, 0xE5, 0x6E, 0xF0,
199/*04D0*/0x12, 0x1A, 0x4D, 0x74, 0x3F, 0x25, 0x6E, 0xF5,
200 0x82, 0xE4, 0x34, 0x00, 0xF5, 0x83, 0xE5, 0x33,
201/*04E0*/0xF0, 0x74, 0xBF, 0x25, 0x6E, 0xF5, 0x82, 0xE4,
202 0x34, 0x00, 0x12, 0x08, 0xB1, 0x40, 0xD8, 0xE4,
203/*04F0*/0xF5, 0x70, 0xF5, 0x46, 0xF5, 0x47, 0xF5, 0x6E,
204 0x12, 0x08, 0xFA, 0xF5, 0x83, 0xE0, 0xFE, 0x12,
205/*0500*/0x08, 0xC6, 0xE0, 0x7C, 0x00, 0x24, 0x00, 0xFF,
206 0xEC, 0x3E, 0xFE, 0xAD, 0x3B, 0xD3, 0xEF, 0x9D,
207/*0510*/0xEE, 0x9C, 0x50, 0x04, 0x7B, 0x01, 0x80, 0x02,
208 0x7B, 0x00, 0xE5, 0x70, 0x70, 0x04, 0x7A, 0x01,
209/*0520*/0x80, 0x02, 0x7A, 0x00, 0xEB, 0x5A, 0x60, 0x06,
210 0x85, 0x6E, 0x46, 0x75, 0x70, 0x01, 0xD3, 0xEF,
211/*0530*/0x9D, 0xEE, 0x9C, 0x50, 0x04, 0x7F, 0x01, 0x80,
212 0x02, 0x7F, 0x00, 0xE5, 0x70, 0xB4, 0x01, 0x04,
213/*0540*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF, 0x5E,
214 0x60, 0x03, 0x85, 0x6E, 0x47, 0x05, 0x6E, 0xE5,
215/*0550*/0x6E, 0x64, 0x7F, 0x70, 0xA3, 0xE5, 0x46, 0x60,
216 0x05, 0xE5, 0x47, 0xB4, 0x7E, 0x03, 0x85, 0x46,
217/*0560*/0x47, 0xE5, 0x6F, 0x70, 0x08, 0x85, 0x46, 0x76,
218 0x85, 0x47, 0x77, 0x80, 0x0E, 0xC3, 0x74, 0x7F,
219/*0570*/0x95, 0x46, 0xF5, 0x78, 0xC3, 0x74, 0x7F, 0x95,
220 0x47, 0xF5, 0x79, 0xE5, 0x6F, 0x70, 0x37, 0xE5,
221/*0580*/0x46, 0x65, 0x47, 0x70, 0x0C, 0x75, 0x73, 0x01,
222 0x75, 0x74, 0x01, 0xF5, 0x3C, 0xF5, 0x3D, 0x80,
223/*0590*/0x35, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5, 0x47, 0x95,
224 0x46, 0xF5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
225/*05A0*/0x46, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
226 0xE4, 0xF5, 0x3D, 0x80, 0x40, 0xC3, 0x74, 0x3F,
227/*05B0*/0x95, 0x72, 0xF5, 0x3D, 0x80, 0x37, 0xE5, 0x46,
228 0x65, 0x47, 0x70, 0x0F, 0x75, 0x73, 0x01, 0x75,
229/*05C0*/0x75, 0x01, 0xF5, 0x3E, 0xF5, 0x3F, 0x75, 0x4E,
230 0x01, 0x80, 0x22, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5,
231/*05D0*/0x47, 0x95, 0x46, 0xF5, 0x3E, 0xC3, 0x13, 0xF5,
232 0x71, 0x25, 0x46, 0xF5, 0x72, 0xD3, 0x94, 0x3F,
233/*05E0*/0x50, 0x05, 0xE4, 0xF5, 0x3F, 0x80, 0x06, 0xE5,
234 0x72, 0x24, 0xC1, 0xF5, 0x3F, 0x05, 0x6F, 0xE5,
235/*05F0*/0x6F, 0xC3, 0x94, 0x02, 0x50, 0x03, 0x02, 0x04,
236 0x6E, 0xE5, 0x6D, 0x45, 0x6C, 0x70, 0x02, 0x80,
237/*0600*/0x04, 0xE5, 0x74, 0x45, 0x75, 0x90, 0x07, 0x2F,
238 0xF0, 0x7F, 0x01, 0xE5, 0x3E, 0x60, 0x04, 0xE5,
239/*0610*/0x3C, 0x70, 0x14, 0xE4, 0xF5, 0x3C, 0xF5, 0x3D,
240 0xF5, 0x3E, 0xF5, 0x3F, 0x12, 0x08, 0xD2, 0x70,
241/*0620*/0x04, 0xF0, 0x02, 0x06, 0xA4, 0x80, 0x7A, 0xE5,
242 0x3C, 0xC3, 0x95, 0x3E, 0x40, 0x07, 0xE5, 0x3C,
243/*0630*/0x95, 0x3E, 0xFF, 0x80, 0x06, 0xC3, 0xE5, 0x3E,
244 0x95, 0x3C, 0xFF, 0xE5, 0x76, 0xD3, 0x95, 0x79,
245/*0640*/0x40, 0x05, 0x85, 0x76, 0x7A, 0x80, 0x03, 0x85,
246 0x79, 0x7A, 0xE5, 0x77, 0xC3, 0x95, 0x78, 0x50,
247/*0650*/0x05, 0x85, 0x77, 0x7B, 0x80, 0x03, 0x85, 0x78,
248 0x7B, 0xE5, 0x7B, 0xD3, 0x95, 0x7A, 0x40, 0x30,
249/*0660*/0xE5, 0x7B, 0x95, 0x7A, 0xF5, 0x3C, 0xF5, 0x3E,
250 0xC3, 0xE5, 0x7B, 0x95, 0x7A, 0x90, 0x07, 0x19,
251/*0670*/0xF0, 0xE5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
252 0x7A, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
253/*0680*/0xE4, 0xF5, 0x3D, 0x80, 0x1F, 0xC3, 0x74, 0x3F,
254 0x95, 0x72, 0xF5, 0x3D, 0xF5, 0x3F, 0x80, 0x14,
255/*0690*/0xE4, 0xF5, 0x3C, 0xF5, 0x3E, 0x90, 0x07, 0x19,
256 0xF0, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
257/*06A0*/0x03, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x65, 0x75,
258 0x83, 0xD0, 0xE0, 0x54, 0x0F, 0xFE, 0xAD, 0x3C,
259/*06B0*/0x70, 0x02, 0x7E, 0x07, 0xBE, 0x0F, 0x02, 0x7E,
260 0x80, 0xEE, 0xFB, 0xEF, 0xD3, 0x9B, 0x74, 0x80,
261/*06C0*/0xF8, 0x98, 0x40, 0x1F, 0xE4, 0xF5, 0x3C, 0xF5,
262 0x3E, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
263/*06D0*/0x12, 0x74, 0x01, 0xF0, 0xE5, 0x08, 0xFB, 0xEB,
264 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xD2, 0xE0,
265/*06E0*/0x44, 0x10, 0xF0, 0xE5, 0x08, 0xFB, 0xEB, 0x44,
266 0x09, 0xF5, 0x82, 0x75, 0x83, 0x9E, 0xED, 0xF0,
267/*06F0*/0xEB, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xCA,
268 0xED, 0xF0, 0x12, 0x08, 0x65, 0x75, 0x83, 0xCC,
269/*0700*/0xEF, 0xF0, 0x22, 0xE5, 0x08, 0x44, 0x07, 0xF5,
270 0x82, 0x75, 0x83, 0xBC, 0xE0, 0x54, 0xF0, 0xF0,
271/*0710*/0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83,
272 0xBE, 0xE0, 0x54, 0xF0, 0xF0, 0xE5, 0x08, 0x44,
273/*0720*/0x07, 0xF5, 0x82, 0x75, 0x83, 0xC0, 0xE0, 0x54,
274 0xF0, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
275/*0730*/0x22, 0xF0, 0x90, 0x07, 0x28, 0xE0, 0xFE, 0xA3,
276 0xE0, 0xF5, 0x82, 0x8E, 0x83, 0x22, 0x85, 0x42,
277/*0740*/0x42, 0x85, 0x41, 0x41, 0x85, 0x40, 0x40, 0x74,
278 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E, 0xF5,
279/*0750*/0x83, 0xE5, 0x42, 0xF0, 0x74, 0xE0, 0x2F, 0xF5,
280 0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xE5,
281/*0760*/0x42, 0x29, 0xFD, 0xE4, 0x33, 0xFC, 0xE5, 0x3C,
282 0xC3, 0x9D, 0xEC, 0x64, 0x80, 0xF8, 0x74, 0x80,
283/*0770*/0x98, 0x22, 0xF5, 0x83, 0xE0, 0x90, 0x07, 0x22,
284 0x54, 0x1F, 0xFD, 0xE0, 0xFA, 0xA3, 0xE0, 0xF5,
285/*0780*/0x82, 0x8A, 0x83, 0xED, 0xF0, 0x22, 0x90, 0x07,
286 0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xF5, 0x82, 0x8C,
287/*0790*/0x83, 0x22, 0x90, 0x07, 0x24, 0xFF, 0xED, 0x44,
288 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22, 0x85,
289/*07A0*/0x38, 0x38, 0x85, 0x39, 0x39, 0x85, 0x3A, 0x3A,
290 0x74, 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E,
291/*07B0*/0xF5, 0x83, 0x22, 0x90, 0x07, 0x26, 0xFF, 0xED,
292 0x44, 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22,
293/*07C0*/0xF0, 0x74, 0xA0, 0x2F, 0xF5, 0x82, 0x74, 0x02,
294 0x3E, 0xF5, 0x83, 0x22, 0x74, 0xC0, 0x25, 0x11,
295/*07D0*/0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0x22,
296 0x74, 0x00, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
297/*07E0*/0x02, 0xF5, 0x83, 0x22, 0x74, 0x60, 0x25, 0x11,
298 0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
299/*07F0*/0x74, 0x80, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
300 0x03, 0xF5, 0x83, 0x22, 0x74, 0xE0, 0x25, 0x11,
301/*0800*/0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
302 0x74, 0x40, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
303/*0810*/0x06, 0xF5, 0x83, 0x22, 0x74, 0x80, 0x2F, 0xF5,
304 0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xAF,
305/*0820*/0x08, 0x7E, 0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82,
306 0x22, 0xF5, 0x83, 0xE5, 0x82, 0x44, 0x07, 0xF5,
307/*0830*/0x82, 0xE5, 0x40, 0xF0, 0x22, 0x74, 0x40, 0x25,
308 0x11, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
309/*0840*/0x22, 0x74, 0xC0, 0x25, 0x11, 0xF5, 0x82, 0xE4,
310 0x34, 0x03, 0xF5, 0x83, 0x22, 0x74, 0x00, 0x25,
311/*0850*/0x11, 0xF5, 0x82, 0xE4, 0x34, 0x06, 0xF5, 0x83,
312 0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
313/*0860*/0x34, 0x06, 0xF5, 0x83, 0x22, 0xE5, 0x08, 0xFD,
314 0xED, 0x44, 0x07, 0xF5, 0x82, 0x22, 0xE5, 0x41,
315/*0870*/0xF0, 0xE5, 0x65, 0x64, 0x01, 0x45, 0x64, 0x22,
316 0x7E, 0x00, 0xFB, 0x7A, 0x00, 0xFD, 0x7C, 0x00,
317/*0880*/0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
318 0x34, 0x02, 0x22, 0x74, 0xA0, 0x25, 0x11, 0xF5,
319/*0890*/0x82, 0xE4, 0x34, 0x03, 0x22, 0x85, 0x3E, 0x42,
320 0x85, 0x3F, 0x41, 0x8F, 0x40, 0x22, 0x85, 0x3C,
321/*08A0*/0x42, 0x85, 0x3D, 0x41, 0x8F, 0x40, 0x22, 0x75,
322 0x45, 0x3F, 0x90, 0x07, 0x20, 0xE4, 0xF0, 0xA3,
323/*08B0*/0x22, 0xF5, 0x83, 0xE5, 0x32, 0xF0, 0x05, 0x6E,
324 0xE5, 0x6E, 0xC3, 0x94, 0x40, 0x22, 0xF0, 0xE5,
325/*08C0*/0x08, 0x44, 0x06, 0xF5, 0x82, 0x22, 0x74, 0x00,
326 0x25, 0x6E, 0xF5, 0x82, 0xE4, 0x34, 0x00, 0xF5,
327/*08D0*/0x83, 0x22, 0xE5, 0x6D, 0x45, 0x6C, 0x90, 0x07,
328 0x2F, 0x22, 0xE4, 0xF9, 0xE5, 0x3C, 0xD3, 0x95,
329/*08E0*/0x3E, 0x22, 0x74, 0x80, 0x2E, 0xF5, 0x82, 0xE4,
330 0x34, 0x02, 0xF5, 0x83, 0xE0, 0x22, 0x74, 0xA0,
331/*08F0*/0x2E, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
332 0xE0, 0x22, 0x74, 0x80, 0x25, 0x6E, 0xF5, 0x82,
333/*0900*/0xE4, 0x34, 0x00, 0x22, 0x25, 0x42, 0xFD, 0xE4,
334 0x33, 0xFC, 0x22, 0x85, 0x42, 0x42, 0x85, 0x41,
335/*0910*/0x41, 0x85, 0x40, 0x40, 0x22, 0xED, 0x4C, 0x60,
336 0x03, 0x02, 0x09, 0xE5, 0xEF, 0x4E, 0x70, 0x37,
337/*0920*/0x90, 0x07, 0x26, 0x12, 0x07, 0x89, 0xE0, 0xFD,
338 0x12, 0x07, 0xCC, 0xED, 0xF0, 0x90, 0x07, 0x28,
339/*0930*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xD8,
340 0xED, 0xF0, 0x12, 0x07, 0x86, 0xE0, 0x54, 0x1F,
341/*0940*/0xFD, 0x12, 0x08, 0x81, 0xF5, 0x83, 0xED, 0xF0,
342 0x90, 0x07, 0x24, 0x12, 0x07, 0x89, 0xE0, 0x54,
343/*0950*/0x1F, 0xFD, 0x12, 0x08, 0x35, 0xED, 0xF0, 0xEF,
344 0x64, 0x04, 0x4E, 0x70, 0x37, 0x90, 0x07, 0x26,
345/*0960*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xE4,
346 0xED, 0xF0, 0x90, 0x07, 0x28, 0x12, 0x07, 0x89,
347/*0970*/0xE0, 0xFD, 0x12, 0x07, 0xF0, 0xED, 0xF0, 0x12,
348 0x07, 0x86, 0xE0, 0x54, 0x1F, 0xFD, 0x12, 0x08,
349/*0980*/0x8B, 0xF5, 0x83, 0xED, 0xF0, 0x90, 0x07, 0x24,
350 0x12, 0x07, 0x89, 0xE0, 0x54, 0x1F, 0xFD, 0x12,
351/*0990*/0x08, 0x41, 0xED, 0xF0, 0xEF, 0x64, 0x01, 0x4E,
352 0x70, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
353/*09A0*/0xEF, 0x64, 0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01,
354 0x80, 0x02, 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x78,
355/*09B0*/0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE0, 0xFF,
356 0x12, 0x07, 0xFC, 0xEF, 0x12, 0x07, 0x31, 0xE0,
357/*09C0*/0xFF, 0x12, 0x08, 0x08, 0xEF, 0xF0, 0x90, 0x07,
358 0x22, 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF,
359/*09D0*/0x12, 0x08, 0x4D, 0xEF, 0xF0, 0x90, 0x07, 0x24,
360 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
361/*09E0*/0x08, 0x59, 0xEF, 0xF0, 0x22, 0x12, 0x07, 0xCC,
362 0xE4, 0xF0, 0x12, 0x07, 0xD8, 0xE4, 0xF0, 0x12,
363/*09F0*/0x08, 0x81, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08,
364 0x35, 0x74, 0x14, 0xF0, 0x12, 0x07, 0xE4, 0xE4,
365/*0A00*/0xF0, 0x12, 0x07, 0xF0, 0xE4, 0xF0, 0x12, 0x08,
366 0x8B, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08, 0x41,
367/*0A10*/0x74, 0x14, 0xF0, 0x12, 0x07, 0xFC, 0xE4, 0xF0,
368 0x12, 0x08, 0x08, 0xE4, 0xF0, 0x12, 0x08, 0x4D,
369/*0A20*/0xE4, 0xF0, 0x12, 0x08, 0x59, 0x74, 0x14, 0xF0,
370 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFC, 0x10, 0xE4,
371/*0A30*/0xF5, 0xFD, 0x75, 0xFE, 0x30, 0xF5, 0xFF, 0xE5,
372 0xE7, 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0xE5,
373/*0A40*/0xE6, 0x20, 0xE7, 0x0B, 0x78, 0xFF, 0xE4, 0xF6,
374 0xD8, 0xFD, 0x53, 0xE6, 0xFE, 0x80, 0x09, 0x78,
375/*0A50*/0x08, 0xE4, 0xF6, 0xD8, 0xFD, 0x53, 0xE6, 0xFE,
376 0x75, 0x81, 0x80, 0xE4, 0xF5, 0xA8, 0xD2, 0xA8,
377/*0A60*/0xC2, 0xA9, 0xD2, 0xAF, 0xE5, 0xE2, 0x20, 0xE5,
378 0x05, 0x20, 0xE6, 0x02, 0x80, 0x03, 0x43, 0xE1,
379/*0A70*/0x02, 0xE5, 0xE2, 0x20, 0xE0, 0x0E, 0x90, 0x00,
380 0x00, 0x7F, 0x00, 0x7E, 0x08, 0xE4, 0xF0, 0xA3,
381/*0A80*/0xDF, 0xFC, 0xDE, 0xFA, 0x02, 0x0A, 0xDB, 0x43,
382 0xFA, 0x01, 0xC0, 0xE0, 0xC0, 0xF0, 0xC0, 0x83,
383/*0A90*/0xC0, 0x82, 0xC0, 0xD0, 0x12, 0x1C, 0xE7, 0xD0,
384 0xD0, 0xD0, 0x82, 0xD0, 0x83, 0xD0, 0xF0, 0xD0,
385/*0AA0*/0xE0, 0x53, 0xFA, 0xFE, 0x32, 0x02, 0x1B, 0x55,
386 0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93, 0xA3, 0xF6,
387/*0AB0*/0x08, 0xDF, 0xF9, 0x80, 0x29, 0xE4, 0x93, 0xA3,
388 0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3, 0x33,
389/*0AC0*/0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83, 0x40,
390 0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6, 0xDF,
391/*0AD0*/0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08, 0x10,
392 0x20, 0x40, 0x80, 0x90, 0x00, 0x3F, 0xE4, 0x7E,
393/*0AE0*/0x01, 0x93, 0x60, 0xC1, 0xA3, 0xFF, 0x54, 0x3F,
394 0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4, 0x93,
395/*0AF0*/0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0, 0x25,
396 0xE0, 0x60, 0xAD, 0x40, 0xB8, 0x80, 0xFE, 0x8C,
397/*0B00*/0x64, 0x8D, 0x65, 0x8A, 0x66, 0x8B, 0x67, 0xE4,
398 0xF5, 0x69, 0xEF, 0x4E, 0x70, 0x03, 0x02, 0x1D,
399/*0B10*/0x55, 0xE4, 0xF5, 0x68, 0xE5, 0x67, 0x45, 0x66,
400 0x70, 0x32, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90,
401/*0B20*/0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE4,
402 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4, 0x12,
403/*0B30*/0x08, 0x70, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
404 0x83, 0x92, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83,
405/*0B40*/0xC6, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8,
406 0xE4, 0xF0, 0x80, 0x11, 0x90, 0x07, 0x26, 0x12,
407/*0B50*/0x07, 0x35, 0xE4, 0x12, 0x08, 0x70, 0x70, 0x05,
408 0x12, 0x07, 0x32, 0xE4, 0xF0, 0x12, 0x1D, 0x55,
409/*0B60*/0x12, 0x1E, 0xBF, 0xE5, 0x67, 0x45, 0x66, 0x70,
410 0x33, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90, 0xE5,
411/*0B70*/0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
412 0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x12,
413/*0B80*/0x08, 0x6E, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
414 0x83, 0x92, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
415/*0B90*/0x83, 0xC6, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
416 0x83, 0xC8, 0x80, 0x0E, 0x90, 0x07, 0x26, 0x12,
417/*0BA0*/0x07, 0x35, 0x12, 0x08, 0x6E, 0x70, 0x06, 0x12,
418 0x07, 0x32, 0xE5, 0x40, 0xF0, 0xAF, 0x69, 0x7E,
419/*0BB0*/0x00, 0xAD, 0x67, 0xAC, 0x66, 0x12, 0x04, 0x44,
420 0x12, 0x07, 0x2A, 0x75, 0x83, 0xCA, 0xE0, 0xD3,
421/*0BC0*/0x94, 0x00, 0x50, 0x0C, 0x05, 0x68, 0xE5, 0x68,
422 0xC3, 0x94, 0x05, 0x50, 0x03, 0x02, 0x0B, 0x14,
423/*0BD0*/0x22, 0x8C, 0x60, 0x8D, 0x61, 0x12, 0x08, 0xDA,
424 0x74, 0x20, 0x40, 0x0D, 0x2F, 0xF5, 0x82, 0x74,
425/*0BE0*/0x03, 0x3E, 0xF5, 0x83, 0xE5, 0x3E, 0xF0, 0x80,
426 0x0B, 0x2F, 0xF5, 0x82, 0x74, 0x03, 0x3E, 0xF5,
427/*0BF0*/0x83, 0xE5, 0x3C, 0xF0, 0xE5, 0x3C, 0xD3, 0x95,
428 0x3E, 0x40, 0x3C, 0xE5, 0x61, 0x45, 0x60, 0x70,
429/*0C00*/0x10, 0xE9, 0x12, 0x09, 0x04, 0xE5, 0x3E, 0x12,
430 0x07, 0x68, 0x40, 0x3B, 0x12, 0x08, 0x95, 0x80,
431/*0C10*/0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40, 0x1D,
432 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05, 0x85,
433/*0C20*/0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F,
434 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x3E, 0x12, 0x07,
435/*0C30*/0xC0, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x43, 0xE5,
436 0x61, 0x45, 0x60, 0x70, 0x19, 0x12, 0x07, 0x5F,
437/*0C40*/0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x27, 0x12,
438 0x09, 0x0B, 0x12, 0x08, 0x14, 0xE5, 0x42, 0x12,
439/*0C50*/0x07, 0xC0, 0xE5, 0x41, 0xF0, 0x22, 0xE5, 0x3C,
440 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C, 0x38,
441/*0C60*/0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39, 0x80,
442 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x08,
443/*0C70*/0x14, 0xE5, 0x3C, 0x12, 0x07, 0xC0, 0xE5, 0x3D,
444 0xF0, 0x22, 0x85, 0x38, 0x38, 0x85, 0x39, 0x39,
445/*0C80*/0x85, 0x3A, 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x38,
446 0x12, 0x07, 0xC0, 0xE5, 0x39, 0xF0, 0x22, 0x7F,
447/*0C90*/0x06, 0x12, 0x17, 0x31, 0x12, 0x1D, 0x23, 0x12,
448 0x0E, 0x04, 0x12, 0x0E, 0x33, 0xE0, 0x44, 0x0A,
449/*0CA0*/0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E, 0x04, 0x12,
450 0x0E, 0x0B, 0xEF, 0xF0, 0xE5, 0x28, 0x30, 0xE5,
451/*0CB0*/0x03, 0xD3, 0x80, 0x01, 0xC3, 0x40, 0x05, 0x75,
452 0x14, 0x20, 0x80, 0x03, 0x75, 0x14, 0x08, 0x12,
453/*0CC0*/0x0E, 0x04, 0x75, 0x83, 0x8A, 0xE5, 0x14, 0xF0,
454 0xB4, 0xFF, 0x05, 0x75, 0x12, 0x80, 0x80, 0x06,
455/*0CD0*/0xE5, 0x14, 0xC3, 0x13, 0xF5, 0x12, 0xE4, 0xF5,
456 0x16, 0xF5, 0x7F, 0x12, 0x19, 0x36, 0x12, 0x13,
457/*0CE0*/0xA3, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x50, 0x09,
458 0x05, 0x16, 0xE5, 0x16, 0xC3, 0x94, 0x14, 0x40,
459/*0CF0*/0xEA, 0xE5, 0xE4, 0x20, 0xE7, 0x28, 0x12, 0x0E,
460 0x04, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
461/*0D00*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
462 0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
463/*0D10*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
464 0x5E, 0x60, 0x03, 0x12, 0x1D, 0xD7, 0xE5, 0x7F,
465/*0D20*/0xC3, 0x94, 0x11, 0x40, 0x14, 0x12, 0x0E, 0x04,
466 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x80, 0xF0, 0xE5,
467/*0D30*/0xE4, 0x20, 0xE7, 0x0F, 0x12, 0x1D, 0xD7, 0x80,
468 0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0xD2, 0xE0,
469/*0D40*/0x54, 0x7F, 0xF0, 0x12, 0x1D, 0x23, 0x22, 0x74,
470 0x8A, 0x85, 0x08, 0x82, 0xF5, 0x83, 0xE5, 0x17,
471/*0D50*/0xF0, 0x12, 0x0E, 0x3A, 0xE4, 0xF0, 0x90, 0x07,
472 0x02, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x90,
473/*0D60*/0xEF, 0xF0, 0x74, 0x92, 0xFE, 0xE5, 0x08, 0x44,
474 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0, 0x54,
475/*0D70*/0xC0, 0xFD, 0x90, 0x07, 0x03, 0xE0, 0x54, 0x3F,
476 0x4D, 0x8F, 0x82, 0x8E, 0x83, 0xF0, 0x90, 0x07,
477/*0D80*/0x04, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x82,
478 0xEF, 0xF0, 0x90, 0x07, 0x05, 0xE0, 0xFF, 0xED,
479/*0D90*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xB4, 0xEF,
480 0x12, 0x0E, 0x03, 0x75, 0x83, 0x80, 0xE0, 0x54,
481/*0DA0*/0xBF, 0xF0, 0x30, 0x37, 0x0A, 0x12, 0x0E, 0x91,
482 0x75, 0x83, 0x94, 0xE0, 0x44, 0x80, 0xF0, 0x30,
483/*0DB0*/0x38, 0x0A, 0x12, 0x0E, 0x91, 0x75, 0x83, 0x92,
484 0xE0, 0x44, 0x80, 0xF0, 0xE5, 0x28, 0x30, 0xE4,
485/*0DC0*/0x1A, 0x20, 0x39, 0x0A, 0x12, 0x0E, 0x04, 0x75,
486 0x83, 0x88, 0xE0, 0x54, 0x7F, 0xF0, 0x20, 0x3A,
487/*0DD0*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x88, 0xE0,
488 0x54, 0xBF, 0xF0, 0x74, 0x8C, 0xFE, 0x12, 0x0E,
489/*0DE0*/0x04, 0x8E, 0x83, 0xE0, 0x54, 0x0F, 0x12, 0x0E,
490 0x03, 0x75, 0x83, 0x86, 0xE0, 0x54, 0xBF, 0xF0,
491/*0DF0*/0xE5, 0x08, 0x44, 0x06, 0x12, 0x0D, 0xFD, 0x75,
492 0x83, 0x8A, 0xE4, 0xF0, 0x22, 0xF5, 0x82, 0x75,
493/*0E00*/0x83, 0x82, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07,
494 0xF5, 0x82, 0x22, 0x8E, 0x83, 0xE0, 0xF5, 0x10,
495/*0E10*/0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44, 0x01, 0xFF,
496 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07, 0xF5, 0x82,
497/*0E20*/0x22, 0xE5, 0x15, 0xC4, 0x54, 0x07, 0xFF, 0xE5,
498 0x08, 0xFD, 0xED, 0x44, 0x08, 0xF5, 0x82, 0x75,
499/*0E30*/0x83, 0x82, 0x22, 0x75, 0x83, 0x80, 0xE0, 0x44,
500 0x40, 0xF0, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
501/*0E40*/0x75, 0x83, 0x8A, 0x22, 0xE5, 0x16, 0x25, 0xE0,
502 0x25, 0xE0, 0x24, 0xAF, 0xF5, 0x82, 0xE4, 0x34,
503/*0E50*/0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0D, 0x22,
504 0x43, 0xE1, 0x10, 0x43, 0xE1, 0x80, 0x53, 0xE1,
505/*0E60*/0xFD, 0x85, 0xE1, 0x10, 0x22, 0xE5, 0x16, 0x25,
506 0xE0, 0x25, 0xE0, 0x24, 0xB2, 0xF5, 0x82, 0xE4,
507/*0E70*/0x34, 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0x22, 0x85,
508 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15, 0xF0,
509/*0E80*/0x22, 0xE5, 0xE2, 0x54, 0x20, 0xD3, 0x94, 0x00,
510 0x22, 0xE5, 0xE2, 0x54, 0x40, 0xD3, 0x94, 0x00,
511/*0E90*/0x22, 0xE5, 0x08, 0x44, 0x06, 0xF5, 0x82, 0x22,
512 0xFD, 0xE5, 0x08, 0xFB, 0xEB, 0x44, 0x07, 0xF5,
513/*0EA0*/0x82, 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFE, 0x30,
514 0x22, 0xEF, 0x4E, 0x70, 0x26, 0x12, 0x07, 0xCC,
515/*0EB0*/0xE0, 0xFD, 0x90, 0x07, 0x26, 0x12, 0x07, 0x7B,
516 0x12, 0x07, 0xD8, 0xE0, 0xFD, 0x90, 0x07, 0x28,
517/*0EC0*/0x12, 0x07, 0x7B, 0x12, 0x08, 0x81, 0x12, 0x07,
518 0x72, 0x12, 0x08, 0x35, 0xE0, 0x90, 0x07, 0x24,
519/*0ED0*/0x12, 0x07, 0x78, 0xEF, 0x64, 0x04, 0x4E, 0x70,
520 0x29, 0x12, 0x07, 0xE4, 0xE0, 0xFD, 0x90, 0x07,
521/*0EE0*/0x26, 0x12, 0x07, 0x7B, 0x12, 0x07, 0xF0, 0xE0,
522 0xFD, 0x90, 0x07, 0x28, 0x12, 0x07, 0x7B, 0x12,
523/*0EF0*/0x08, 0x8B, 0x12, 0x07, 0x72, 0x12, 0x08, 0x41,
524 0xE0, 0x54, 0x1F, 0xFD, 0x90, 0x07, 0x24, 0x12,
525/*0F00*/0x07, 0x7B, 0xEF, 0x64, 0x01, 0x4E, 0x70, 0x04,
526 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0xEF, 0x64,
527/*0F10*/0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02,
528 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x35, 0x12, 0x07,
529/*0F20*/0xFC, 0xE0, 0xFF, 0x90, 0x07, 0x26, 0x12, 0x07,
530 0x89, 0xEF, 0xF0, 0x12, 0x08, 0x08, 0xE0, 0xFF,
531/*0F30*/0x90, 0x07, 0x28, 0x12, 0x07, 0x89, 0xEF, 0xF0,
532 0x12, 0x08, 0x4D, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
533/*0F40*/0x07, 0x86, 0xEF, 0xF0, 0x12, 0x08, 0x59, 0xE0,
534 0x54, 0x1F, 0xFF, 0x90, 0x07, 0x24, 0x12, 0x07,
535/*0F50*/0x89, 0xEF, 0xF0, 0x22, 0xE4, 0xF5, 0x53, 0x12,
536 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
537/*0F60*/0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40, 0x04, 0x7E,
538 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x70,
539/*0F70*/0x03, 0x02, 0x0F, 0xF6, 0x85, 0xE1, 0x10, 0x43,
540 0xE1, 0x02, 0x53, 0xE1, 0x0F, 0x85, 0xE1, 0x10,
541/*0F80*/0xE4, 0xF5, 0x51, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
542 0x52, 0x12, 0x0E, 0x89, 0x40, 0x1D, 0xAD, 0x52,
543/*0F90*/0xAF, 0x51, 0x12, 0x11, 0x18, 0xEF, 0x60, 0x08,
544 0x85, 0xE1, 0x10, 0x43, 0xE1, 0x40, 0x80, 0x0B,
545/*0FA0*/0x53, 0xE1, 0xBF, 0x12, 0x0E, 0x58, 0x12, 0x00,
546 0x06, 0x80, 0xFB, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
547/*0FB0*/0x51, 0xE5, 0xE4, 0x54, 0x3F, 0xF5, 0x52, 0x12,
548 0x0E, 0x81, 0x40, 0x1D, 0xAD, 0x52, 0xAF, 0x51,
549/*0FC0*/0x12, 0x11, 0x18, 0xEF, 0x60, 0x08, 0x85, 0xE1,
550 0x10, 0x43, 0xE1, 0x20, 0x80, 0x0B, 0x53, 0xE1,
551/*0FD0*/0xDF, 0x12, 0x0E, 0x58, 0x12, 0x00, 0x06, 0x80,
552 0xFB, 0x12, 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01,
553/*0FE0*/0x80, 0x02, 0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40,
554 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
555/*0FF0*/0x4F, 0x60, 0x03, 0x12, 0x0E, 0x5B, 0x22, 0x12,
556 0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x22,
557/*1000*/0x02, 0x11, 0x00, 0x02, 0x10, 0x40, 0x02, 0x10,
558 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
559/*1010*/0x01, 0x20, 0x01, 0x20, 0xE4, 0xF5, 0x57, 0x12,
560 0x16, 0xBD, 0x12, 0x16, 0x44, 0xE4, 0x12, 0x10,
561/*1020*/0x56, 0x12, 0x14, 0xB7, 0x90, 0x07, 0x26, 0x12,
562 0x07, 0x35, 0xE4, 0x12, 0x07, 0x31, 0xE4, 0xF0,
563/*1030*/0x12, 0x10, 0x56, 0x12, 0x14, 0xB7, 0x90, 0x07,
564 0x26, 0x12, 0x07, 0x35, 0xE5, 0x41, 0x12, 0x07,
565/*1040*/0x31, 0xE5, 0x40, 0xF0, 0xAF, 0x57, 0x7E, 0x00,
566 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
567/*1050*/0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xFF, 0x90,
568 0x07, 0x20, 0xA3, 0xE0, 0xFD, 0xE4, 0xF5, 0x56,
569/*1060*/0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x12,
570 0x11, 0x51, 0x7F, 0x0F, 0x7D, 0x18, 0xE4, 0xF5,
571/*1070*/0x56, 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA,
572 0x12, 0x15, 0x41, 0xAF, 0x56, 0x7E, 0x00, 0x12,
573/*1080*/0x1A, 0xFF, 0xE4, 0xFF, 0xF5, 0x56, 0x7D, 0x1F,
574 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x22,
575/*1090*/0x22, 0xE4, 0xF5, 0x55, 0xE5, 0x08, 0xFD, 0x74,
576 0xA0, 0xF5, 0x56, 0xED, 0x44, 0x07, 0xF5, 0x57,
577/*10A0*/0xE5, 0x28, 0x30, 0xE5, 0x03, 0xD3, 0x80, 0x01,
578 0xC3, 0x40, 0x05, 0x7F, 0x28, 0xEF, 0x80, 0x04,
579/*10B0*/0x7F, 0x14, 0xEF, 0xC3, 0x13, 0xF5, 0x54, 0xE4,
580 0xF9, 0x12, 0x0E, 0x18, 0x75, 0x83, 0x8E, 0xE0,
581/*10C0*/0xF5, 0x10, 0xCE, 0xEF, 0xCE, 0xEE, 0xD3, 0x94,
582 0x00, 0x40, 0x26, 0xE5, 0x10, 0x54, 0xFE, 0x12,
583/*10D0*/0x0E, 0x98, 0x75, 0x83, 0x8E, 0xED, 0xF0, 0xE5,
584 0x10, 0x44, 0x01, 0xFD, 0xEB, 0x44, 0x07, 0xF5,
585/*10E0*/0x82, 0xED, 0xF0, 0x85, 0x57, 0x82, 0x85, 0x56,
586 0x83, 0xE0, 0x30, 0xE3, 0x01, 0x09, 0x1E, 0x80,
587/*10F0*/0xD4, 0xC2, 0x34, 0xE9, 0xC3, 0x95, 0x54, 0x40,
588 0x02, 0xD2, 0x34, 0x22, 0x02, 0x00, 0x06, 0x22,
589/*1100*/0x30, 0x30, 0x11, 0x90, 0x10, 0x00, 0xE4, 0x93,
590 0xF5, 0x10, 0x90, 0x10, 0x10, 0xE4, 0x93, 0xF5,
591/*1110*/0x10, 0x12, 0x10, 0x90, 0x12, 0x11, 0x50, 0x22,
592 0xE4, 0xFC, 0xC3, 0xED, 0x9F, 0xFA, 0xEF, 0xF5,
593/*1120*/0x83, 0x75, 0x82, 0x00, 0x79, 0xFF, 0xE4, 0x93,
594 0xCC, 0x6C, 0xCC, 0xA3, 0xD9, 0xF8, 0xDA, 0xF6,
595/*1130*/0xE5, 0xE2, 0x30, 0xE4, 0x02, 0x8C, 0xE5, 0xED,
596 0x24, 0xFF, 0xFF, 0xEF, 0x75, 0x82, 0xFF, 0xF5,
597/*1140*/0x83, 0xE4, 0x93, 0x6C, 0x70, 0x03, 0x7F, 0x01,
598 0x22, 0x7F, 0x00, 0x22, 0x22, 0x11, 0x00, 0x00,
599/*1150*/0x22, 0x8E, 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D,
600 0x5B, 0x8A, 0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01,
601/*1160*/0xE4, 0xF5, 0x5F, 0xF5, 0x60, 0xF5, 0x62, 0x12,
602 0x07, 0x2A, 0x75, 0x83, 0xD0, 0xE0, 0xFF, 0xC4,
603/*1170*/0x54, 0x0F, 0xF5, 0x61, 0x12, 0x1E, 0xA5, 0x85,
604 0x59, 0x5E, 0xD3, 0xE5, 0x5E, 0x95, 0x5B, 0xE5,
605/*1180*/0x5A, 0x12, 0x07, 0x6B, 0x50, 0x4B, 0x12, 0x07,
606 0x03, 0x75, 0x83, 0xBC, 0xE0, 0x45, 0x5E, 0x12,
607/*1190*/0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x45, 0x5E,
608 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0, 0x45,
609/*11A0*/0x5E, 0xF0, 0xAF, 0x5F, 0xE5, 0x60, 0x12, 0x08,
610 0x78, 0x12, 0x0A, 0xFF, 0xAF, 0x62, 0x7E, 0x00,
611/*11B0*/0xAD, 0x5D, 0xAC, 0x5C, 0x12, 0x04, 0x44, 0xE5,
612 0x61, 0xAF, 0x5E, 0x7E, 0x00, 0xB4, 0x03, 0x05,
613/*11C0*/0x12, 0x1E, 0x21, 0x80, 0x07, 0xAD, 0x5D, 0xAC,
614 0x5C, 0x12, 0x13, 0x17, 0x05, 0x5E, 0x02, 0x11,
615/*11D0*/0x7A, 0x12, 0x07, 0x03, 0x75, 0x83, 0xBC, 0xE0,
616 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBE,
617/*11E0*/0xE0, 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83,
618 0xC0, 0xE0, 0x45, 0x40, 0xF0, 0x22, 0x8E, 0x58,
619/*11F0*/0x8F, 0x59, 0x75, 0x5A, 0x01, 0x79, 0x01, 0x75,
620 0x5B, 0x01, 0xE4, 0xFB, 0x12, 0x07, 0x2A, 0x75,
621/*1200*/0x83, 0xAE, 0xE0, 0x54, 0x1A, 0xFF, 0x12, 0x08,
622 0x65, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0xFE, 0xEF,
623/*1210*/0x70, 0x0C, 0xEE, 0x65, 0x35, 0x70, 0x07, 0x90,
624 0x07, 0x2F, 0xE0, 0xB4, 0x01, 0x0D, 0xAF, 0x35,
625/*1220*/0x7E, 0x00, 0x12, 0x0E, 0xA9, 0xCF, 0xEB, 0xCF,
626 0x02, 0x1E, 0x60, 0xE5, 0x59, 0x64, 0x02, 0x45,
627/*1230*/0x58, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
628 0x00, 0xE5, 0x59, 0x45, 0x58, 0x70, 0x04, 0x7E,
629/*1240*/0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60,
630 0x23, 0x85, 0x41, 0x49, 0x85, 0x40, 0x4B, 0xE5,
631/*1250*/0x59, 0x45, 0x58, 0x70, 0x2C, 0xAF, 0x5A, 0xFE,
632 0xCD, 0xE9, 0xCD, 0xFC, 0xAB, 0x59, 0xAA, 0x58,
633/*1260*/0x12, 0x0A, 0xFF, 0xAF, 0x5B, 0x7E, 0x00, 0x12,
634 0x1E, 0x60, 0x80, 0x15, 0xAF, 0x5B, 0x7E, 0x00,
635/*1270*/0x12, 0x1E, 0x60, 0x90, 0x07, 0x26, 0x12, 0x07,
636 0x35, 0xE5, 0x49, 0x12, 0x07, 0x31, 0xE5, 0x4B,
637/*1280*/0xF0, 0xE4, 0xFD, 0xAF, 0x35, 0xFE, 0xFC, 0x12,
638 0x09, 0x15, 0x22, 0x8C, 0x64, 0x8D, 0x65, 0x12,
639/*1290*/0x08, 0xDA, 0x40, 0x3C, 0xE5, 0x65, 0x45, 0x64,
640 0x70, 0x10, 0x12, 0x09, 0x04, 0xC3, 0xE5, 0x3E,
641/*12A0*/0x12, 0x07, 0x69, 0x40, 0x3B, 0x12, 0x08, 0x95,
642 0x80, 0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40,
643/*12B0*/0x1D, 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05,
644 0x85, 0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39,
645/*12C0*/0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3E, 0x12,
646 0x07, 0x53, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x3B,
647/*12D0*/0xE5, 0x65, 0x45, 0x64, 0x70, 0x11, 0x12, 0x07,
648 0x5F, 0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x1F,
649/*12E0*/0x12, 0x07, 0x3E, 0xE5, 0x41, 0xF0, 0x22, 0xE5,
650 0x3C, 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C,
651/*12F0*/0x38, 0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39,
652 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12,
653/*1300*/0x07, 0xA8, 0xE5, 0x3C, 0x12, 0x07, 0x53, 0xE5,
654 0x3D, 0xF0, 0x22, 0x12, 0x07, 0x9F, 0xE5, 0x38,
655/*1310*/0x12, 0x07, 0x53, 0xE5, 0x39, 0xF0, 0x22, 0x8C,
656 0x63, 0x8D, 0x64, 0x12, 0x08, 0xDA, 0x40, 0x3C,
657/*1320*/0xE5, 0x64, 0x45, 0x63, 0x70, 0x10, 0x12, 0x09,
658 0x04, 0xC3, 0xE5, 0x3E, 0x12, 0x07, 0x69, 0x40,
659/*1330*/0x3B, 0x12, 0x08, 0x95, 0x80, 0x18, 0xE5, 0x3E,
660 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3E, 0x38,
661/*1340*/0xE5, 0x3E, 0x60, 0x05, 0x85, 0x3F, 0x39, 0x80,
662 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x07,
663/*1350*/0xA8, 0xE5, 0x3E, 0x12, 0x07, 0x53, 0xE5, 0x3F,
664 0xF0, 0x22, 0x80, 0x3B, 0xE5, 0x64, 0x45, 0x63,
665/*1360*/0x70, 0x11, 0x12, 0x07, 0x5F, 0x40, 0x05, 0x12,
666 0x08, 0x9E, 0x80, 0x1F, 0x12, 0x07, 0x3E, 0xE5,
667/*1370*/0x41, 0xF0, 0x22, 0xE5, 0x3C, 0xC3, 0x95, 0x38,
668 0x40, 0x1D, 0x85, 0x3C, 0x38, 0xE5, 0x3C, 0x60,
669/*1380*/0x05, 0x85, 0x3D, 0x39, 0x80, 0x03, 0x85, 0x39,
670 0x39, 0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3C,
671/*1390*/0x12, 0x07, 0x53, 0xE5, 0x3D, 0xF0, 0x22, 0x12,
672 0x07, 0x9F, 0xE5, 0x38, 0x12, 0x07, 0x53, 0xE5,
673/*13A0*/0x39, 0xF0, 0x22, 0xE5, 0x0D, 0xFE, 0xE5, 0x08,
674 0x8E, 0x54, 0x44, 0x05, 0xF5, 0x55, 0x75, 0x15,
675/*13B0*/0x0F, 0xF5, 0x82, 0x12, 0x0E, 0x7A, 0x12, 0x17,
676 0xA3, 0x20, 0x31, 0x05, 0x75, 0x15, 0x03, 0x80,
677/*13C0*/0x03, 0x75, 0x15, 0x0B, 0xE5, 0x0A, 0xC3, 0x94,
678 0x01, 0x50, 0x38, 0x12, 0x14, 0x20, 0x20, 0x31,
679/*13D0*/0x06, 0x05, 0x15, 0x05, 0x15, 0x80, 0x04, 0x15,
680 0x15, 0x15, 0x15, 0xE5, 0x0A, 0xC3, 0x94, 0x01,
681/*13E0*/0x50, 0x21, 0x12, 0x14, 0x20, 0x20, 0x31, 0x04,
682 0x05, 0x15, 0x80, 0x02, 0x15, 0x15, 0xE5, 0x0A,
683/*13F0*/0xC3, 0x94, 0x01, 0x50, 0x0E, 0x12, 0x0E, 0x77,
684 0x12, 0x17, 0xA3, 0x20, 0x31, 0x05, 0x05, 0x15,
685/*1400*/0x12, 0x0E, 0x77, 0xE5, 0x15, 0xB4, 0x08, 0x04,
686 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x15,
687/*1410*/0xB4, 0x07, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E,
688 0x00, 0xEE, 0x4F, 0x60, 0x02, 0x05, 0x7F, 0x22,
689/*1420*/0x85, 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15,
690 0xF0, 0x12, 0x17, 0xA3, 0x22, 0x12, 0x07, 0x2A,
691/*1430*/0x75, 0x83, 0xAE, 0x74, 0xFF, 0x12, 0x07, 0x29,
692 0xE0, 0x54, 0x1A, 0xF5, 0x34, 0xE0, 0xC4, 0x13,
693/*1440*/0x54, 0x07, 0xF5, 0x35, 0x24, 0xFE, 0x60, 0x24,
694 0x24, 0xFE, 0x60, 0x3C, 0x24, 0x04, 0x70, 0x63,
695/*1450*/0x75, 0x31, 0x2D, 0xE5, 0x08, 0xFD, 0x74, 0xB6,
696 0x12, 0x07, 0x92, 0x74, 0xBC, 0x90, 0x07, 0x22,
697/*1460*/0x12, 0x07, 0x95, 0x74, 0x90, 0x12, 0x07, 0xB3,
698 0x74, 0x92, 0x80, 0x3C, 0x75, 0x31, 0x3A, 0xE5,
699/*1470*/0x08, 0xFD, 0x74, 0xBA, 0x12, 0x07, 0x92, 0x74,
700 0xC0, 0x90, 0x07, 0x22, 0x12, 0x07, 0xB6, 0x74,
701/*1480*/0xC4, 0x12, 0x07, 0xB3, 0x74, 0xC8, 0x80, 0x20,
702 0x75, 0x31, 0x35, 0xE5, 0x08, 0xFD, 0x74, 0xB8,
703/*1490*/0x12, 0x07, 0x92, 0x74, 0xBE, 0xFF, 0xED, 0x44,
704 0x07, 0x90, 0x07, 0x22, 0xCF, 0xF0, 0xA3, 0xEF,
705/*14A0*/0xF0, 0x74, 0xC2, 0x12, 0x07, 0xB3, 0x74, 0xC6,
706 0xFF, 0xED, 0x44, 0x07, 0xA3, 0xCF, 0xF0, 0xA3,
707/*14B0*/0xEF, 0xF0, 0x22, 0x75, 0x34, 0x01, 0x22, 0x8E,
708 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D, 0x5B, 0x8A,
709/*14C0*/0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01, 0xE4, 0xF5,
710 0x5F, 0x12, 0x1E, 0xA5, 0x85, 0x59, 0x5E, 0xD3,
711/*14D0*/0xE5, 0x5E, 0x95, 0x5B, 0xE5, 0x5A, 0x12, 0x07,
712 0x6B, 0x50, 0x57, 0xE5, 0x5D, 0x45, 0x5C, 0x70,
713/*14E0*/0x30, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x92, 0xE5,
714 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE5,
715/*14F0*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8, 0xE5,
716 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0x90, 0xE5,
717/*1500*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
718 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x80,
719/*1510*/0x03, 0x12, 0x07, 0x32, 0xE5, 0x5E, 0xF0, 0xAF,
720 0x5F, 0x7E, 0x00, 0xAD, 0x5D, 0xAC, 0x5C, 0x12,
721/*1520*/0x04, 0x44, 0xAF, 0x5E, 0x7E, 0x00, 0xAD, 0x5D,
722 0xAC, 0x5C, 0x12, 0x0B, 0xD1, 0x05, 0x5E, 0x02,
723/*1530*/0x14, 0xCF, 0xAB, 0x5D, 0xAA, 0x5C, 0xAD, 0x5B,
724 0xAC, 0x5A, 0xAF, 0x59, 0xAE, 0x58, 0x02, 0x1B,
725/*1540*/0xFB, 0x8C, 0x5C, 0x8D, 0x5D, 0x8A, 0x5E, 0x8B,
726 0x5F, 0x75, 0x60, 0x01, 0xE4, 0xF5, 0x61, 0xF5,
727/*1550*/0x62, 0xF5, 0x63, 0x12, 0x1E, 0xA5, 0x8F, 0x60,
728 0xD3, 0xE5, 0x60, 0x95, 0x5D, 0xE5, 0x5C, 0x12,
729/*1560*/0x07, 0x6B, 0x50, 0x61, 0xE5, 0x5F, 0x45, 0x5E,
730 0x70, 0x27, 0x12, 0x07, 0x2A, 0x75, 0x83, 0xB6,
731/*1570*/0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xB8,
732 0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBA,
733/*1580*/0xE5, 0x60, 0xF0, 0xAF, 0x61, 0x7E, 0x00, 0xE5,
734 0x62, 0x12, 0x08, 0x7A, 0x12, 0x0A, 0xFF, 0x80,
735/*1590*/0x19, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE5,
736 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0x8E, 0xE4,
737/*15A0*/0x12, 0x07, 0x29, 0x74, 0x01, 0x12, 0x07, 0x29,
738 0xE4, 0xF0, 0xAF, 0x63, 0x7E, 0x00, 0xAD, 0x5F,
739/*15B0*/0xAC, 0x5E, 0x12, 0x04, 0x44, 0xAF, 0x60, 0x7E,
740 0x00, 0xAD, 0x5F, 0xAC, 0x5E, 0x12, 0x12, 0x8B,
741/*15C0*/0x05, 0x60, 0x02, 0x15, 0x58, 0x22, 0x90, 0x11,
742 0x4D, 0xE4, 0x93, 0x90, 0x07, 0x2E, 0xF0, 0x12,
743/*15D0*/0x08, 0x1F, 0x75, 0x83, 0xAE, 0xE0, 0x54, 0x1A,
744 0xF5, 0x34, 0x70, 0x67, 0xEF, 0x44, 0x07, 0xF5,
745/*15E0*/0x82, 0x75, 0x83, 0xCE, 0xE0, 0xFF, 0x13, 0x13,
746 0x13, 0x54, 0x07, 0xF5, 0x36, 0x54, 0x0F, 0xD3,
747/*15F0*/0x94, 0x00, 0x40, 0x06, 0x12, 0x14, 0x2D, 0x12,
748 0x1B, 0xA9, 0xE5, 0x36, 0x54, 0x0F, 0x24, 0xFE,
749/*1600*/0x60, 0x0C, 0x14, 0x60, 0x0C, 0x14, 0x60, 0x19,
750 0x24, 0x03, 0x70, 0x37, 0x80, 0x10, 0x02, 0x1E,
751/*1610*/0x91, 0x12, 0x1E, 0x91, 0x12, 0x07, 0x2A, 0x75,
752 0x83, 0xCE, 0xE0, 0x54, 0xEF, 0xF0, 0x02, 0x1D,
753/*1620*/0xAE, 0x12, 0x10, 0x14, 0xE4, 0xF5, 0x55, 0x12,
754 0x1D, 0x85, 0x05, 0x55, 0xE5, 0x55, 0xC3, 0x94,
755/*1630*/0x05, 0x40, 0xF4, 0x12, 0x07, 0x2A, 0x75, 0x83,
756 0xCE, 0xE0, 0x54, 0xC7, 0x12, 0x07, 0x29, 0xE0,
757/*1640*/0x44, 0x08, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
758 0x59, 0xAF, 0x08, 0xEF, 0x44, 0x07, 0xF5, 0x82,
759/*1650*/0x75, 0x83, 0xD0, 0xE0, 0xFD, 0xC4, 0x54, 0x0F,
760 0xF5, 0x5A, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0x75,
761/*1660*/0x83, 0x80, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x21,
762 0x75, 0x83, 0x82, 0xE5, 0x45, 0xF0, 0xEF, 0x44,
763/*1670*/0x07, 0xF5, 0x82, 0x75, 0x83, 0x8A, 0x74, 0xFF,
764 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x07, 0x2A, 0x75,
765/*1680*/0x83, 0xBC, 0xE0, 0x54, 0xEF, 0x12, 0x07, 0x29,
766 0x75, 0x83, 0xBE, 0xE0, 0x54, 0xEF, 0x12, 0x07,
767/*1690*/0x29, 0x75, 0x83, 0xC0, 0xE0, 0x54, 0xEF, 0x12,
768 0x07, 0x29, 0x75, 0x83, 0xBC, 0xE0, 0x44, 0x10,
769/*16A0*/0x12, 0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x44,
770 0x10, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0,
771/*16B0*/0x44, 0x10, 0xF0, 0xAF, 0x58, 0xE5, 0x59, 0x12,
772 0x08, 0x78, 0x02, 0x0A, 0xFF, 0xE4, 0xF5, 0x58,
773/*16C0*/0x7D, 0x01, 0xF5, 0x59, 0xAF, 0x35, 0xFE, 0xFC,
774 0x12, 0x09, 0x15, 0x12, 0x07, 0x2A, 0x75, 0x83,
775/*16D0*/0xB6, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
776 0xB8, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
777/*16E0*/0xBA, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
778 0xBC, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
779/*16F0*/0xBE, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
780 0xC0, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
781/*1700*/0x90, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2,
782 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4,
783/*1710*/0x12, 0x07, 0x29, 0x75, 0x83, 0x92, 0xE4, 0x12,
784 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE4, 0x12, 0x07,
785/*1720*/0x29, 0x75, 0x83, 0xC8, 0xE4, 0xF0, 0xAF, 0x58,
786 0xFE, 0xE5, 0x59, 0x12, 0x08, 0x7A, 0x02, 0x0A,
787/*1730*/0xFF, 0xE5, 0xE2, 0x30, 0xE4, 0x6C, 0xE5, 0xE7,
788 0x54, 0xC0, 0x64, 0x40, 0x70, 0x64, 0xE5, 0x09,
789/*1740*/0xC4, 0x54, 0x30, 0xFE, 0xE5, 0x08, 0x25, 0xE0,
790 0x25, 0xE0, 0x54, 0xC0, 0x4E, 0xFE, 0xEF, 0x54,
791/*1750*/0x3F, 0x4E, 0xFD, 0xE5, 0x2B, 0xAE, 0x2A, 0x78,
792 0x02, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
793/*1760*/0xF5, 0x82, 0x8E, 0x83, 0xED, 0xF0, 0xE5, 0x2B,
794 0xAE, 0x2A, 0x78, 0x02, 0xC3, 0x33, 0xCE, 0x33,
795/*1770*/0xCE, 0xD8, 0xF9, 0xFF, 0xF5, 0x82, 0x8E, 0x83,
796 0xA3, 0xE5, 0xFE, 0xF0, 0x8F, 0x82, 0x8E, 0x83,
797/*1780*/0xA3, 0xA3, 0xE5, 0xFD, 0xF0, 0x8F, 0x82, 0x8E,
798 0x83, 0xA3, 0xA3, 0xA3, 0xE5, 0xFC, 0xF0, 0xC3,
799/*1790*/0xE5, 0x2B, 0x94, 0xFA, 0xE5, 0x2A, 0x94, 0x00,
800 0x50, 0x08, 0x05, 0x2B, 0xE5, 0x2B, 0x70, 0x02,
801/*17A0*/0x05, 0x2A, 0x22, 0xE4, 0xFF, 0xE4, 0xF5, 0x58,
802 0xF5, 0x56, 0xF5, 0x57, 0x74, 0x82, 0xFC, 0x12,
803/*17B0*/0x0E, 0x04, 0x8C, 0x83, 0xE0, 0xF5, 0x10, 0x54,
804 0x7F, 0xF0, 0xE5, 0x10, 0x44, 0x80, 0x12, 0x0E,
805/*17C0*/0x98, 0xED, 0xF0, 0x7E, 0x0A, 0x12, 0x0E, 0x04,
806 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0, 0x26, 0xDE,
807/*17D0*/0xF4, 0x05, 0x57, 0xE5, 0x57, 0x70, 0x02, 0x05,
808 0x56, 0xE5, 0x14, 0x24, 0x01, 0xFD, 0xE4, 0x33,
809/*17E0*/0xFC, 0xD3, 0xE5, 0x57, 0x9D, 0xE5, 0x56, 0x9C,
810 0x40, 0xD9, 0xE5, 0x0A, 0x94, 0x20, 0x50, 0x02,
811/*17F0*/0x05, 0x0A, 0x43, 0xE1, 0x08, 0xC2, 0x31, 0x12,
812 0x0E, 0x04, 0x75, 0x83, 0xA6, 0xE0, 0x55, 0x12,
813/*1800*/0x65, 0x12, 0x70, 0x03, 0xD2, 0x31, 0x22, 0xC2,
814 0x31, 0x22, 0x90, 0x07, 0x26, 0xE0, 0xFA, 0xA3,
815/*1810*/0xE0, 0xF5, 0x82, 0x8A, 0x83, 0xE0, 0xF5, 0x41,
816 0xE5, 0x39, 0xC3, 0x95, 0x41, 0x40, 0x26, 0xE5,
817/*1820*/0x39, 0x95, 0x41, 0xC3, 0x9F, 0xEE, 0x12, 0x07,
818 0x6B, 0x40, 0x04, 0x7C, 0x01, 0x80, 0x02, 0x7C,
819/*1830*/0x00, 0xE5, 0x41, 0x64, 0x3F, 0x60, 0x04, 0x7B,
820 0x01, 0x80, 0x02, 0x7B, 0x00, 0xEC, 0x5B, 0x60,
821/*1840*/0x29, 0x05, 0x41, 0x80, 0x28, 0xC3, 0xE5, 0x41,
822 0x95, 0x39, 0xC3, 0x9F, 0xEE, 0x12, 0x07, 0x6B,
823/*1850*/0x40, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00,
824 0xE5, 0x41, 0x60, 0x04, 0x7E, 0x01, 0x80, 0x02,
825/*1860*/0x7E, 0x00, 0xEF, 0x5E, 0x60, 0x04, 0x15, 0x41,
826 0x80, 0x03, 0x85, 0x39, 0x41, 0x85, 0x3A, 0x40,
827/*1870*/0x22, 0xE5, 0xE2, 0x30, 0xE4, 0x60, 0xE5, 0xE1,
828 0x30, 0xE2, 0x5B, 0xE5, 0x09, 0x70, 0x04, 0x7F,
829/*1880*/0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x08, 0x70,
830 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
831/*1890*/0x5F, 0x60, 0x43, 0x53, 0xF9, 0xF8, 0xE5, 0xE2,
832 0x30, 0xE4, 0x3B, 0xE5, 0xE1, 0x30, 0xE2, 0x2E,
833/*18A0*/0x43, 0xFA, 0x02, 0x53, 0xFA, 0xFB, 0xE4, 0xF5,
834 0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0xE5,
835/*18B0*/0xE1, 0x30, 0xE2, 0xE7, 0x90, 0x94, 0x70, 0xE0,
836 0x65, 0x10, 0x60, 0x03, 0x43, 0xFA, 0x04, 0x05,
837/*18C0*/0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0x70,
838 0xE6, 0x12, 0x00, 0x06, 0x80, 0xE1, 0x53, 0xFA,
839/*18D0*/0xFD, 0x53, 0xFA, 0xFB, 0x80, 0xC0, 0x22, 0x8F,
840 0x54, 0x12, 0x00, 0x06, 0xE5, 0xE1, 0x30, 0xE0,
841/*18E0*/0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5,
842 0x7E, 0xD3, 0x94, 0x05, 0x40, 0x04, 0x7E, 0x01,
843/*18F0*/0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60, 0x3D,
844 0x85, 0x54, 0x11, 0xE5, 0xE2, 0x20, 0xE1, 0x32,
845/*1900*/0x74, 0xCE, 0x12, 0x1A, 0x05, 0x30, 0xE7, 0x04,
846 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0x8F, 0x82,
847/*1910*/0x8E, 0x83, 0xE0, 0x30, 0xE6, 0x04, 0x7F, 0x01,
848 0x80, 0x02, 0x7F, 0x00, 0xEF, 0x5D, 0x70, 0x15,
849/*1920*/0x12, 0x15, 0xC6, 0x74, 0xCE, 0x12, 0x1A, 0x05,
850 0x30, 0xE6, 0x07, 0xE0, 0x44, 0x80, 0xF0, 0x43,
851/*1930*/0xF9, 0x80, 0x12, 0x18, 0x71, 0x22, 0x12, 0x0E,
852 0x44, 0xE5, 0x16, 0x25, 0xE0, 0x25, 0xE0, 0x24,
853/*1940*/0xB0, 0xF5, 0x82, 0xE4, 0x34, 0x1A, 0xF5, 0x83,
854 0xE4, 0x93, 0xF5, 0x0F, 0xE5, 0x16, 0x25, 0xE0,
855/*1950*/0x25, 0xE0, 0x24, 0xB1, 0xF5, 0x82, 0xE4, 0x34,
856 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0E, 0x12,
857/*1960*/0x0E, 0x65, 0xF5, 0x10, 0xE5, 0x0F, 0x54, 0xF0,
858 0x12, 0x0E, 0x17, 0x75, 0x83, 0x8C, 0xEF, 0xF0,
859/*1970*/0xE5, 0x0F, 0x30, 0xE0, 0x0C, 0x12, 0x0E, 0x04,
860 0x75, 0x83, 0x86, 0xE0, 0x44, 0x40, 0xF0, 0x80,
861/*1980*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x86, 0xE0,
862 0x54, 0xBF, 0xF0, 0x12, 0x0E, 0x91, 0x75, 0x83,
863/*1990*/0x82, 0xE5, 0x0E, 0xF0, 0x22, 0x7F, 0x05, 0x12,
864 0x17, 0x31, 0x12, 0x0E, 0x04, 0x12, 0x0E, 0x33,
865/*19A0*/0x74, 0x02, 0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E,
866 0x04, 0x12, 0x0E, 0x0B, 0xEF, 0xF0, 0x75, 0x15,
867/*19B0*/0x70, 0x12, 0x0F, 0xF7, 0x20, 0x34, 0x05, 0x75,
868 0x15, 0x10, 0x80, 0x03, 0x75, 0x15, 0x50, 0x12,
869/*19C0*/0x0F, 0xF7, 0x20, 0x34, 0x04, 0x74, 0x10, 0x80,
870 0x02, 0x74, 0xF0, 0x25, 0x15, 0xF5, 0x15, 0x12,
871/*19D0*/0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x20,
872 0x34, 0x17, 0xE5, 0x15, 0x64, 0x30, 0x60, 0x0C,
873/*19E0*/0x74, 0x10, 0x25, 0x15, 0xF5, 0x15, 0xB4, 0x80,
874 0x03, 0xE4, 0xF5, 0x15, 0x12, 0x0E, 0x21, 0xEF,
875/*19F0*/0xF0, 0x22, 0xF0, 0xE5, 0x0B, 0x25, 0xE0, 0x25,
876 0xE0, 0x24, 0x82, 0xF5, 0x82, 0xE4, 0x34, 0x07,
877/*1A00*/0xF5, 0x83, 0x22, 0x74, 0x88, 0xFE, 0xE5, 0x08,
878 0x44, 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
879/*1A10*/0x22, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
880 0x22, 0xF0, 0xE0, 0x54, 0xC0, 0x8F, 0x82, 0x8E,
881/*1A20*/0x83, 0xF0, 0x22, 0xEF, 0x44, 0x07, 0xF5, 0x82,
882 0x75, 0x83, 0x86, 0xE0, 0x54, 0x10, 0xD3, 0x94,
883/*1A30*/0x00, 0x22, 0xF0, 0x90, 0x07, 0x15, 0xE0, 0x04,
884 0xF0, 0x22, 0x44, 0x06, 0xF5, 0x82, 0x75, 0x83,
885/*1A40*/0x9E, 0xE0, 0x22, 0xFE, 0xEF, 0x44, 0x07, 0xF5,
886 0x82, 0x8E, 0x83, 0xE0, 0x22, 0xE4, 0x90, 0x07,
887/*1A50*/0x2A, 0xF0, 0xA3, 0xF0, 0x12, 0x07, 0x2A, 0x75,
888 0x83, 0x82, 0xE0, 0x54, 0x7F, 0x12, 0x07, 0x29,
889/*1A60*/0xE0, 0x44, 0x80, 0xF0, 0x12, 0x10, 0xFC, 0x12,
890 0x08, 0x1F, 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0,
891/*1A70*/0x1A, 0x90, 0x07, 0x2B, 0xE0, 0x04, 0xF0, 0x70,
892 0x06, 0x90, 0x07, 0x2A, 0xE0, 0x04, 0xF0, 0x90,
893/*1A80*/0x07, 0x2A, 0xE0, 0xB4, 0x10, 0xE1, 0xA3, 0xE0,
894 0xB4, 0x00, 0xDC, 0xEE, 0x44, 0xA6, 0xFC, 0xEF,
895/*1A90*/0x44, 0x07, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0xF5,
896 0x32, 0xEE, 0x44, 0xA8, 0xFE, 0xEF, 0x44, 0x07,
897/*1AA0*/0xF5, 0x82, 0x8E, 0x83, 0xE0, 0xF5, 0x33, 0x22,
898 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x90,
899/*1AB0*/0x00, 0x20, 0x0F, 0x92, 0x00, 0x21, 0x0F, 0x94,
900 0x00, 0x22, 0x0F, 0x96, 0x00, 0x23, 0x0F, 0x98,
901/*1AC0*/0x00, 0x24, 0x0F, 0x9A, 0x00, 0x25, 0x0F, 0x9C,
902 0x00, 0x26, 0x0F, 0x9E, 0x00, 0x27, 0x0F, 0xA0,
903/*1AD0*/0x01, 0x20, 0x01, 0xA2, 0x01, 0x21, 0x01, 0xA4,
904 0x01, 0x22, 0x01, 0xA6, 0x01, 0x23, 0x01, 0xA8,
905/*1AE0*/0x01, 0x24, 0x01, 0xAA, 0x01, 0x25, 0x01, 0xAC,
906 0x01, 0x26, 0x01, 0xAE, 0x01, 0x27, 0x01, 0xB0,
907/*1AF0*/0x01, 0x28, 0x01, 0xB4, 0x00, 0x28, 0x0F, 0xB6,
908 0x40, 0x28, 0x0F, 0xB8, 0x61, 0x28, 0x01, 0xCB,
909/*1B00*/0xEF, 0xCB, 0xCA, 0xEE, 0xCA, 0x7F, 0x01, 0xE4,
910 0xFD, 0xEB, 0x4A, 0x70, 0x24, 0xE5, 0x08, 0xF5,
911/*1B10*/0x82, 0x74, 0xB6, 0x12, 0x08, 0x29, 0xE5, 0x08,
912 0xF5, 0x82, 0x74, 0xB8, 0x12, 0x08, 0x29, 0xE5,
913/*1B20*/0x08, 0xF5, 0x82, 0x74, 0xBA, 0x12, 0x08, 0x29,
914 0x7E, 0x00, 0x7C, 0x00, 0x12, 0x0A, 0xFF, 0x80,
915/*1B30*/0x12, 0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE5,
916 0x41, 0xF0, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35,
917/*1B40*/0xE5, 0x40, 0xF0, 0x12, 0x07, 0x2A, 0x75, 0x83,
918 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74, 0x01, 0x12,
919/*1B50*/0x07, 0x29, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x26,
920 0xF5, 0x27, 0x53, 0xE1, 0xFE, 0xF5, 0x2A, 0x75,
921/*1B60*/0x2B, 0x01, 0xF5, 0x08, 0x7F, 0x01, 0x12, 0x17,
922 0x31, 0x30, 0x30, 0x1C, 0x90, 0x1A, 0xA9, 0xE4,
923/*1B70*/0x93, 0xF5, 0x10, 0x90, 0x1F, 0xF9, 0xE4, 0x93,
924 0xF5, 0x10, 0x90, 0x00, 0x41, 0xE4, 0x93, 0xF5,
925/*1B80*/0x10, 0x90, 0x1E, 0xCA, 0xE4, 0x93, 0xF5, 0x10,
926 0x7F, 0x02, 0x12, 0x17, 0x31, 0x12, 0x0F, 0x54,
927/*1B90*/0x7F, 0x03, 0x12, 0x17, 0x31, 0x12, 0x00, 0x06,
928 0xE5, 0xE2, 0x30, 0xE7, 0x09, 0x12, 0x10, 0x00,
929/*1BA0*/0x30, 0x30, 0x03, 0x12, 0x11, 0x00, 0x02, 0x00,
930 0x47, 0x12, 0x08, 0x1F, 0x75, 0x83, 0xD0, 0xE0,
931/*1BB0*/0xC4, 0x54, 0x0F, 0xFD, 0x75, 0x43, 0x01, 0x75,
932 0x44, 0xFF, 0x12, 0x08, 0xAA, 0x74, 0x04, 0xF0,
933/*1BC0*/0x75, 0x3B, 0x01, 0xED, 0x14, 0x60, 0x0C, 0x14,
934 0x60, 0x0B, 0x14, 0x60, 0x0F, 0x24, 0x03, 0x70,
935/*1BD0*/0x0B, 0x80, 0x09, 0x80, 0x00, 0x12, 0x08, 0xA7,
936 0x04, 0xF0, 0x80, 0x06, 0x12, 0x08, 0xA7, 0x74,
937/*1BE0*/0x04, 0xF0, 0xEE, 0x44, 0x82, 0xFE, 0xEF, 0x44,
938 0x07, 0xF5, 0x82, 0x8E, 0x83, 0xE5, 0x45, 0x12,
939/*1BF0*/0x08, 0xBE, 0x75, 0x83, 0x82, 0xE5, 0x31, 0xF0,
940 0x02, 0x11, 0x4C, 0x8E, 0x60, 0x8F, 0x61, 0x12,
941/*1C00*/0x1E, 0xA5, 0xE4, 0xFF, 0xCE, 0xED, 0xCE, 0xEE,
942 0xD3, 0x95, 0x61, 0xE5, 0x60, 0x12, 0x07, 0x6B,
943/*1C10*/0x40, 0x39, 0x74, 0x20, 0x2E, 0xF5, 0x82, 0xE4,
944 0x34, 0x03, 0xF5, 0x83, 0xE0, 0x70, 0x03, 0xFF,
945/*1C20*/0x80, 0x26, 0x12, 0x08, 0xE2, 0xFD, 0xC3, 0x9F,
946 0x40, 0x1E, 0xCF, 0xED, 0xCF, 0xEB, 0x4A, 0x70,
947/*1C30*/0x0B, 0x8D, 0x42, 0x12, 0x08, 0xEE, 0xF5, 0x41,
948 0x8E, 0x40, 0x80, 0x0C, 0x12, 0x08, 0xE2, 0xF5,
949/*1C40*/0x38, 0x12, 0x08, 0xEE, 0xF5, 0x39, 0x8E, 0x3A,
950 0x1E, 0x80, 0xBC, 0x22, 0x75, 0x58, 0x01, 0xE5,
951/*1C50*/0x35, 0x70, 0x0C, 0x12, 0x07, 0xCC, 0xE0, 0xF5,
952 0x4A, 0x12, 0x07, 0xD8, 0xE0, 0xF5, 0x4C, 0xE5,
953/*1C60*/0x35, 0xB4, 0x04, 0x0C, 0x12, 0x07, 0xE4, 0xE0,
954 0xF5, 0x4A, 0x12, 0x07, 0xF0, 0xE0, 0xF5, 0x4C,
955/*1C70*/0xE5, 0x35, 0xB4, 0x01, 0x04, 0x7F, 0x01, 0x80,
956 0x02, 0x7F, 0x00, 0xE5, 0x35, 0xB4, 0x02, 0x04,
957/*1C80*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F,
958 0x60, 0x0C, 0x12, 0x07, 0xFC, 0xE0, 0xF5, 0x4A,
959/*1C90*/0x12, 0x08, 0x08, 0xE0, 0xF5, 0x4C, 0x85, 0x41,
960 0x49, 0x85, 0x40, 0x4B, 0x22, 0x75, 0x5B, 0x01,
961/*1CA0*/0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE0, 0x54,
962 0x1F, 0xFF, 0xD3, 0x94, 0x02, 0x50, 0x04, 0x8F,
963/*1CB0*/0x58, 0x80, 0x05, 0xEF, 0x24, 0xFE, 0xF5, 0x58,
964 0xEF, 0xC3, 0x94, 0x18, 0x40, 0x05, 0x75, 0x59,
965/*1CC0*/0x18, 0x80, 0x04, 0xEF, 0x04, 0xF5, 0x59, 0x85,
966 0x43, 0x5A, 0xAF, 0x58, 0x7E, 0x00, 0xAD, 0x59,
967/*1CD0*/0x7C, 0x00, 0xAB, 0x5B, 0x7A, 0x00, 0x12, 0x15,
968 0x41, 0xAF, 0x5A, 0x7E, 0x00, 0x12, 0x18, 0x0A,
969/*1CE0*/0xAF, 0x5B, 0x7E, 0x00, 0x02, 0x1A, 0xFF, 0xE5,
970 0xE2, 0x30, 0xE7, 0x0E, 0x12, 0x10, 0x03, 0xC2,
971/*1CF0*/0x30, 0x30, 0x30, 0x03, 0x12, 0x10, 0xFF, 0x20,
972 0x33, 0x28, 0xE5, 0xE7, 0x30, 0xE7, 0x05, 0x12,
973/*1D00*/0x0E, 0xA2, 0x80, 0x0D, 0xE5, 0xFE, 0xC3, 0x94,
974 0x20, 0x50, 0x06, 0x12, 0x0E, 0xA2, 0x43, 0xF9,
975/*1D10*/0x08, 0xE5, 0xF2, 0x30, 0xE7, 0x03, 0x53, 0xF9,
976 0x7F, 0xE5, 0xF1, 0x54, 0x70, 0xD3, 0x94, 0x00,
977/*1D20*/0x50, 0xD8, 0x22, 0x12, 0x0E, 0x04, 0x75, 0x83,
978 0x80, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0x12,
979/*1D30*/0x0D, 0xFD, 0x75, 0x83, 0x84, 0x12, 0x0E, 0x02,
980 0x75, 0x83, 0x86, 0x12, 0x0E, 0x02, 0x75, 0x83,
981/*1D40*/0x8C, 0xE0, 0x54, 0xF3, 0x12, 0x0E, 0x03, 0x75,
982 0x83, 0x8E, 0x12, 0x0E, 0x02, 0x75, 0x83, 0x94,
983/*1D50*/0xE0, 0x54, 0xFB, 0xF0, 0x22, 0x12, 0x07, 0x2A,
984 0x75, 0x83, 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74,
985/*1D60*/0x01, 0x12, 0x07, 0x29, 0xE4, 0x12, 0x08, 0xBE,
986 0x75, 0x83, 0x8C, 0xE0, 0x44, 0x20, 0x12, 0x08,
987/*1D70*/0xBE, 0xE0, 0x54, 0xDF, 0xF0, 0x74, 0x84, 0x85,
988 0x08, 0x82, 0xF5, 0x83, 0xE0, 0x54, 0x7F, 0xF0,
989/*1D80*/0xE0, 0x44, 0x80, 0xF0, 0x22, 0x75, 0x56, 0x01,
990 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE, 0xFC,
991/*1D90*/0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12, 0x1E,
992 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E, 0x00,
993/*1DA0*/0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
994 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0x75, 0x56,
995/*1DB0*/0x01, 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE,
996 0xFC, 0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12,
997/*1DC0*/0x1E, 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E,
998 0x00, 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44,
999/*1DD0*/0xAF, 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xE4,
1000 0xF5, 0x16, 0x12, 0x0E, 0x44, 0xFE, 0xE5, 0x08,
1001/*1DE0*/0x44, 0x05, 0xFF, 0x12, 0x0E, 0x65, 0x8F, 0x82,
1002 0x8E, 0x83, 0xF0, 0x05, 0x16, 0xE5, 0x16, 0xC3,
1003/*1DF0*/0x94, 0x14, 0x40, 0xE6, 0xE5, 0x08, 0x12, 0x0E,
1004 0x2B, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
1005/*1E00*/0x59, 0xF5, 0x5A, 0xFF, 0xFE, 0xAD, 0x58, 0xFC,
1006 0x12, 0x09, 0x15, 0x7F, 0x04, 0x7E, 0x00, 0xAD,
1007/*1E10*/0x58, 0x7C, 0x00, 0x12, 0x09, 0x15, 0x7F, 0x02,
1008 0x7E, 0x00, 0xAD, 0x58, 0x7C, 0x00, 0x02, 0x09,
1009/*1E20*/0x15, 0xE5, 0x3C, 0x25, 0x3E, 0xFC, 0xE5, 0x42,
1010 0x24, 0x00, 0xFB, 0xE4, 0x33, 0xFA, 0xEC, 0xC3,
1011/*1E30*/0x9B, 0xEA, 0x12, 0x07, 0x6B, 0x40, 0x0B, 0x8C,
1012 0x42, 0xE5, 0x3D, 0x25, 0x3F, 0xF5, 0x41, 0x8F,
1013/*1E40*/0x40, 0x22, 0x12, 0x09, 0x0B, 0x22, 0x74, 0x84,
1014 0xF5, 0x18, 0x85, 0x08, 0x19, 0x85, 0x19, 0x82,
1015/*1E50*/0x85, 0x18, 0x83, 0xE0, 0x54, 0x7F, 0xF0, 0xE0,
1016 0x44, 0x80, 0xF0, 0xE0, 0x44, 0x80, 0xF0, 0x22,
1017/*1E60*/0xEF, 0x4E, 0x70, 0x0B, 0x12, 0x07, 0x2A, 0x75,
1018 0x83, 0xD2, 0xE0, 0x54, 0xDF, 0xF0, 0x22, 0x12,
1019/*1E70*/0x07, 0x2A, 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x20,
1020 0xF0, 0x22, 0x75, 0x58, 0x01, 0x90, 0x07, 0x26,
1021/*1E80*/0x12, 0x07, 0x35, 0xE0, 0x54, 0x3F, 0xF5, 0x41,
1022 0x12, 0x07, 0x32, 0xE0, 0x54, 0x3F, 0xF5, 0x40,
1023/*1E90*/0x22, 0x75, 0x56, 0x02, 0xE4, 0xF5, 0x57, 0x12,
1024 0x1D, 0xFC, 0xAF, 0x57, 0x7E, 0x00, 0xAD, 0x56,
1025/*1EA0*/0x7C, 0x00, 0x02, 0x04, 0x44, 0xE4, 0xF5, 0x42,
1026 0xF5, 0x41, 0xF5, 0x40, 0xF5, 0x38, 0xF5, 0x39,
1027/*1EB0*/0xF5, 0x3A, 0x22, 0xEF, 0x54, 0x07, 0xFF, 0xE5,
1028 0xF9, 0x54, 0xF8, 0x4F, 0xF5, 0xF9, 0x22, 0x7F,
1029/*1EC0*/0x01, 0xE4, 0xFE, 0x0F, 0x0E, 0xBE, 0xFF, 0xFB,
1030 0x22, 0x01, 0x20, 0x00, 0x01, 0x04, 0x20, 0x00,
1031/*1ED0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1033/*1EE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1035/*1EF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1036 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1037/*1F00*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1039/*1F10*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1040 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1041/*1F20*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1043/*1F30*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1044 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045/*1F40*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1046 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1047/*1F50*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1048 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1049/*1F60*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051/*1F70*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1052 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1053/*1F80*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1054 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1055/*1F90*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1056 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1057/*1FA0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1058 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1059/*1FB0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1060 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061/*1FC0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1062 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1063/*1FD0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1064 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1065/*1FE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1067/*1FF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1068 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
1069};
1070
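/*
 * Entry points: load the SerDes firmware image above into the 7220,
 * and verify that it was written correctly.
 */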
1071int qib_sd7220_ib_load(struct qib_devdata *dd)
1072{
1073 return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
1074 sizeof(qib_sd7220_ib_img), 0);
1075}
1076
1077int qib_sd7220_ib_vfy(struct qib_devdata *dd)
1078{
1079 return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
1080 sizeof(qib_sd7220_ib_img), 0);
1081}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
new file mode 100644
index 000000000000..b8456881f7f6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -0,0 +1,973 @@
1/*
2 * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/netdevice.h>
35
36#include "qib.h"
37#include "qib_common.h"
38
39/* default pio off, sdma on */
40static ushort sdma_descq_cnt = 256;
41module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
42MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
43
44/*
45 * Bits defined in the send DMA descriptor.
46 */
47#define SDMA_DESC_LAST (1ULL << 11)
48#define SDMA_DESC_FIRST (1ULL << 12)
49#define SDMA_DESC_DMA_HEAD (1ULL << 13)
50#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
51#define SDMA_DESC_INTR (1ULL << 15)
52#define SDMA_DESC_COUNT_LSB 16
53#define SDMA_DESC_GEN_LSB 30
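/*
 * As unmap_desc() below decodes them, each descriptor is two quadwords:
 * qw[0] holds the flag bits above (11-15), an 11-bit dword count starting
 * at bit 16, the generation starting at bit 30, and the low 32 bits of the
 * buffer DMA address in bits 63:32; the low 32 bits of qw[1] hold the high
 * 32 bits of the buffer address.
 */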
54
55char *qib_sdma_state_names[] = {
56 [qib_sdma_state_s00_hw_down] = "s00_HwDown",
57 [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
58 [qib_sdma_state_s20_idle] = "s20_Idle",
59 [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
60 [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
61 [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
62 [qib_sdma_state_s99_running] = "s99_Running",
63};
64
65char *qib_sdma_event_names[] = {
66 [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
67 [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
68 [qib_sdma_event_e20_hw_started] = "e20_HwStarted",
69 [qib_sdma_event_e30_go_running] = "e30_GoRunning",
70 [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
71 [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
72 [qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
73 [qib_sdma_event_e70_go_idle] = "e70_GoIdle",
74 [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
75 [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
76 [qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
77};
78
 79/* declare all statics here rather than keeping them sorted */
80static int alloc_sdma(struct qib_pportdata *);
81static void sdma_complete(struct kref *);
82static void sdma_finalput(struct qib_sdma_state *);
83static void sdma_get(struct qib_sdma_state *);
84static void sdma_put(struct qib_sdma_state *);
85static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
86static void sdma_start_sw_clean_up(struct qib_pportdata *);
87static void sdma_sw_clean_up_task(unsigned long);
88static void unmap_desc(struct qib_pportdata *, unsigned);
89
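/*
 * The sdma_state kref counts users of the state machine: sdma_finalput()
 * drops the initial reference and then waits on the completion, which
 * sdma_complete() signals once the last sdma_put() has run.
 */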
90static void sdma_get(struct qib_sdma_state *ss)
91{
92 kref_get(&ss->kref);
93}
94
95static void sdma_complete(struct kref *kref)
96{
97 struct qib_sdma_state *ss =
98 container_of(kref, struct qib_sdma_state, kref);
99
100 complete(&ss->comp);
101}
102
103static void sdma_put(struct qib_sdma_state *ss)
104{
105 kref_put(&ss->kref, sdma_complete);
106}
107
108static void sdma_finalput(struct qib_sdma_state *ss)
109{
110 sdma_put(ss);
111 wait_for_completion(&ss->comp);
112}
113
114/*
115 * Complete all the sdma requests on the active list, in the correct
116 * order, and with appropriate processing. Called when cleaning up
117 * after sdma shutdown, and when new sdma requests are submitted for
118 * a link that is down. This matches what is done for requests
 119 * that complete normally; it's just the full list.
120 *
121 * Must be called with sdma_lock held
122 */
123static void clear_sdma_activelist(struct qib_pportdata *ppd)
124{
125 struct qib_sdma_txreq *txp, *txp_next;
126
127 list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
128 list_del_init(&txp->list);
129 if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
130 unsigned idx;
131
132 idx = txp->start_idx;
133 while (idx != txp->next_descq_idx) {
134 unmap_desc(ppd, idx);
135 if (++idx == ppd->sdma_descq_cnt)
136 idx = 0;
137 }
138 }
139 if (txp->callback)
140 (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
141 }
142}
143
144static void sdma_sw_clean_up_task(unsigned long opaque)
145{
146 struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
147 unsigned long flags;
148
149 spin_lock_irqsave(&ppd->sdma_lock, flags);
150
151 /*
152 * At this point, the following should always be true:
153 * - We are halted, so no more descriptors are getting retired.
154 * - We are not running, so no one is submitting new work.
155 * - Only we can send the e40_sw_cleaned, so we can't start
156 * running again until we say so. So, the active list and
157 * descq are ours to play with.
158 */
159
160 /* Process all retired requests. */
161 qib_sdma_make_progress(ppd);
162
163 clear_sdma_activelist(ppd);
164
165 /*
166 * Resync count of added and removed. It is VERY important that
167 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
168 */
169 ppd->sdma_descq_removed = ppd->sdma_descq_added;
170
171 /*
172 * Reset our notion of head and tail.
173 * Note that the HW registers will be reset when switching states
174 * due to calling __qib_sdma_process_event() below.
175 */
176 ppd->sdma_descq_tail = 0;
177 ppd->sdma_descq_head = 0;
178 ppd->sdma_head_dma[0] = 0;
179 ppd->sdma_generation = 0;
180
181 __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
182
183 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
184}
185
186/*
187 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
188 * as a result of send buffer errors or send DMA descriptor errors.
189 * We want to disarm the buffers in these cases.
190 */
191static void sdma_hw_start_up(struct qib_pportdata *ppd)
192{
193 struct qib_sdma_state *ss = &ppd->sdma_state;
194 unsigned bufno;
195
196 for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
197 ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
198
199 ppd->dd->f_sdma_hw_start_up(ppd);
200}
201
202static void sdma_sw_tear_down(struct qib_pportdata *ppd)
203{
204 struct qib_sdma_state *ss = &ppd->sdma_state;
205
206 /* Releasing this reference means the state machine has stopped. */
207 sdma_put(ss);
208}
209
210static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
211{
212 tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
213}
214
215static void sdma_set_state(struct qib_pportdata *ppd,
216 enum qib_sdma_states next_state)
217{
218 struct qib_sdma_state *ss = &ppd->sdma_state;
219 struct sdma_set_state_action *action = ss->set_state_action;
220 unsigned op = 0;
221
222 /* debugging bookkeeping */
223 ss->previous_state = ss->current_state;
224 ss->previous_op = ss->current_op;
225
226 ss->current_state = next_state;
227
228 if (action[next_state].op_enable)
229 op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
230
231 if (action[next_state].op_intenable)
232 op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
233
234 if (action[next_state].op_halt)
235 op |= QIB_SDMA_SENDCTRL_OP_HALT;
236
237 if (action[next_state].op_drain)
238 op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
239
240 if (action[next_state].go_s99_running_tofalse)
241 ss->go_s99_running = 0;
242
243 if (action[next_state].go_s99_running_totrue)
244 ss->go_s99_running = 1;
245
246 ss->current_op = op;
247
248 ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
249}
250
251static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
252{
253 __le64 *descqp = &ppd->sdma_descq[head].qw[0];
254 u64 desc[2];
255 dma_addr_t addr;
256 size_t len;
257
258 desc[0] = le64_to_cpu(descqp[0]);
259 desc[1] = le64_to_cpu(descqp[1]);
260
261 addr = (desc[1] << 32) | (desc[0] >> 32);
262 len = (desc[0] >> 14) & (0x7ffULL << 2);
263 dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
264}
265
266static int alloc_sdma(struct qib_pportdata *ppd)
267{
268 ppd->sdma_descq_cnt = sdma_descq_cnt;
269 if (!ppd->sdma_descq_cnt)
270 ppd->sdma_descq_cnt = 256;
271
272 /* Allocate memory for SendDMA descriptor FIFO */
273 ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
274 ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
275 GFP_KERNEL);
276
277 if (!ppd->sdma_descq) {
278 qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
279 "FIFO memory\n");
280 goto bail;
281 }
282
283 /* Allocate memory for DMA of head register to memory */
284 ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
285 PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
286 if (!ppd->sdma_head_dma) {
287 qib_dev_err(ppd->dd, "failed to allocate SendDMA "
288 "head memory\n");
289 goto cleanup_descq;
290 }
291 ppd->sdma_head_dma[0] = 0;
292 return 0;
293
294cleanup_descq:
295 dma_free_coherent(&ppd->dd->pcidev->dev,
296 ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
297 ppd->sdma_descq_phys);
298 ppd->sdma_descq = NULL;
299 ppd->sdma_descq_phys = 0;
300bail:
301 ppd->sdma_descq_cnt = 0;
302 return -ENOMEM;
303}
304
305static void free_sdma(struct qib_pportdata *ppd)
306{
307 struct qib_devdata *dd = ppd->dd;
308
309 if (ppd->sdma_head_dma) {
310 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
311 (void *)ppd->sdma_head_dma,
312 ppd->sdma_head_phys);
313 ppd->sdma_head_dma = NULL;
314 ppd->sdma_head_phys = 0;
315 }
316
317 if (ppd->sdma_descq) {
318 dma_free_coherent(&dd->pcidev->dev,
319 ppd->sdma_descq_cnt * sizeof(u64[2]),
320 ppd->sdma_descq, ppd->sdma_descq_phys);
321 ppd->sdma_descq = NULL;
322 ppd->sdma_descq_phys = 0;
323 }
324}
325
326static inline void make_sdma_desc(struct qib_pportdata *ppd,
327 u64 *sdmadesc, u64 addr, u64 dwlen,
328 u64 dwoffset)
329{
330
331 WARN_ON(addr & 3);
332 /* SDmaPhyAddr[47:32] */
333 sdmadesc[1] = addr >> 32;
334 /* SDmaPhyAddr[31:0] */
335 sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
336 /* SDmaGeneration[1:0] */
337 sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
338 SDMA_DESC_GEN_LSB;
339 /* SDmaDwordCount[10:0] */
340 sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
341 /* SDmaBufOffset[12:2] */
342 sdmadesc[0] |= dwoffset & 0x7ffULL;
343}
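Editorial sketch of the descriptor layout built by make_sdma_desc() and decoded by unmap_desc() above: the physical address straddles the two quadwords, the generation and dword count sit in fixed fields of qw0, and the byte length is recovered with the shift/mask in unmap_desc(). The count shift (16) follows from that extraction; the generation shift (30) is an assumption here, since SDMA_DESC_GEN_LSB is defined in a header not shown. User-space illustration only:

/* Editorial sketch: round-trip the SDMA descriptor packing used by
 * make_sdma_desc()/unmap_desc() above.  SDMA_DESC_COUNT_LSB (16) follows
 * from the unmap_desc() extraction; SDMA_DESC_GEN_LSB (30) is assumed. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SDMA_DESC_COUNT_LSB	16	/* SDmaDwordCount[10:0] */
#define SDMA_DESC_GEN_LSB	30	/* SDmaGeneration[1:0] (assumed) */

static void pack(uint64_t desc[2], uint64_t addr, uint64_t dwlen,
		 uint64_t dwoffset, unsigned gen)
{
	desc[1] = addr >> 32;				/* SDmaPhyAddr[47:32] */
	desc[0] = (addr & 0xfffffffcULL) << 32;		/* SDmaPhyAddr[31:0] */
	desc[0] |= (uint64_t)(gen & 3) << SDMA_DESC_GEN_LSB;
	desc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	desc[0] |= dwoffset & 0x7ffULL;			/* SDmaBufOffset[12:2] */
}

int main(void)
{
	uint64_t d[2];
	uint64_t addr, len;

	pack(d, 0x12345678f0ULL, 9, 5, 2);

	/* same extraction as unmap_desc() */
	addr = (d[1] << 32) | (d[0] >> 32);
	len = (d[0] >> 14) & (0x7ffULL << 2);		/* dwords -> bytes */

	assert(addr == 0x12345678f0ULL);
	assert(len == 9 * 4);
	printf("addr=0x%llx len=%llu bytes\n",
	       (unsigned long long)addr, (unsigned long long)len);
	return 0;
}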
344
345/* sdma_lock must be held */
346int qib_sdma_make_progress(struct qib_pportdata *ppd)
347{
348 struct list_head *lp = NULL;
349 struct qib_sdma_txreq *txp = NULL;
350 struct qib_devdata *dd = ppd->dd;
351 int progress = 0;
352 u16 hwhead;
353 u16 idx = 0;
354
355 hwhead = dd->f_sdma_gethead(ppd);
356
357 /* The reason for some of the complexity of this code is that
358 * not all descriptors have corresponding txps. So, we have to
359 * be able to skip over descs until we wander into the range of
360 * the next txp on the list.
361 */
362
363 if (!list_empty(&ppd->sdma_activelist)) {
364 lp = ppd->sdma_activelist.next;
365 txp = list_entry(lp, struct qib_sdma_txreq, list);
366 idx = txp->start_idx;
367 }
368
369 while (ppd->sdma_descq_head != hwhead) {
370 /* if desc is part of this txp, unmap if needed */
371 if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
372 (idx == ppd->sdma_descq_head)) {
373 unmap_desc(ppd, ppd->sdma_descq_head);
374 if (++idx == ppd->sdma_descq_cnt)
375 idx = 0;
376 }
377
378 /* increment dequed desc count */
379 ppd->sdma_descq_removed++;
380
381 /* advance head, wrap if needed */
382 if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
383 ppd->sdma_descq_head = 0;
384
385 /* if now past this txp's descs, do the callback */
386 if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
387 /* remove from active list */
388 list_del_init(&txp->list);
389 if (txp->callback)
390 (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
391 /* see if there is another txp */
392 if (list_empty(&ppd->sdma_activelist))
393 txp = NULL;
394 else {
395 lp = ppd->sdma_activelist.next;
396 txp = list_entry(lp, struct qib_sdma_txreq,
397 list);
398 idx = txp->start_idx;
399 }
400 }
401 progress = 1;
402 }
403 if (progress)
404 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
405 return progress;
406}
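Editorial sketch of the descriptor bookkeeping: submission bumps sdma_descq_added, retirement in qib_sdma_make_progress() bumps sdma_descq_removed, and the clean-up path above only ever resyncs removed up to added (never backwards). Occupancy is always added minus removed; the exact free-count formula lives in qib_sdma_descq_freecnt() in a header not shown here, so the "one reserved slot" below is an assumption:

/* Editorial sketch: ring occupancy from monotonic added/removed counters. */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned cnt;		/* number of descriptor slots */
	unsigned long added;	/* total descriptors ever submitted */
	unsigned long removed;	/* total descriptors ever retired */
};

/* assumed formula: one slot kept unused so head == tail means empty */
static unsigned freecnt(const struct ring *r)
{
	return r->cnt - (unsigned)(r->added - r->removed) - 1;
}

int main(void)
{
	struct ring r = { .cnt = 256 };

	r.added += 10;			/* submit a 10-descriptor request */
	assert(freecnt(&r) == 245);

	r.removed += 10;		/* hardware retires it */
	assert(freecnt(&r) == 255);

	r.removed = r.added;		/* error clean-up: resync, never decrement */
	printf("free=%u\n", freecnt(&r));
	return 0;
}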
407
408/*
409 * This is called from interrupt context.
410 */
411void qib_sdma_intr(struct qib_pportdata *ppd)
412{
413 unsigned long flags;
414
415 spin_lock_irqsave(&ppd->sdma_lock, flags);
416
417 __qib_sdma_intr(ppd);
418
419 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
420}
421
422void __qib_sdma_intr(struct qib_pportdata *ppd)
423{
424 if (__qib_sdma_running(ppd))
425 qib_sdma_make_progress(ppd);
426}
427
428int qib_setup_sdma(struct qib_pportdata *ppd)
429{
430 struct qib_devdata *dd = ppd->dd;
431 unsigned long flags;
432 int ret = 0;
433
434 ret = alloc_sdma(ppd);
435 if (ret)
436 goto bail;
437
438 /* set consistent sdma state */
439 ppd->dd->f_sdma_init_early(ppd);
440 spin_lock_irqsave(&ppd->sdma_lock, flags);
441 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
442 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
443
444 /* set up reference counting */
445 kref_init(&ppd->sdma_state.kref);
446 init_completion(&ppd->sdma_state.comp);
447
448 ppd->sdma_generation = 0;
449 ppd->sdma_descq_head = 0;
450 ppd->sdma_descq_removed = 0;
451 ppd->sdma_descq_added = 0;
452
453 INIT_LIST_HEAD(&ppd->sdma_activelist);
454
455 tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
456 (unsigned long)ppd);
457
458 ret = dd->f_init_sdma_regs(ppd);
459 if (ret)
460 goto bail_alloc;
461
462 qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
463
464 return 0;
465
466bail_alloc:
467 qib_teardown_sdma(ppd);
468bail:
469 return ret;
470}
471
472void qib_teardown_sdma(struct qib_pportdata *ppd)
473{
474 qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
475
476 /*
477 * This waits for the state machine to exit so it is not
478 * necessary to kill the sdma_sw_clean_up_task to make sure
479 * it is not running.
480 */
481 sdma_finalput(&ppd->sdma_state);
482
483 free_sdma(ppd);
484}
485
486int qib_sdma_running(struct qib_pportdata *ppd)
487{
488 unsigned long flags;
489 int ret;
490
491 spin_lock_irqsave(&ppd->sdma_lock, flags);
492 ret = __qib_sdma_running(ppd);
493 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
494
495 return ret;
496}
497
498/*
 499 * Complete a request when sdma is not running; it is likely the only
 500 * request, but to simplify the code, always queue it, then process the
 501 * full activelist. We process the entire list to ensure that this
 502 * particular request does get its callback, and in the correct order.
503 * Must be called with sdma_lock held
504 */
505static void complete_sdma_err_req(struct qib_pportdata *ppd,
506 struct qib_verbs_txreq *tx)
507{
508 atomic_inc(&tx->qp->s_dma_busy);
509 /* no sdma descriptors, so no unmap_desc */
510 tx->txreq.start_idx = 0;
511 tx->txreq.next_descq_idx = 0;
512 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
513 clear_sdma_activelist(ppd);
514}
515
516/*
517 * This function queues one IB packet onto the send DMA queue per call.
518 * The caller is responsible for checking:
519 * 1) The number of send DMA descriptor entries is less than the size of
520 * the descriptor queue.
521 * 2) The IB SGE addresses and lengths are 32-bit aligned
522 * (except possibly the last SGE's length)
523 * 3) The SGE addresses are suitable for passing to dma_map_single().
524 */
525int qib_sdma_verbs_send(struct qib_pportdata *ppd,
526 struct qib_sge_state *ss, u32 dwords,
527 struct qib_verbs_txreq *tx)
528{
529 unsigned long flags;
530 struct qib_sge *sge;
531 struct qib_qp *qp;
532 int ret = 0;
533 u16 tail;
534 __le64 *descqp;
535 u64 sdmadesc[2];
536 u32 dwoffset;
537 dma_addr_t addr;
538
539 spin_lock_irqsave(&ppd->sdma_lock, flags);
540
541retry:
542 if (unlikely(!__qib_sdma_running(ppd))) {
543 complete_sdma_err_req(ppd, tx);
544 goto unlock;
545 }
546
547 if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
548 if (qib_sdma_make_progress(ppd))
549 goto retry;
550 if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
551 ppd->dd->f_sdma_set_desc_cnt(ppd,
552 ppd->sdma_descq_cnt / 2);
553 goto busy;
554 }
555
556 dwoffset = tx->hdr_dwords;
557 make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
558
559 sdmadesc[0] |= SDMA_DESC_FIRST;
560 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
561 sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
562
563 /* write to the descq */
564 tail = ppd->sdma_descq_tail;
565 descqp = &ppd->sdma_descq[tail].qw[0];
566 *descqp++ = cpu_to_le64(sdmadesc[0]);
567 *descqp++ = cpu_to_le64(sdmadesc[1]);
568
569 /* increment the tail */
570 if (++tail == ppd->sdma_descq_cnt) {
571 tail = 0;
572 descqp = &ppd->sdma_descq[0].qw[0];
573 ++ppd->sdma_generation;
574 }
575
576 tx->txreq.start_idx = tail;
577
578 sge = &ss->sge;
579 while (dwords) {
580 u32 dw;
581 u32 len;
582
583 len = dwords << 2;
584 if (len > sge->length)
585 len = sge->length;
586 if (len > sge->sge_length)
587 len = sge->sge_length;
588 BUG_ON(len == 0);
589 dw = (len + 3) >> 2;
590 addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
591 dw << 2, DMA_TO_DEVICE);
592 if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
593 goto unmap;
594 sdmadesc[0] = 0;
595 make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
596 /* SDmaUseLargeBuf has to be set in every descriptor */
597 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
598 sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
599 /* write to the descq */
600 *descqp++ = cpu_to_le64(sdmadesc[0]);
601 *descqp++ = cpu_to_le64(sdmadesc[1]);
602
603 /* increment the tail */
604 if (++tail == ppd->sdma_descq_cnt) {
605 tail = 0;
606 descqp = &ppd->sdma_descq[0].qw[0];
607 ++ppd->sdma_generation;
608 }
609 sge->vaddr += len;
610 sge->length -= len;
611 sge->sge_length -= len;
612 if (sge->sge_length == 0) {
613 if (--ss->num_sge)
614 *sge = *ss->sg_list++;
615 } else if (sge->length == 0 && sge->mr->lkey) {
616 if (++sge->n >= QIB_SEGSZ) {
617 if (++sge->m >= sge->mr->mapsz)
618 break;
619 sge->n = 0;
620 }
621 sge->vaddr =
622 sge->mr->map[sge->m]->segs[sge->n].vaddr;
623 sge->length =
624 sge->mr->map[sge->m]->segs[sge->n].length;
625 }
626
627 dwoffset += dw;
628 dwords -= dw;
629 }
630
631 if (!tail)
632 descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
633 descqp -= 2;
634 descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
635 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
636 descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
637 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
638 descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
639
640 atomic_inc(&tx->qp->s_dma_busy);
641 tx->txreq.next_descq_idx = tail;
642 ppd->dd->f_sdma_update_tail(ppd, tail);
643 ppd->sdma_descq_added += tx->txreq.sg_count;
644 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
645 goto unlock;
646
647unmap:
648 for (;;) {
649 if (!tail)
650 tail = ppd->sdma_descq_cnt - 1;
651 else
652 tail--;
653 if (tail == ppd->sdma_descq_tail)
654 break;
655 unmap_desc(ppd, tail);
656 }
657 qp = tx->qp;
658 qib_put_txreq(tx);
659 spin_lock(&qp->s_lock);
660 if (qp->ibqp.qp_type == IB_QPT_RC) {
661 /* XXX what about error sending RDMA read responses? */
662 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
663 qib_error_qp(qp, IB_WC_GENERAL_ERR);
664 } else if (qp->s_wqe)
665 qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
666 spin_unlock(&qp->s_lock);
667 /* return zero to process the next send work request */
668 goto unlock;
669
670busy:
671 qp = tx->qp;
672 spin_lock(&qp->s_lock);
673 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
674 struct qib_ibdev *dev;
675
676 /*
677 * If we couldn't queue the DMA request, save the info
678 * and try again later rather than destroying the
679 * buffer and undoing the side effects of the copy.
680 */
681 tx->ss = ss;
682 tx->dwords = dwords;
683 qp->s_tx = tx;
684 dev = &ppd->dd->verbs_dev;
685 spin_lock(&dev->pending_lock);
686 if (list_empty(&qp->iowait)) {
687 struct qib_ibport *ibp;
688
689 ibp = &ppd->ibport_data;
690 ibp->n_dmawait++;
691 qp->s_flags |= QIB_S_WAIT_DMA_DESC;
692 list_add_tail(&qp->iowait, &dev->dmawait);
693 }
694 spin_unlock(&dev->pending_lock);
695 qp->s_flags &= ~QIB_S_BUSY;
696 spin_unlock(&qp->s_lock);
697 ret = -EBUSY;
698 } else {
699 spin_unlock(&qp->s_lock);
700 qib_put_txreq(tx);
701 }
702unlock:
703 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
704 return ret;
705}
706
707void qib_sdma_process_event(struct qib_pportdata *ppd,
708 enum qib_sdma_events event)
709{
710 unsigned long flags;
711
712 spin_lock_irqsave(&ppd->sdma_lock, flags);
713
714 __qib_sdma_process_event(ppd, event);
715
716 if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
717 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
718
719 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
720}
721
722void __qib_sdma_process_event(struct qib_pportdata *ppd,
723 enum qib_sdma_events event)
724{
725 struct qib_sdma_state *ss = &ppd->sdma_state;
726
727 switch (ss->current_state) {
728 case qib_sdma_state_s00_hw_down:
729 switch (event) {
730 case qib_sdma_event_e00_go_hw_down:
731 break;
732 case qib_sdma_event_e30_go_running:
733 /*
 734 * If down, but running is requested (usually the result
 735 * of a link coming up), then we need to start up.
 736 * This can happen when hw down is requested while
 737 * bringing the link up with traffic active on
 738 * the 7220, for example. */
739 ss->go_s99_running = 1;
740 /* fall through and start dma engine */
741 case qib_sdma_event_e10_go_hw_start:
742 /* This reference means the state machine is started */
743 sdma_get(&ppd->sdma_state);
744 sdma_set_state(ppd,
745 qib_sdma_state_s10_hw_start_up_wait);
746 break;
747 case qib_sdma_event_e20_hw_started:
748 break;
749 case qib_sdma_event_e40_sw_cleaned:
750 sdma_sw_tear_down(ppd);
751 break;
752 case qib_sdma_event_e50_hw_cleaned:
753 break;
754 case qib_sdma_event_e60_hw_halted:
755 break;
756 case qib_sdma_event_e70_go_idle:
757 break;
758 case qib_sdma_event_e7220_err_halted:
759 break;
760 case qib_sdma_event_e7322_err_halted:
761 break;
762 case qib_sdma_event_e90_timer_tick:
763 break;
764 }
765 break;
766
767 case qib_sdma_state_s10_hw_start_up_wait:
768 switch (event) {
769 case qib_sdma_event_e00_go_hw_down:
770 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
771 sdma_sw_tear_down(ppd);
772 break;
773 case qib_sdma_event_e10_go_hw_start:
774 break;
775 case qib_sdma_event_e20_hw_started:
776 sdma_set_state(ppd, ss->go_s99_running ?
777 qib_sdma_state_s99_running :
778 qib_sdma_state_s20_idle);
779 break;
780 case qib_sdma_event_e30_go_running:
781 ss->go_s99_running = 1;
782 break;
783 case qib_sdma_event_e40_sw_cleaned:
784 break;
785 case qib_sdma_event_e50_hw_cleaned:
786 break;
787 case qib_sdma_event_e60_hw_halted:
788 break;
789 case qib_sdma_event_e70_go_idle:
790 ss->go_s99_running = 0;
791 break;
792 case qib_sdma_event_e7220_err_halted:
793 break;
794 case qib_sdma_event_e7322_err_halted:
795 break;
796 case qib_sdma_event_e90_timer_tick:
797 break;
798 }
799 break;
800
801 case qib_sdma_state_s20_idle:
802 switch (event) {
803 case qib_sdma_event_e00_go_hw_down:
804 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
805 sdma_sw_tear_down(ppd);
806 break;
807 case qib_sdma_event_e10_go_hw_start:
808 break;
809 case qib_sdma_event_e20_hw_started:
810 break;
811 case qib_sdma_event_e30_go_running:
812 sdma_set_state(ppd, qib_sdma_state_s99_running);
813 ss->go_s99_running = 1;
814 break;
815 case qib_sdma_event_e40_sw_cleaned:
816 break;
817 case qib_sdma_event_e50_hw_cleaned:
818 break;
819 case qib_sdma_event_e60_hw_halted:
820 break;
821 case qib_sdma_event_e70_go_idle:
822 break;
823 case qib_sdma_event_e7220_err_halted:
824 break;
825 case qib_sdma_event_e7322_err_halted:
826 break;
827 case qib_sdma_event_e90_timer_tick:
828 break;
829 }
830 break;
831
832 case qib_sdma_state_s30_sw_clean_up_wait:
833 switch (event) {
834 case qib_sdma_event_e00_go_hw_down:
835 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
836 break;
837 case qib_sdma_event_e10_go_hw_start:
838 break;
839 case qib_sdma_event_e20_hw_started:
840 break;
841 case qib_sdma_event_e30_go_running:
842 ss->go_s99_running = 1;
843 break;
844 case qib_sdma_event_e40_sw_cleaned:
845 sdma_set_state(ppd,
846 qib_sdma_state_s10_hw_start_up_wait);
847 sdma_hw_start_up(ppd);
848 break;
849 case qib_sdma_event_e50_hw_cleaned:
850 break;
851 case qib_sdma_event_e60_hw_halted:
852 break;
853 case qib_sdma_event_e70_go_idle:
854 ss->go_s99_running = 0;
855 break;
856 case qib_sdma_event_e7220_err_halted:
857 break;
858 case qib_sdma_event_e7322_err_halted:
859 break;
860 case qib_sdma_event_e90_timer_tick:
861 break;
862 }
863 break;
864
865 case qib_sdma_state_s40_hw_clean_up_wait:
866 switch (event) {
867 case qib_sdma_event_e00_go_hw_down:
868 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
869 sdma_start_sw_clean_up(ppd);
870 break;
871 case qib_sdma_event_e10_go_hw_start:
872 break;
873 case qib_sdma_event_e20_hw_started:
874 break;
875 case qib_sdma_event_e30_go_running:
876 ss->go_s99_running = 1;
877 break;
878 case qib_sdma_event_e40_sw_cleaned:
879 break;
880 case qib_sdma_event_e50_hw_cleaned:
881 sdma_set_state(ppd,
882 qib_sdma_state_s30_sw_clean_up_wait);
883 sdma_start_sw_clean_up(ppd);
884 break;
885 case qib_sdma_event_e60_hw_halted:
886 break;
887 case qib_sdma_event_e70_go_idle:
888 ss->go_s99_running = 0;
889 break;
890 case qib_sdma_event_e7220_err_halted:
891 break;
892 case qib_sdma_event_e7322_err_halted:
893 break;
894 case qib_sdma_event_e90_timer_tick:
895 break;
896 }
897 break;
898
899 case qib_sdma_state_s50_hw_halt_wait:
900 switch (event) {
901 case qib_sdma_event_e00_go_hw_down:
902 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
903 sdma_start_sw_clean_up(ppd);
904 break;
905 case qib_sdma_event_e10_go_hw_start:
906 break;
907 case qib_sdma_event_e20_hw_started:
908 break;
909 case qib_sdma_event_e30_go_running:
910 ss->go_s99_running = 1;
911 break;
912 case qib_sdma_event_e40_sw_cleaned:
913 break;
914 case qib_sdma_event_e50_hw_cleaned:
915 break;
916 case qib_sdma_event_e60_hw_halted:
917 sdma_set_state(ppd,
918 qib_sdma_state_s40_hw_clean_up_wait);
919 ppd->dd->f_sdma_hw_clean_up(ppd);
920 break;
921 case qib_sdma_event_e70_go_idle:
922 ss->go_s99_running = 0;
923 break;
924 case qib_sdma_event_e7220_err_halted:
925 break;
926 case qib_sdma_event_e7322_err_halted:
927 break;
928 case qib_sdma_event_e90_timer_tick:
929 break;
930 }
931 break;
932
933 case qib_sdma_state_s99_running:
934 switch (event) {
935 case qib_sdma_event_e00_go_hw_down:
936 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
937 sdma_start_sw_clean_up(ppd);
938 break;
939 case qib_sdma_event_e10_go_hw_start:
940 break;
941 case qib_sdma_event_e20_hw_started:
942 break;
943 case qib_sdma_event_e30_go_running:
944 break;
945 case qib_sdma_event_e40_sw_cleaned:
946 break;
947 case qib_sdma_event_e50_hw_cleaned:
948 break;
949 case qib_sdma_event_e60_hw_halted:
950 sdma_set_state(ppd,
951 qib_sdma_state_s30_sw_clean_up_wait);
952 sdma_start_sw_clean_up(ppd);
953 break;
954 case qib_sdma_event_e70_go_idle:
955 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
956 ss->go_s99_running = 0;
957 break;
958 case qib_sdma_event_e7220_err_halted:
959 sdma_set_state(ppd,
960 qib_sdma_state_s30_sw_clean_up_wait);
961 sdma_start_sw_clean_up(ppd);
962 break;
963 case qib_sdma_event_e7322_err_halted:
964 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
965 break;
966 case qib_sdma_event_e90_timer_tick:
967 break;
968 }
969 break;
970 }
971
972 ss->last_event = event;
973}
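Editorial sketch of the normal bring-up path through the state machine above: s00_hw_down takes e10_go_hw_start (or e30_go_running, which latches go_s99_running and falls through) to s10_hw_start_up_wait; e20_hw_started then lands in s99_running or s20_idle depending on whether go_s99_running was latched. Error and teardown arcs are omitted and names abbreviated; illustration only:

/* Editorial sketch: the bring-up path through the SDMA state machine. */
#include <assert.h>

enum state { S00_HW_DOWN, S10_START_UP_WAIT, S20_IDLE, S99_RUNNING };
enum event { E10_GO_HW_START, E20_HW_STARTED, E30_GO_RUNNING };

static enum state cur = S00_HW_DOWN;
static int go_running;

static void process(enum event ev)
{
	switch (cur) {
	case S00_HW_DOWN:
		if (ev == E30_GO_RUNNING)
			go_running = 1;	/* driver then falls through to start */
		if (ev == E10_GO_HW_START || ev == E30_GO_RUNNING)
			cur = S10_START_UP_WAIT;
		break;
	case S10_START_UP_WAIT:
		if (ev == E30_GO_RUNNING)
			go_running = 1;
		else if (ev == E20_HW_STARTED)
			cur = go_running ? S99_RUNNING : S20_IDLE;
		break;
	case S20_IDLE:
		if (ev == E30_GO_RUNNING) {
			go_running = 1;
			cur = S99_RUNNING;
		}
		break;
	case S99_RUNNING:
		break;
	}
}

int main(void)
{
	process(E10_GO_HW_START);	/* qib_setup_sdma() */
	process(E30_GO_RUNNING);	/* link comes up */
	process(E20_HW_STARTED);	/* hardware acknowledges start */
	assert(cur == S99_RUNNING);
	return 0;
}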
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 000000000000..c3ec8efc2ed8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "qib_verbs.h"
39
40/**
41 * qib_post_srq_receive - post a receive on a shared receive queue
42 * @ibsrq: the SRQ to post the receive on
43 * @wr: the list of work requests to post
44 * @bad_wr: A pointer to the first WR to cause a problem is put here
45 *
46 * This may be called from interrupt context.
47 */
48int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
49 struct ib_recv_wr **bad_wr)
50{
51 struct qib_srq *srq = to_isrq(ibsrq);
52 struct qib_rwq *wq;
53 unsigned long flags;
54 int ret;
55
56 for (; wr; wr = wr->next) {
57 struct qib_rwqe *wqe;
58 u32 next;
59 int i;
60
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) {
62 *bad_wr = wr;
63 ret = -EINVAL;
64 goto bail;
65 }
66
67 spin_lock_irqsave(&srq->rq.lock, flags);
68 wq = srq->rq.wq;
69 next = wq->head + 1;
70 if (next >= srq->rq.size)
71 next = 0;
72 if (next == wq->tail) {
73 spin_unlock_irqrestore(&srq->rq.lock, flags);
74 *bad_wr = wr;
75 ret = -ENOMEM;
76 goto bail;
77 }
78
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
80 wqe->wr_id = wr->wr_id;
81 wqe->num_sge = wr->num_sge;
82 for (i = 0; i < wr->num_sge; i++)
83 wqe->sg_list[i] = wr->sg_list[i];
84 /* Make sure queue entry is written before the head index. */
85 smp_wmb();
86 wq->head = next;
87 spin_unlock_irqrestore(&srq->rq.lock, flags);
88 }
89 ret = 0;
90
91bail:
92 return ret;
93}
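Editorial sketch of the ring convention used above: the receive queue holds at most size - 1 entries because one slot is deliberately left empty, so head == tail always means "empty" and "advancing head would hit tail" means "full" (the -ENOMEM case). Illustration only:

/* Editorial sketch: the "one empty slot" ring convention of the SRQ. */
#include <assert.h>
#include <stdio.h>

#define SIZE 4			/* holds at most SIZE - 1 entries */

static int slots[SIZE];
static unsigned head, tail;

static int ring_post(int val)
{
	unsigned next = head + 1;

	if (next >= SIZE)
		next = 0;
	if (next == tail)
		return -1;	/* full: the -ENOMEM case above */
	slots[head] = val;	/* write the entry before publishing head */
	head = next;
	return 0;
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < SIZE; i++)
		ret = ring_post(i);
	assert(ret == -1);	/* the fourth post fails: only SIZE - 1 fit */
	printf("capacity=%d\n", SIZE - 1);
	return 0;
}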
94
95/**
96 * qib_create_srq - create a shared receive queue
97 * @ibpd: the protection domain of the SRQ to create
98 * @srq_init_attr: the attributes of the SRQ
99 * @udata: data from libibverbs when creating a user SRQ
100 */
101struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
102 struct ib_srq_init_attr *srq_init_attr,
103 struct ib_udata *udata)
104{
105 struct qib_ibdev *dev = to_idev(ibpd->device);
106 struct qib_srq *srq;
107 u32 sz;
108 struct ib_srq *ret;
109
110 if (srq_init_attr->attr.max_sge == 0 ||
111 srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
112 srq_init_attr->attr.max_wr == 0 ||
113 srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
114 ret = ERR_PTR(-EINVAL);
115 goto done;
116 }
117
118 srq = kmalloc(sizeof(*srq), GFP_KERNEL);
119 if (!srq) {
120 ret = ERR_PTR(-ENOMEM);
121 goto done;
122 }
123
124 /*
125 * Need to use vmalloc() if we want to support large #s of entries.
126 */
127 srq->rq.size = srq_init_attr->attr.max_wr + 1;
128 srq->rq.max_sge = srq_init_attr->attr.max_sge;
129 sz = sizeof(struct ib_sge) * srq->rq.max_sge +
130 sizeof(struct qib_rwqe);
131 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
132 if (!srq->rq.wq) {
133 ret = ERR_PTR(-ENOMEM);
134 goto bail_srq;
135 }
136
137 /*
138 * Return the address of the RWQ as the offset to mmap.
139 * See qib_mmap() for details.
140 */
141 if (udata && udata->outlen >= sizeof(__u64)) {
142 int err;
143 u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
144
145 srq->ip =
146 qib_create_mmap_info(dev, s, ibpd->uobject->context,
147 srq->rq.wq);
148 if (!srq->ip) {
149 ret = ERR_PTR(-ENOMEM);
150 goto bail_wq;
151 }
152
153 err = ib_copy_to_udata(udata, &srq->ip->offset,
154 sizeof(srq->ip->offset));
155 if (err) {
156 ret = ERR_PTR(err);
157 goto bail_ip;
158 }
159 } else
160 srq->ip = NULL;
161
162 /*
163 * ib_create_srq() will initialize srq->ibsrq.
164 */
165 spin_lock_init(&srq->rq.lock);
166 srq->rq.wq->head = 0;
167 srq->rq.wq->tail = 0;
168 srq->limit = srq_init_attr->attr.srq_limit;
169
170 spin_lock(&dev->n_srqs_lock);
171 if (dev->n_srqs_allocated == ib_qib_max_srqs) {
172 spin_unlock(&dev->n_srqs_lock);
173 ret = ERR_PTR(-ENOMEM);
174 goto bail_ip;
175 }
176
177 dev->n_srqs_allocated++;
178 spin_unlock(&dev->n_srqs_lock);
179
180 if (srq->ip) {
181 spin_lock_irq(&dev->pending_lock);
182 list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
183 spin_unlock_irq(&dev->pending_lock);
184 }
185
186 ret = &srq->ibsrq;
187 goto done;
188
189bail_ip:
190 kfree(srq->ip);
191bail_wq:
192 vfree(srq->rq.wq);
193bail_srq:
194 kfree(srq);
195done:
196 return ret;
197}
198
199/**
200 * qib_modify_srq - modify a shared receive queue
201 * @ibsrq: the SRQ to modify
202 * @attr: the new attributes of the SRQ
203 * @attr_mask: indicates which attributes to modify
204 * @udata: user data for libibverbs.so
205 */
206int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
207 enum ib_srq_attr_mask attr_mask,
208 struct ib_udata *udata)
209{
210 struct qib_srq *srq = to_isrq(ibsrq);
211 struct qib_rwq *wq;
212 int ret = 0;
213
214 if (attr_mask & IB_SRQ_MAX_WR) {
215 struct qib_rwq *owq;
216 struct qib_rwqe *p;
217 u32 sz, size, n, head, tail;
218
219 /* Check that the requested sizes are below the limits. */
220 if ((attr->max_wr > ib_qib_max_srq_wrs) ||
221 ((attr_mask & IB_SRQ_LIMIT) ?
222 attr->srq_limit : srq->limit) > attr->max_wr) {
223 ret = -EINVAL;
224 goto bail;
225 }
226
227 sz = sizeof(struct qib_rwqe) +
228 srq->rq.max_sge * sizeof(struct ib_sge);
229 size = attr->max_wr + 1;
230 wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
231 if (!wq) {
232 ret = -ENOMEM;
233 goto bail;
234 }
235
236 /* Check that we can write the offset to mmap. */
237 if (udata && udata->inlen >= sizeof(__u64)) {
238 __u64 offset_addr;
239 __u64 offset = 0;
240
241 ret = ib_copy_from_udata(&offset_addr, udata,
242 sizeof(offset_addr));
243 if (ret)
244 goto bail_free;
245 udata->outbuf =
246 (void __user *) (unsigned long) offset_addr;
247 ret = ib_copy_to_udata(udata, &offset,
248 sizeof(offset));
249 if (ret)
250 goto bail_free;
251 }
252
253 spin_lock_irq(&srq->rq.lock);
254 /*
 255 * Validate head and tail pointer values and compute
256 * the number of remaining WQEs.
257 */
258 owq = srq->rq.wq;
259 head = owq->head;
260 tail = owq->tail;
261 if (head >= srq->rq.size || tail >= srq->rq.size) {
262 ret = -EINVAL;
263 goto bail_unlock;
264 }
265 n = head;
266 if (n < tail)
267 n += srq->rq.size - tail;
268 else
269 n -= tail;
270 if (size <= n) {
271 ret = -EINVAL;
272 goto bail_unlock;
273 }
274 n = 0;
275 p = wq->wq;
276 while (tail != head) {
277 struct qib_rwqe *wqe;
278 int i;
279
280 wqe = get_rwqe_ptr(&srq->rq, tail);
281 p->wr_id = wqe->wr_id;
282 p->num_sge = wqe->num_sge;
283 for (i = 0; i < wqe->num_sge; i++)
284 p->sg_list[i] = wqe->sg_list[i];
285 n++;
286 p = (struct qib_rwqe *)((char *) p + sz);
287 if (++tail >= srq->rq.size)
288 tail = 0;
289 }
290 srq->rq.wq = wq;
291 srq->rq.size = size;
292 wq->head = n;
293 wq->tail = 0;
294 if (attr_mask & IB_SRQ_LIMIT)
295 srq->limit = attr->srq_limit;
296 spin_unlock_irq(&srq->rq.lock);
297
298 vfree(owq);
299
300 if (srq->ip) {
301 struct qib_mmap_info *ip = srq->ip;
302 struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
303 u32 s = sizeof(struct qib_rwq) + size * sz;
304
305 qib_update_mmap_info(dev, ip, s, wq);
306
307 /*
308 * Return the offset to mmap.
309 * See qib_mmap() for details.
310 */
311 if (udata && udata->inlen >= sizeof(__u64)) {
312 ret = ib_copy_to_udata(udata, &ip->offset,
313 sizeof(ip->offset));
314 if (ret)
315 goto bail;
316 }
317
318 /*
319 * Put user mapping info onto the pending list
320 * unless it already is on the list.
321 */
322 spin_lock_irq(&dev->pending_lock);
323 if (list_empty(&ip->pending_mmaps))
324 list_add(&ip->pending_mmaps,
325 &dev->pending_mmaps);
326 spin_unlock_irq(&dev->pending_lock);
327 }
328 } else if (attr_mask & IB_SRQ_LIMIT) {
329 spin_lock_irq(&srq->rq.lock);
330 if (attr->srq_limit >= srq->rq.size)
331 ret = -EINVAL;
332 else
333 srq->limit = attr->srq_limit;
334 spin_unlock_irq(&srq->rq.lock);
335 }
336 goto bail;
337
338bail_unlock:
339 spin_unlock_irq(&srq->rq.lock);
340bail_free:
341 vfree(wq);
342bail:
343 return ret;
344}
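Editorial sketch of the occupancy computation in the resize path above: the number of outstanding WQEs is head - tail, with size added back when head has wrapped past the end; the new ring must be strictly larger than that count before the old entries are copied out in order starting at tail. Illustration only:

/* Editorial sketch: outstanding-entry count with wrap-around, as in
 * qib_modify_srq() above. */
#include <assert.h>

static unsigned used(unsigned head, unsigned tail, unsigned size)
{
	unsigned n = head;

	if (n < tail)
		n += size - tail;	/* head wrapped past the end */
	else
		n -= tail;
	return n;
}

int main(void)
{
	assert(used(5, 2, 8) == 3);	/* no wrap */
	assert(used(1, 6, 8) == 3);	/* wrapped: slots 6, 7, 0 */
	assert(used(4, 4, 8) == 0);	/* empty */
	return 0;
}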
345
346int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
347{
348 struct qib_srq *srq = to_isrq(ibsrq);
349
350 attr->max_wr = srq->rq.size - 1;
351 attr->max_sge = srq->rq.max_sge;
352 attr->srq_limit = srq->limit;
353 return 0;
354}
355
356/**
357 * qib_destroy_srq - destroy a shared receive queue
358 * @ibsrq: the SRQ to destroy
359 */
360int qib_destroy_srq(struct ib_srq *ibsrq)
361{
362 struct qib_srq *srq = to_isrq(ibsrq);
363 struct qib_ibdev *dev = to_idev(ibsrq->device);
364
365 spin_lock(&dev->n_srqs_lock);
366 dev->n_srqs_allocated--;
367 spin_unlock(&dev->n_srqs_lock);
368 if (srq->ip)
369 kref_put(&srq->ip->ref, qib_release_mmap_info);
370 else
371 vfree(srq->rq.wq);
372 kfree(srq);
373
374 return 0;
375}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
new file mode 100644
index 000000000000..dab4d9f4a2cc
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/ctype.h>
34
35#include "qib.h"
36
37/**
38 * qib_parse_ushort - parse an unsigned short value in an arbitrary base
39 * @str: the string containing the number
40 * @valp: where to put the result
41 *
42 * Returns the number of bytes consumed, or negative value on error.
43 */
44static int qib_parse_ushort(const char *str, unsigned short *valp)
45{
46 unsigned long val;
47 char *end;
48 int ret;
49
50 if (!isdigit(str[0])) {
51 ret = -EINVAL;
52 goto bail;
53 }
54
55 val = simple_strtoul(str, &end, 0);
56
57 if (val > 0xffff) {
58 ret = -EINVAL;
59 goto bail;
60 }
61
62 *valp = val;
63
64 ret = end + 1 - str;
65 if (ret == 0)
66 ret = -EINVAL;
67
68bail:
69 return ret;
70}
71
72/* start of per-port functions */
73/*
74 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
75 */
76static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
77{
78 struct qib_devdata *dd = ppd->dd;
79 int ret;
80
81 ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
82 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
83 return ret;
84}
85
86static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
87 size_t count)
88{
89 struct qib_devdata *dd = ppd->dd;
90 int ret;
91 u16 val;
92
93 ret = qib_parse_ushort(buf, &val);
94
95 /*
96 * Set the "intentional" heartbeat enable per either of
97 * "Enable" and "Auto", as these are normally set together.
98 * This bit is consulted when leaving loopback mode,
99 * because entering loopback mode overrides it and automatically
100 * disables heartbeat.
101 */
102 if (ret >= 0)
103 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
104 if (ret < 0)
105 qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
106 return ret < 0 ? ret : count;
107}
108
109static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
110 size_t count)
111{
112 struct qib_devdata *dd = ppd->dd;
113 int ret = count, r;
114
115 r = dd->f_set_ib_loopback(ppd, buf);
116 if (r < 0)
117 ret = r;
118
119 return ret;
120}
121
122static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
123 size_t count)
124{
125 struct qib_devdata *dd = ppd->dd;
126 int ret;
127 u16 val;
128
129 ret = qib_parse_ushort(buf, &val);
130 if (ret > 0)
131 qib_set_led_override(ppd, val);
132 else
133 qib_dev_err(dd, "attempt to set invalid LED override\n");
134 return ret < 0 ? ret : count;
135}
136
137static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
138{
139 ssize_t ret;
140
141 if (!ppd->statusp)
142 ret = -EINVAL;
143 else
144 ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
145 (unsigned long long) *(ppd->statusp));
146 return ret;
147}
148
149/*
150 * For userland compatibility, these offsets must remain fixed.
151 * They are strings for QIB_STATUS_*
152 */
153static const char *qib_status_str[] = {
154 "Initted",
155 "",
156 "",
157 "",
158 "",
159 "Present",
160 "IB_link_up",
161 "IB_configured",
162 "",
163 "Fatal_Hardware_Error",
164 NULL,
165};
166
167static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
168{
169 int i, any;
170 u64 s;
171 ssize_t ret;
172
173 if (!ppd->statusp) {
174 ret = -EINVAL;
175 goto bail;
176 }
177
178 s = *(ppd->statusp);
179 *buf = '\0';
180 for (any = i = 0; s && qib_status_str[i]; i++) {
181 if (s & 1) {
182 /* if overflow */
183 if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
184 break;
185 if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
186 PAGE_SIZE)
187 break;
188 any = 1;
189 }
190 s >>= 1;
191 }
192 if (any)
193 strlcat(buf, "\n", PAGE_SIZE);
194
195 ret = strlen(buf);
196
197bail:
198 return ret;
199}
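Editorial sketch of the bit-to-string decode used by show_status_str() above, with the reserved empty strings skipped; the names and bit positions here are abbreviated for illustration (the real table's offsets are fixed for userland compatibility):

/* Editorial sketch: decode a status bitmask into names, one bit per
 * table entry, skipping reserved empty strings. */
#include <stdio.h>

static const char *status_str[] = {
	"Initted", "", "Present", "IB_link_up", NULL,
};

int main(void)
{
	unsigned long long s = 0x9;	/* bits 0 and 3 set */
	int i;

	for (i = 0; s && status_str[i]; i++) {
		if ((s & 1) && status_str[i][0])
			printf("%s ", status_str[i]);
		s >>= 1;
	}
	printf("\n");			/* prints: Initted IB_link_up */
	return 0;
}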
200
201/* end of per-port functions */
202
203/*
204 * Start of per-port file structures and support code
205 * Because we are fitting into other infrastructure, we have to supply the
206 * full set of kobject/sysfs_ops structures and routines.
207 */
208#define QIB_PORT_ATTR(name, mode, show, store) \
209 static struct qib_port_attr qib_port_attr_##name = \
210 __ATTR(name, mode, show, store)
211
212struct qib_port_attr {
213 struct attribute attr;
214 ssize_t (*show)(struct qib_pportdata *, char *);
215 ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
216};
217
218QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
219QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
220QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
221 store_hrtbt_enb);
222QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
223QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
224
225static struct attribute *port_default_attributes[] = {
226 &qib_port_attr_loopback.attr,
227 &qib_port_attr_led_override.attr,
228 &qib_port_attr_hrtbt_enable.attr,
229 &qib_port_attr_status.attr,
230 &qib_port_attr_status_str.attr,
231 NULL
232};
233
234static ssize_t qib_portattr_show(struct kobject *kobj,
235 struct attribute *attr, char *buf)
236{
237 struct qib_port_attr *pattr =
238 container_of(attr, struct qib_port_attr, attr);
239 struct qib_pportdata *ppd =
240 container_of(kobj, struct qib_pportdata, pport_kobj);
241
242 return pattr->show(ppd, buf);
243}
244
245static ssize_t qib_portattr_store(struct kobject *kobj,
246 struct attribute *attr, const char *buf, size_t len)
247{
248 struct qib_port_attr *pattr =
249 container_of(attr, struct qib_port_attr, attr);
250 struct qib_pportdata *ppd =
251 container_of(kobj, struct qib_pportdata, pport_kobj);
252
253 return pattr->store(ppd, buf, len);
254}
255
256static void qib_port_release(struct kobject *kobj)
257{
258 /* nothing to do since memory is freed by qib_free_devdata() */
259}
260
261static const struct sysfs_ops qib_port_ops = {
262 .show = qib_portattr_show,
263 .store = qib_portattr_store,
264};
265
266static struct kobj_type qib_port_ktype = {
267 .release = qib_port_release,
268 .sysfs_ops = &qib_port_ops,
269 .default_attrs = port_default_attributes
270};
271
272/* Start sl2vl */
273
274#define QIB_SL2VL_ATTR(N) \
275 static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
276 .attr = { .name = __stringify(N), .mode = 0444 }, \
277 .sl = N \
278 }
279
280struct qib_sl2vl_attr {
281 struct attribute attr;
282 int sl;
283};
284
285QIB_SL2VL_ATTR(0);
286QIB_SL2VL_ATTR(1);
287QIB_SL2VL_ATTR(2);
288QIB_SL2VL_ATTR(3);
289QIB_SL2VL_ATTR(4);
290QIB_SL2VL_ATTR(5);
291QIB_SL2VL_ATTR(6);
292QIB_SL2VL_ATTR(7);
293QIB_SL2VL_ATTR(8);
294QIB_SL2VL_ATTR(9);
295QIB_SL2VL_ATTR(10);
296QIB_SL2VL_ATTR(11);
297QIB_SL2VL_ATTR(12);
298QIB_SL2VL_ATTR(13);
299QIB_SL2VL_ATTR(14);
300QIB_SL2VL_ATTR(15);
301
302static struct attribute *sl2vl_default_attributes[] = {
303 &qib_sl2vl_attr_0.attr,
304 &qib_sl2vl_attr_1.attr,
305 &qib_sl2vl_attr_2.attr,
306 &qib_sl2vl_attr_3.attr,
307 &qib_sl2vl_attr_4.attr,
308 &qib_sl2vl_attr_5.attr,
309 &qib_sl2vl_attr_6.attr,
310 &qib_sl2vl_attr_7.attr,
311 &qib_sl2vl_attr_8.attr,
312 &qib_sl2vl_attr_9.attr,
313 &qib_sl2vl_attr_10.attr,
314 &qib_sl2vl_attr_11.attr,
315 &qib_sl2vl_attr_12.attr,
316 &qib_sl2vl_attr_13.attr,
317 &qib_sl2vl_attr_14.attr,
318 &qib_sl2vl_attr_15.attr,
319 NULL
320};
321
322static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
323 char *buf)
324{
325 struct qib_sl2vl_attr *sattr =
326 container_of(attr, struct qib_sl2vl_attr, attr);
327 struct qib_pportdata *ppd =
328 container_of(kobj, struct qib_pportdata, sl2vl_kobj);
329 struct qib_ibport *qibp = &ppd->ibport_data;
330
331 return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
332}
333
334static const struct sysfs_ops qib_sl2vl_ops = {
335 .show = sl2vl_attr_show,
336};
337
338static struct kobj_type qib_sl2vl_ktype = {
339 .release = qib_port_release,
340 .sysfs_ops = &qib_sl2vl_ops,
341 .default_attrs = sl2vl_default_attributes
342};
343
344/* End sl2vl */
345
346/* Start diag_counters */
347
348#define QIB_DIAGC_ATTR(N) \
349 static struct qib_diagc_attr qib_diagc_attr_##N = { \
350 .attr = { .name = __stringify(N), .mode = 0444 }, \
351 .counter = offsetof(struct qib_ibport, n_##N) \
352 }
353
354struct qib_diagc_attr {
355 struct attribute attr;
356 size_t counter;
357};
358
359QIB_DIAGC_ATTR(rc_resends);
360QIB_DIAGC_ATTR(rc_acks);
361QIB_DIAGC_ATTR(rc_qacks);
362QIB_DIAGC_ATTR(rc_delayed_comp);
363QIB_DIAGC_ATTR(seq_naks);
364QIB_DIAGC_ATTR(rdma_seq);
365QIB_DIAGC_ATTR(rnr_naks);
366QIB_DIAGC_ATTR(other_naks);
367QIB_DIAGC_ATTR(rc_timeouts);
368QIB_DIAGC_ATTR(loop_pkts);
369QIB_DIAGC_ATTR(pkt_drops);
370QIB_DIAGC_ATTR(dmawait);
371QIB_DIAGC_ATTR(unaligned);
372QIB_DIAGC_ATTR(rc_dupreq);
373QIB_DIAGC_ATTR(rc_seqnak);
374
375static struct attribute *diagc_default_attributes[] = {
376 &qib_diagc_attr_rc_resends.attr,
377 &qib_diagc_attr_rc_acks.attr,
378 &qib_diagc_attr_rc_qacks.attr,
379 &qib_diagc_attr_rc_delayed_comp.attr,
380 &qib_diagc_attr_seq_naks.attr,
381 &qib_diagc_attr_rdma_seq.attr,
382 &qib_diagc_attr_rnr_naks.attr,
383 &qib_diagc_attr_other_naks.attr,
384 &qib_diagc_attr_rc_timeouts.attr,
385 &qib_diagc_attr_loop_pkts.attr,
386 &qib_diagc_attr_pkt_drops.attr,
387 &qib_diagc_attr_dmawait.attr,
388 &qib_diagc_attr_unaligned.attr,
389 &qib_diagc_attr_rc_dupreq.attr,
390 &qib_diagc_attr_rc_seqnak.attr,
391 NULL
392};
393
394static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
395 char *buf)
396{
397 struct qib_diagc_attr *dattr =
398 container_of(attr, struct qib_diagc_attr, attr);
399 struct qib_pportdata *ppd =
400 container_of(kobj, struct qib_pportdata, diagc_kobj);
401 struct qib_ibport *qibp = &ppd->ibport_data;
402
403 return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
404}
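Editorial sketch of the offsetof() pattern above: each QIB_DIAGC_ATTR() stores only a byte offset into struct qib_ibport, so a single show routine can service every counter by adding that offset to the per-port structure. The struct and field names below are invented for illustration:

/* Editorial sketch: generic attribute show via offsetof(), as used by
 * QIB_DIAGC_ATTR()/diagc_attr_show() above.  The stats struct and its
 * fields are made up for this example. */
#include <stddef.h>
#include <stdio.h>

struct stats {
	unsigned int n_rc_resends;
	unsigned int n_pkt_drops;
};

struct counter_attr {
	const char *name;
	size_t counter;		/* byte offset of a u32 inside struct stats */
};

#define COUNTER_ATTR(N) { #N, offsetof(struct stats, n_##N) }

static const struct counter_attr attrs[] = {
	COUNTER_ATTR(rc_resends),
	COUNTER_ATTR(pkt_drops),
};

int main(void)
{
	struct stats s = { .n_rc_resends = 7, .n_pkt_drops = 3 };
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		printf("%s %u\n", attrs[i].name,
		       *(unsigned int *)((char *)&s + attrs[i].counter));
	return 0;
}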
405
406static const struct sysfs_ops qib_diagc_ops = {
407 .show = diagc_attr_show,
408};
409
410static struct kobj_type qib_diagc_ktype = {
411 .release = qib_port_release,
412 .sysfs_ops = &qib_diagc_ops,
413 .default_attrs = diagc_default_attributes
414};
415
416/* End diag_counters */
417
418/* end of per-port file structures and support code */
419
420/*
421 * Start of per-unit (or driver, in some cases, but replicated
422 * per unit) functions (these get a device *)
423 */
424static ssize_t show_rev(struct device *device, struct device_attribute *attr,
425 char *buf)
426{
427 struct qib_ibdev *dev =
428 container_of(device, struct qib_ibdev, ibdev.dev);
429
430 return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
431}
432
433static ssize_t show_hca(struct device *device, struct device_attribute *attr,
434 char *buf)
435{
436 struct qib_ibdev *dev =
437 container_of(device, struct qib_ibdev, ibdev.dev);
438 struct qib_devdata *dd = dd_from_dev(dev);
439 int ret;
440
441 if (!dd->boardname)
442 ret = -EINVAL;
443 else
444 ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
445 return ret;
446}
447
448static ssize_t show_version(struct device *device,
449 struct device_attribute *attr, char *buf)
450{
451 /* The string printed here is already newline-terminated. */
452 return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
453}
454
455static ssize_t show_boardversion(struct device *device,
456 struct device_attribute *attr, char *buf)
457{
458 struct qib_ibdev *dev =
459 container_of(device, struct qib_ibdev, ibdev.dev);
460 struct qib_devdata *dd = dd_from_dev(dev);
461
462 /* The string printed here is already newline-terminated. */
463 return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
464}
465
466
467static ssize_t show_localbus_info(struct device *device,
468 struct device_attribute *attr, char *buf)
469{
470 struct qib_ibdev *dev =
471 container_of(device, struct qib_ibdev, ibdev.dev);
472 struct qib_devdata *dd = dd_from_dev(dev);
473
474 /* The string printed here is already newline-terminated. */
475 return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
476}
477
478
479static ssize_t show_nctxts(struct device *device,
480 struct device_attribute *attr, char *buf)
481{
482 struct qib_ibdev *dev =
483 container_of(device, struct qib_ibdev, ibdev.dev);
484 struct qib_devdata *dd = dd_from_dev(dev);
485
486 /* Return the number of user ports (contexts) available. */
487 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
488 dd->first_user_ctxt);
489}
490
491static ssize_t show_serial(struct device *device,
492 struct device_attribute *attr, char *buf)
493{
494 struct qib_ibdev *dev =
495 container_of(device, struct qib_ibdev, ibdev.dev);
496 struct qib_devdata *dd = dd_from_dev(dev);
497
498 buf[sizeof dd->serial] = '\0';
499 memcpy(buf, dd->serial, sizeof dd->serial);
500 strcat(buf, "\n");
501 return strlen(buf);
502}
503
504static ssize_t store_chip_reset(struct device *device,
505 struct device_attribute *attr, const char *buf,
506 size_t count)
507{
508 struct qib_ibdev *dev =
509 container_of(device, struct qib_ibdev, ibdev.dev);
510 struct qib_devdata *dd = dd_from_dev(dev);
511 int ret;
512
513 if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
514 ret = -EINVAL;
515 goto bail;
516 }
517
518 ret = qib_reset_device(dd->unit);
519bail:
520 return ret < 0 ? ret : count;
521}
522
523static ssize_t show_logged_errs(struct device *device,
524 struct device_attribute *attr, char *buf)
525{
526 struct qib_ibdev *dev =
527 container_of(device, struct qib_ibdev, ibdev.dev);
528 struct qib_devdata *dd = dd_from_dev(dev);
529 int idx, count;
530
531 /* force consistency with actual EEPROM */
532 if (qib_update_eeprom_log(dd) != 0)
533 return -ENXIO;
534
535 count = 0;
536 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
537 count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
538 dd->eep_st_errs[idx],
539 idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
540 }
541
542 return count;
543}
544
545/*
 546 * Dump tempsense registers in decimal, to ease shell scripting.
547 */
548static ssize_t show_tempsense(struct device *device,
549 struct device_attribute *attr, char *buf)
550{
551 struct qib_ibdev *dev =
552 container_of(device, struct qib_ibdev, ibdev.dev);
553 struct qib_devdata *dd = dd_from_dev(dev);
554 int ret;
555 int idx;
556 u8 regvals[8];
557
558 ret = -ENXIO;
559 for (idx = 0; idx < 8; ++idx) {
560 if (idx == 6)
561 continue;
562 ret = dd->f_tempsense_rd(dd, idx);
563 if (ret < 0)
564 break;
565 regvals[idx] = ret;
566 }
567 if (idx == 8)
568 ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
569 *(signed char *)(regvals),
570 *(signed char *)(regvals + 1),
571 regvals[2], regvals[3],
572 *(signed char *)(regvals + 5),
573 *(signed char *)(regvals + 7));
574 return ret;
575}
576
577/*
578 * end of per-unit (or driver, in some cases, but replicated
579 * per unit) functions
580 */
581
582/* start of per-unit file structures and support code */
583static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
584static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
585static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
586static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
587static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
588static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
589static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
590static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
591static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
592static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
593static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
594
595static struct device_attribute *qib_attributes[] = {
596 &dev_attr_hw_rev,
597 &dev_attr_hca_type,
598 &dev_attr_board_id,
599 &dev_attr_version,
600 &dev_attr_nctxts,
601 &dev_attr_serial,
602 &dev_attr_boardversion,
603 &dev_attr_logged_errors,
604 &dev_attr_tempsense,
605 &dev_attr_localbus_info,
606 &dev_attr_chip_reset,
607};
608
609int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
610 struct kobject *kobj)
611{
612 struct qib_pportdata *ppd;
613 struct qib_devdata *dd = dd_from_ibdev(ibdev);
614 int ret;
615
616 if (!port_num || port_num > dd->num_pports) {
617 qib_dev_err(dd, "Skipping infiniband class with "
618 "invalid port %u\n", port_num);
619 ret = -ENODEV;
620 goto bail;
621 }
622 ppd = &dd->pport[port_num - 1];
623
624 ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
625 "linkcontrol");
626 if (ret) {
627 qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
628 "(err %d) port %u\n", ret, port_num);
629 goto bail;
630 }
631 kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
632
633 ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
634 "sl2vl");
635 if (ret) {
636 qib_dev_err(dd, "Skipping sl2vl sysfs info, "
637 "(err %d) port %u\n", ret, port_num);
638 goto bail_sl;
639 }
640 kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
641
642 ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
643 "diag_counters");
644 if (ret) {
645 qib_dev_err(dd, "Skipping diag_counters sysfs info, "
646 "(err %d) port %u\n", ret, port_num);
647 goto bail_diagc;
648 }
649 kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
650
651 return 0;
652
653bail_diagc:
654 kobject_put(&ppd->sl2vl_kobj);
655bail_sl:
656 kobject_put(&ppd->pport_kobj);
657bail:
658 return ret;
659}
660
661/*
662 * Register and create our files in /sys/class/infiniband.
663 */
664int qib_verbs_register_sysfs(struct qib_devdata *dd)
665{
666 struct ib_device *dev = &dd->verbs_dev.ibdev;
667 int i, ret;
668
669 for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
670 ret = device_create_file(&dev->dev, qib_attributes[i]);
671 if (ret)
672 return ret;
673 }
674
675 return 0;
676}
677
678/*
679 * Unregister and remove our files in /sys/class/infiniband.
680 */
681void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
682{
683 struct qib_pportdata *ppd;
684 int i;
685
686 for (i = 0; i < dd->num_pports; i++) {
687 ppd = &dd->pport[i];
688 kobject_put(&ppd->pport_kobj);
689 kobject_put(&ppd->sl2vl_kobj);
690 }
691}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 000000000000..6f31ca5039db
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39
40/*
41 * QLogic_IB "Two Wire Serial Interface" driver.
42 * Originally written for a not-quite-i2c serial eeprom, which is
43 * still used on some supported boards. Later boards have added a
 44 * variety of other uses, most board-specific, so the bit-banging
45 * part has been split off to this file, while the other parts
46 * have been moved to chip-specific files.
47 *
48 * We have also dropped all pretense of fully generic (e.g. pretend
49 * we don't know whether '1' is the higher voltage) interface, as
50 * the restrictions of the generic i2c interface (e.g. no access from
51 * driver itself) make it unsuitable for this use.
52 */
53
54#define READ_CMD 1
55#define WRITE_CMD 0
56
57/**
58 * i2c_wait_for_writes - wait for a write
59 * @dd: the qlogic_ib device
60 *
61 * We use this instead of udelay directly, so we can make sure
62 * that previous register writes have been flushed all the way
63 * to the chip. Since we are delaying anyway, the cost doesn't
 64 * hurt, and it makes the bit twiddling more regular.
65 */
66static void i2c_wait_for_writes(struct qib_devdata *dd)
67{
68 /*
69 * implicit read of EXTStatus is as good as explicit
70 * read of scratch, if all we want to do is flush
71 * writes.
72 */
73 dd->f_gpio_mod(dd, 0, 0, 0);
74 rmb(); /* inlined, so prevent compiler reordering */
75}
76
77/*
78 * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
 79 * for "almost compliant" modules.
80 */
81#define SCL_WAIT_USEC 1000
82
 83/* BUF_WAIT is the time the bus must be free between STOP or ACK and the next START.
84 * Should be 20, but some chips need more.
85 */
86#define TWSI_BUF_WAIT_USEC 60
87
88static void scl_out(struct qib_devdata *dd, u8 bit)
89{
90 u32 mask;
91
92 udelay(1);
93
94 mask = 1UL << dd->gpio_scl_num;
95
 96 /* SCL is meant to be open-drain, so never set "OUT", just DIR */
97 dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
98
99 /*
100 * Allow for slow slaves by simple
101 * delay for falling edge, sampling on rise.
102 */
103 if (!bit)
104 udelay(2);
105 else {
106 int rise_usec;
107 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
108 if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
109 break;
110 udelay(2);
111 }
112 if (rise_usec <= 0)
113 qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
114 SCL_WAIT_USEC);
115 }
116 i2c_wait_for_writes(dd);
117}
118
119static void sda_out(struct qib_devdata *dd, u8 bit)
120{
121 u32 mask;
122
123 mask = 1UL << dd->gpio_sda_num;
124
 125 /* SDA is meant to be open-drain, so never set "OUT", just DIR */
126 dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
127
128 i2c_wait_for_writes(dd);
129 udelay(2);
130}
131
132static u8 sda_in(struct qib_devdata *dd, int wait)
133{
134 int bnum;
135 u32 read_val, mask;
136
137 bnum = dd->gpio_sda_num;
138 mask = (1UL << bnum);
 139 /* SDA is meant to be open-drain, so never set "OUT", just DIR */
140 dd->f_gpio_mod(dd, 0, 0, mask);
141 read_val = dd->f_gpio_mod(dd, 0, 0, 0);
142 if (wait)
143 i2c_wait_for_writes(dd);
144 return (read_val & mask) >> bnum;
145}
146
147/**
148 * i2c_ackrcv - see if ack following write is true
149 * @dd: the qlogic_ib device
150 */
151static int i2c_ackrcv(struct qib_devdata *dd)
152{
153 u8 ack_received;
154
155 /* AT ENTRY SCL = LOW */
156 /* change direction, ignore data */
157 ack_received = sda_in(dd, 1);
158 scl_out(dd, 1);
159 ack_received = sda_in(dd, 1) == 0;
160 scl_out(dd, 0);
161 return ack_received;
162}
163
164static void stop_cmd(struct qib_devdata *dd);
165
166/**
167 * rd_byte - read a byte, sending STOP on last, else ACK
168 * @dd: the qlogic_ib device
169 *
170 * Returns byte shifted out of device
171 */
172static int rd_byte(struct qib_devdata *dd, int last)
173{
174 int bit_cntr, data;
175
176 data = 0;
177
178 for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
179 data <<= 1;
180 scl_out(dd, 1);
181 data |= sda_in(dd, 0);
182 scl_out(dd, 0);
183 }
184 if (last) {
185 scl_out(dd, 1);
186 stop_cmd(dd);
187 } else {
188 sda_out(dd, 0);
189 scl_out(dd, 1);
190 scl_out(dd, 0);
191 sda_out(dd, 1);
192 }
193 return data;
194}
195
196/**
197 * wr_byte - write a byte, one bit at a time
198 * @dd: the qlogic_ib device
199 * @data: the byte to write
200 *
201 * Returns 0 if we got the following ack, otherwise 1
202 */
203static int wr_byte(struct qib_devdata *dd, u8 data)
204{
205 int bit_cntr;
206 u8 bit;
207
208 for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
209 bit = (data >> bit_cntr) & 1;
210 sda_out(dd, bit);
211 scl_out(dd, 1);
212 scl_out(dd, 0);
213 }
214 return (!i2c_ackrcv(dd)) ? 1 : 0;
215}
216
217/*
218 * issue TWSI start sequence:
219 * (both clock/data high, clock high, data low while clock is high)
220 */
221static void start_seq(struct qib_devdata *dd)
222{
223 sda_out(dd, 1);
224 scl_out(dd, 1);
225 sda_out(dd, 0);
226 udelay(1);
227 scl_out(dd, 0);
228}
229
230/**
231 * stop_seq - transmit the stop sequence
232 * @dd: the qlogic_ib device
233 *
234 * (both clock/data low, clock high, data high while clock is high)
235 */
236static void stop_seq(struct qib_devdata *dd)
237{
238 scl_out(dd, 0);
239 sda_out(dd, 0);
240 scl_out(dd, 1);
241 sda_out(dd, 1);
242}
243
244/**
245 * stop_cmd - transmit the stop condition
246 * @dd: the qlogic_ib device
247 *
248 * (both clock/data low, clock high, data high while clock is high)
249 */
250static void stop_cmd(struct qib_devdata *dd)
251{
252 stop_seq(dd);
253 udelay(TWSI_BUF_WAIT_USEC);
254}
255
256/**
257 * qib_twsi_reset - reset I2C communication
258 * @dd: the qlogic_ib device
259 */
260
261int qib_twsi_reset(struct qib_devdata *dd)
262{
263 int clock_cycles_left = 9;
264 int was_high = 0;
265 u32 pins, mask;
266
267 /* Both SCL and SDA should be high. If not, there
268 * is something wrong.
269 */
270 mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
271
272 /*
273 * Force pins to desired innocuous state.
274 * This is the default power-on state with out=0 and dir=0,
275	 * so tri-stated and should be floating high (barring HW problems).
276 */
277 dd->f_gpio_mod(dd, 0, 0, mask);
278
279 /*
280 * Clock nine times to get all listeners into a sane state.
281 * If SDA does not go high at any point, we are wedged.
282 * One vendor recommends then issuing START followed by STOP.
283	 * We cannot use our "normal" functions to do that, because
284 * if SCL drops between them, another vendor's part will
285 * wedge, dropping SDA and keeping it low forever, at the end of
286 * the next transaction (even if it was not the device addressed).
287 * So our START and STOP take place with SCL held high.
288 */
289 while (clock_cycles_left--) {
290 scl_out(dd, 0);
291 scl_out(dd, 1);
292 /* Note if SDA is high, but keep clocking to sync slave */
293 was_high |= sda_in(dd, 0);
294 }
295
296 if (was_high) {
297 /*
298 * We saw a high, which we hope means the slave is sync'd.
299 * Issue START, STOP, pause for T_BUF.
300 */
301
302 pins = dd->f_gpio_mod(dd, 0, 0, 0);
303 if ((pins & mask) != mask)
304 qib_dev_err(dd, "GPIO pins not at rest: %d\n",
305 pins & mask);
306 /* Drop SDA to issue START */
307 udelay(1); /* Guarantee .6 uSec setup */
308 sda_out(dd, 0);
309 udelay(1); /* Guarantee .6 uSec hold */
310 /* At this point, SCL is high, SDA low. Raise SDA for STOP */
311 sda_out(dd, 1);
312 udelay(TWSI_BUF_WAIT_USEC);
313 }
314
315 return !was_high;
316}
317
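/*
 * A minimal recovery sketch (not part of the driver): chip-specific
 * code would typically attempt something like this before retrying a
 * failed transfer.  qib_twsi_reset() returns 0 when SDA was seen high
 * during the recovery clocks, non-zero if the bus stayed wedged.  The
 * helper name below is hypothetical.
 */
#if 0
static void twsi_recover_sketch(struct qib_devdata *dd)
{
	if (qib_twsi_reset(dd))
		qib_dev_err(dd, "TWSI bus did not recover, skipping transfer\n");
}
#endif
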
318#define QIB_TWSI_START 0x100
319#define QIB_TWSI_STOP 0x200
320
321/* Write byte to TWSI, optionally prefixed with START or suffixed with
322 * STOP.
323 * returns 0 if OK (ACK received), else != 0
324 */
325static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
326{
327 int ret = 1;
328 if (flags & QIB_TWSI_START)
329 start_seq(dd);
330
331 ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
332
333 if (flags & QIB_TWSI_STOP)
334 stop_cmd(dd);
335 return ret;
336}
337
338/* Added functionality for IBA7220-based cards */
339#define QIB_TEMP_DEV 0x98
340
341/*
342 * qib_twsi_blk_rd
343 * Formerly called qib_eeprom_internal_read, and only used for eeprom,
344 * but now the general interface for data transfer from twsi devices.
345 * One vestige of its former role is that it recognizes a device
346 * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
347 * which responded to all TWSI device codes, interpreting them as
348 * an address within the device. For all other devices on boards handled by
349 * this driver, the device code is followed by a one-byte "address" which selects
350 * the "register" or "offset" within the device from which data should
351 * be read.
352 */
353int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
354 void *buffer, int len)
355{
356 int ret;
357 u8 *bp = buffer;
358
359 ret = 1;
360
361 if (dev == QIB_TWSI_NO_DEV) {
362 /* legacy not-really-I2C */
363 addr = (addr << 1) | READ_CMD;
364 ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
365 } else {
366 /* Actual I2C */
367 ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
368 if (ret) {
369 stop_cmd(dd);
370 ret = 1;
371 goto bail;
372 }
373 /*
374 * SFF spec claims we do _not_ stop after the addr
375 * but simply issue a start with the "read" dev-addr.
376		 * Since we are implicitly waiting for ACK here,
377		 * we need t_buf (nominally 20uSec) before that start,
378		 * and cannot rely on the delay built into the STOP.
379 */
380 ret = qib_twsi_wr(dd, addr, 0);
381 udelay(TWSI_BUF_WAIT_USEC);
382
383 if (ret) {
384 qib_dev_err(dd,
385 "Failed to write interface read addr %02X\n",
386 addr);
387 ret = 1;
388 goto bail;
389 }
390 ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
391 }
392 if (ret) {
393 stop_cmd(dd);
394 ret = 1;
395 goto bail;
396 }
397
398 /*
399	 * block devices keep clocking data out as long as we ack,
400 * automatically incrementing the address. Some have "pages"
401 * whose boundaries will not be crossed, but the handling
402 * of these is left to the caller, who is in a better
403 * position to know.
404 */
405 while (len-- > 0) {
406 /*
407 * Get and store data, sending ACK if length remaining,
408 * else STOP
409 */
410 *bp++ = rd_byte(dd, !len);
411 }
412
413 ret = 0;
414
415bail:
416 return ret;
417}
418
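/*
 * A minimal usage sketch of qib_twsi_blk_rd() (not part of the
 * driver): read a few identification bytes from a TWSI device.  The
 * device code 0xA0, the offset, and the helper name are hypothetical;
 * real callers pass the device codes set up by the board-specific
 * code (or QIB_TWSI_NO_DEV for the legacy eeprom).
 */
#if 0
static int twsi_read_id_sketch(struct qib_devdata *dd)
{
	u8 id[4];

	/* read 4 bytes starting at offset 0 of device 0xA0; 0 means success */
	if (qib_twsi_blk_rd(dd, 0xA0, 0, id, sizeof(id)))
		return -EIO;
	return 0;
}
#endif
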
419/*
420 * qib_twsi_blk_wr
421 * Formerly called qib_eeprom_internal_write, and only used for eeprom,
422 * but now the general interface for data transfer to twsi devices.
423 * One vestige of its former role is that it recognizes a device
424 * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
425 * which responded to all TWSI device codes, interpreting them as
426 * an address within the device. For all other devices on boards handled by
427 * this driver, the device code is followed by a one-byte "address" which selects
428 * the "register" or "offset" within the device to which data should
429 * be written.
430 */
431int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
432 const void *buffer, int len)
433{
434 int sub_len;
435 const u8 *bp = buffer;
436 int max_wait_time, i;
437 int ret;
438 ret = 1;
439
440 while (len > 0) {
441 if (dev == QIB_TWSI_NO_DEV) {
442 if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
443 QIB_TWSI_START)) {
444 goto failed_write;
445 }
446 } else {
447 /* Real I2C */
448 if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
449 goto failed_write;
450 ret = qib_twsi_wr(dd, addr, 0);
451 if (ret) {
452				qib_dev_err(dd,
453					"Failed to write interface write addr %02X\n", addr);
454 goto failed_write;
455 }
456 }
457
458 sub_len = min(len, 4);
459 addr += sub_len;
460 len -= sub_len;
461
462 for (i = 0; i < sub_len; i++)
463 if (qib_twsi_wr(dd, *bp++, 0))
464 goto failed_write;
465
466 stop_cmd(dd);
467
468 /*
469		 * Wait for the write to complete by waiting for a successful
470		 * read: the chip will not ACK the start of a read until its
471		 * internal write to the eeprom has completed, so the
472		 * startcmd for the read will fail the ack until
473		 * the writes have completed. We do this inline to avoid
474		 * the debug prints that are in the real read routine
475		 * if the startcmd fails.
476		 * We also use the proper device address, so it doesn't matter
477		 * whether we have a real eeprom_dev. Legacy likes any address.
478 */
479 max_wait_time = 100;
480 while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
481 stop_cmd(dd);
482 if (!--max_wait_time)
483 goto failed_write;
484 }
485 /* now read (and ignore) the resulting byte */
486 rd_byte(dd, 1);
487 }
488
489 ret = 0;
490 goto bail;
491
492failed_write:
493 stop_cmd(dd);
494 ret = 1;
495
496bail:
497 return ret;
498}
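
/*
 * A matching write sketch (not part of the driver): qib_twsi_blk_wr()
 * already chunks the data into at most 4 bytes per bus write and then
 * ack-polls the device until its internal write cycle finishes, so a
 * caller needs only a single call.  The device code, offset, and
 * helper name below are hypothetical.
 */
#if 0
static int twsi_write_sketch(struct qib_devdata *dd, const u8 *buf, int len)
{
	/* write len bytes starting at offset 16 of device 0xA0 */
	return qib_twsi_blk_wr(dd, 0xA0, 16, buf, len) ? -EIO : 0;
}
#endif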
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 000000000000..f7eb1ddff5f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/pci.h>
35#include <linux/io.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/vmalloc.h>
39
40#include "qib.h"
41
42static unsigned qib_hol_timeout_ms = 3000;
43module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
44MODULE_PARM_DESC(hol_timeout_ms,
45 "duration of user app suspension after link failure");
46
47unsigned qib_sdma_fetch_arb = 1;
48module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
49MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
50
51/**
52 * qib_disarm_piobufs - cancel a range of PIO buffers
53 * @dd: the qlogic_ib device
54 * @first: the first PIO buffer to cancel
55 * @cnt: the number of PIO buffers to cancel
56 *
57 * Cancel a range of PIO buffers. Used at user process close,
58 * in case it died while writing to a PIO buffer.
59 */
60void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
61{
62 unsigned long flags;
63 unsigned i;
64 unsigned last;
65
66 last = first + cnt;
67 spin_lock_irqsave(&dd->pioavail_lock, flags);
68 for (i = first; i < last; i++) {
69 __clear_bit(i, dd->pio_need_disarm);
70 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
71 }
72 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
73}
74
75/*
76 * This is called by a user process when it sees that the DISARM_BUFS
77 * event bit is set.
78 */
79int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
80{
81 struct qib_devdata *dd = rcd->dd;
82 unsigned i;
83 unsigned last;
84 unsigned n = 0;
85
86 last = rcd->pio_base + rcd->piocnt;
87 /*
88 * Don't need uctxt_lock here, since user has called in to us.
89 * Clear at start in case more interrupts set bits while we
90 * are disarming
91 */
92 if (rcd->user_event_mask) {
93 /*
94 * subctxt_cnt is 0 if not shared, so do base
95 * separately, first, then remaining subctxt, if any
96 */
97 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
98 for (i = 1; i < rcd->subctxt_cnt; i++)
99 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
100 &rcd->user_event_mask[i]);
101 }
102 spin_lock_irq(&dd->pioavail_lock);
103 for (i = rcd->pio_base; i < last; i++) {
104 if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
105 n++;
106 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
107 }
108 }
109 spin_unlock_irq(&dd->pioavail_lock);
110 return 0;
111}
112
113static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
114{
115 struct qib_pportdata *ppd;
116 unsigned pidx;
117
118 for (pidx = 0; pidx < dd->num_pports; pidx++) {
119 ppd = dd->pport + pidx;
120 if (i >= ppd->sdma_state.first_sendbuf &&
121 i < ppd->sdma_state.last_sendbuf)
122 return ppd;
123 }
124 return NULL;
125}
126
127/*
128 * Return true if send buffer is being used by a user context.
129 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
130 */
131static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
132{
133 struct qib_ctxtdata *rcd;
134 unsigned ctxt;
135 int ret = 0;
136
137 spin_lock(&dd->uctxt_lock);
138 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
139 rcd = dd->rcd[ctxt];
140 if (!rcd || bufn < rcd->pio_base ||
141 bufn >= rcd->pio_base + rcd->piocnt)
142 continue;
143 if (rcd->user_event_mask) {
144 int i;
145 /*
146 * subctxt_cnt is 0 if not shared, so do base
147 * separately, first, then remaining subctxt, if any
148 */
149 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
150 &rcd->user_event_mask[0]);
151 for (i = 1; i < rcd->subctxt_cnt; i++)
152 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
153 &rcd->user_event_mask[i]);
154 }
155 ret = 1;
156 break;
157 }
158 spin_unlock(&dd->uctxt_lock);
159
160 return ret;
161}
162
163/*
164 * Disarm a set of send buffers. If the buffer might be actively being
165 * written to, mark the buffer to be disarmed later when it is not being
166 * written to.
167 *
168 * This should only be called from the IRQ error handler.
169 */
170void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
171 unsigned cnt)
172{
173 struct qib_pportdata *ppd, *pppd[dd->num_pports];
174 unsigned i;
175 unsigned long flags;
176
177 for (i = 0; i < dd->num_pports; i++)
178 pppd[i] = NULL;
179
180 for (i = 0; i < cnt; i++) {
181 int which;
182 if (!test_bit(i, mask))
183 continue;
184 /*
185 * If the buffer is owned by the DMA hardware,
186 * reset the DMA engine.
187 */
188 ppd = is_sdma_buf(dd, i);
189 if (ppd) {
190 pppd[ppd->port] = ppd;
191 continue;
192 }
193 /*
194 * If the kernel is writing the buffer or the buffer is
195 * owned by a user process, we can't clear it yet.
196 */
197 spin_lock_irqsave(&dd->pioavail_lock, flags);
198 if (test_bit(i, dd->pio_writing) ||
199 (!test_bit(i << 1, dd->pioavailkernel) &&
200 find_ctxt(dd, i))) {
201 __set_bit(i, dd->pio_need_disarm);
202 which = 0;
203 } else {
204 which = 1;
205 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
206 }
207 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
208 }
209
210 /* do cancel_sends once per port that had sdma piobufs in error */
211 for (i = 0; i < dd->num_pports; i++)
212 if (pppd[i])
213 qib_cancel_sends(pppd[i]);
214}
215
216/**
217 * update_send_bufs - update shadow copy of the PIO availability map
218 * @dd: the qlogic_ib device
219 *
220 * called whenever our local copy indicates we have run out of send buffers
221 */
222static void update_send_bufs(struct qib_devdata *dd)
223{
224 unsigned long flags;
225 unsigned i;
226 const unsigned piobregs = dd->pioavregs;
227
228 /*
229 * If the generation (check) bits have changed, then we update the
230 * busy bit for the corresponding PIO buffer. This algorithm will
231 * modify positions to the value they already have in some cases
232 * (i.e., no change), but it's faster than changing only the bits
233 * that have changed.
234 *
235	 * We would like to do this atomically, to avoid spinlocks in the
236	 * critical send path, but that's not really possible, given the
237	 * type of changes, and that this routine could be called on
238	 * multiple CPUs simultaneously, so we lock in this routine only,
239	 * to avoid conflicting updates; all we change is the shadow, and
240	 * it's a single 64 bit memory location, so by definition the update
241	 * is atomic in terms of what other CPUs can see in testing the
242	 * bits. The spin_lock overhead isn't too bad, since it only
243	 * happens when all buffers are in use, so only CPU overhead, not
244 * latency or bandwidth is affected.
245 */
246 if (!dd->pioavailregs_dma)
247 return;
248 spin_lock_irqsave(&dd->pioavail_lock, flags);
249 for (i = 0; i < piobregs; i++) {
250 u64 pchbusy, pchg, piov, pnew;
251
252 piov = le64_to_cpu(dd->pioavailregs_dma[i]);
253 pchg = dd->pioavailkernel[i] &
254 ~(dd->pioavailshadow[i] ^ piov);
255 pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
256 if (pchg && (pchbusy & dd->pioavailshadow[i])) {
257 pnew = dd->pioavailshadow[i] & ~pchbusy;
258 pnew |= piov & pchbusy;
259 dd->pioavailshadow[i] = pnew;
260 }
261 }
262 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
263}
264
265/*
266 * Debugging code and stats updates if no pio buffers available.
267 */
268static noinline void no_send_bufs(struct qib_devdata *dd)
269{
270 dd->upd_pio_shadow = 1;
271
272 /* not atomic, but if we lose a stat count in a while, that's OK */
273 qib_stats.sps_nopiobufs++;
274}
275
276/*
277 * Common code for normal driver send buffer allocation, and reserved
278 * allocation.
279 *
280 * Do appropriate marking as busy, etc.
281 * Returns buffer pointer if one is found, otherwise NULL.
282 */
283u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
284 u32 first, u32 last)
285{
286 unsigned i, j, updated = 0;
287 unsigned nbufs;
288 unsigned long flags;
289 unsigned long *shadow = dd->pioavailshadow;
290 u32 __iomem *buf;
291
292 if (!(dd->flags & QIB_PRESENT))
293 return NULL;
294
295 nbufs = last - first + 1; /* number in range to check */
296 if (dd->upd_pio_shadow) {
297 /*
298 * Minor optimization. If we had no buffers on last call,
299 * start out by doing the update; continue and do scan even
300 * if no buffers were updated, to be paranoid.
301 */
302 update_send_bufs(dd);
303 updated++;
304 }
305 i = first;
306rescan:
307 /*
308 * While test_and_set_bit() is atomic, we do that and then the
309 * change_bit(), and the pair is not. See if this is the cause
310 * of the remaining armlaunch errors.
311 */
312 spin_lock_irqsave(&dd->pioavail_lock, flags);
313 for (j = 0; j < nbufs; j++, i++) {
314 if (i > last)
315 i = first;
316 if (__test_and_set_bit((2 * i) + 1, shadow))
317 continue;
318 /* flip generation bit */
319 __change_bit(2 * i, shadow);
320 /* remember that the buffer can be written to now */
321 __set_bit(i, dd->pio_writing);
322 break;
323 }
324 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
325
326 if (j == nbufs) {
327 if (!updated) {
328 /*
329			 * First time through; shadow exhausted, but there may be
330			 * buffers available; try an update and then rescan.
331 */
332 update_send_bufs(dd);
333 updated++;
334 i = first;
335 goto rescan;
336 }
337 no_send_bufs(dd);
338 buf = NULL;
339 } else {
340 if (i < dd->piobcnt2k)
341 buf = (u32 __iomem *)(dd->pio2kbase +
342 i * dd->palign);
343 else
344 buf = (u32 __iomem *)(dd->pio4kbase +
345 (i - dd->piobcnt2k) * dd->align4k);
346 if (pbufnum)
347 *pbufnum = i;
348 dd->upd_pio_shadow = 0;
349 }
350
351 return buf;
352}
353
354/*
355 * Record that the caller is finished writing to the buffer so we don't
356 * disarm it while it is being written and disarm it now if needed.
357 */
358void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&dd->pioavail_lock, flags);
363 __clear_bit(n, dd->pio_writing);
364 if (__test_and_clear_bit(n, dd->pio_need_disarm))
365 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
366 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
367}
368
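/*
 * A minimal sketch (not part of the driver) of how the helpers above
 * fit together: grab a buffer from a range, copy the packet into chip
 * memory, then report completion so any deferred disarm is applied.
 * The range arguments and the plain writel() loop are placeholders;
 * real senders get their buffer ranges from chip-specific setup and
 * use the optimized PIO copy routines.
 */
#if 0
static int pio_send_sketch(struct qib_devdata *dd, const u32 *pkt,
			   u32 nwords, u32 first, u32 last)
{
	u32 bufnum, i;
	u32 __iomem *piobuf;

	piobuf = qib_getsendbuf_range(dd, &bufnum, first, last);
	if (!piobuf)
		return -EBUSY;	/* none free, even after a shadow update */
	for (i = 0; i < nwords; i++)
		writel(pkt[i], piobuf + i);
	/* clears pio_writing and performs a pending disarm, if any */
	qib_sendbuf_done(dd, bufnum);
	return 0;
}
#endif
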
369/**
370 * qib_chg_pioavailkernel - change which send buffers are available for kernel
371 * @dd: the qlogic_ib device
372 * @start: the starting send buffer number
373 * @len: the number of send buffers
374 * @avail: true if the buffers are available for kernel use, false otherwise
375 */
376void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
377 unsigned len, u32 avail, struct qib_ctxtdata *rcd)
378{
379 unsigned long flags;
380 unsigned end;
381 unsigned ostart = start;
382
383 /* There are two bits per send buffer (busy and generation) */
384 start *= 2;
385 end = start + len * 2;
386
387 spin_lock_irqsave(&dd->pioavail_lock, flags);
388 /* Set or clear the busy bit in the shadow. */
389 while (start < end) {
390 if (avail) {
391 unsigned long dma;
392 int i;
393
394 /*
395 * The BUSY bit will never be set, because we disarm
396 * the user buffers before we hand them back to the
397 * kernel. We do have to make sure the generation
398 * bit is set correctly in shadow, since it could
399 * have changed many times while allocated to user.
400 * We can't use the bitmap functions on the full
401 * dma array because it is always little-endian, so
402 * we have to flip to host-order first.
403 * BITS_PER_LONG is slightly wrong, since it's
404 * always 64 bits per register in chip...
405 * We only work on 64 bit kernels, so that's OK.
406 */
407 i = start / BITS_PER_LONG;
408 __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
409 dd->pioavailshadow);
410 dma = (unsigned long)
411 le64_to_cpu(dd->pioavailregs_dma[i]);
412 if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
413 start) % BITS_PER_LONG, &dma))
414 __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
415 start, dd->pioavailshadow);
416 else
417 __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
418 + start, dd->pioavailshadow);
419 __set_bit(start, dd->pioavailkernel);
420 } else {
421 __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
422 dd->pioavailshadow);
423 __clear_bit(start, dd->pioavailkernel);
424 }
425 start += 2;
426 }
427
428 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
429
430 dd->f_txchk_change(dd, ostart, len, avail, rcd);
431}
432
433/*
434 * Flush all sends that might be in the ready to send state, as well as any
435 * that are in the process of being sent. Used whenever we need to be
436 * sure the send side is idle. Cleans up all buffer state by canceling
437 * all pio buffers, and issuing an abort, which cleans up anything in the
438 * launch fifo. The cancel is superfluous on some chip versions, but
439 * it's safer to always do it.
440 * PIOAvail bits are updated by the chip as if a normal send had happened.
441 */
442void qib_cancel_sends(struct qib_pportdata *ppd)
443{
444 struct qib_devdata *dd = ppd->dd;
445 struct qib_ctxtdata *rcd;
446 unsigned long flags;
447 unsigned ctxt;
448 unsigned i;
449 unsigned last;
450
451 /*
452 * Tell PSM to disarm buffers again before trying to reuse them.
453 * We need to be sure the rcd doesn't change out from under us
454 * while we do so. We hold the two locks sequentially. We might
455 * needlessly set some need_disarm bits as a result, if the
456 * context is closed after we release the uctxt_lock, but that's
457 * fairly benign, and safer than nesting the locks.
458 */
459 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
460 spin_lock_irqsave(&dd->uctxt_lock, flags);
461 rcd = dd->rcd[ctxt];
462 if (rcd && rcd->ppd == ppd) {
463 last = rcd->pio_base + rcd->piocnt;
464 if (rcd->user_event_mask) {
465 /*
466 * subctxt_cnt is 0 if not shared, so do base
467 * separately, first, then remaining subctxt,
468 * if any
469 */
470 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
471 &rcd->user_event_mask[0]);
472 for (i = 1; i < rcd->subctxt_cnt; i++)
473 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
474 &rcd->user_event_mask[i]);
475 }
476 i = rcd->pio_base;
477 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
478 spin_lock_irqsave(&dd->pioavail_lock, flags);
479 for (; i < last; i++)
480 __set_bit(i, dd->pio_need_disarm);
481 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
482 } else
483 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
484 }
485
486 if (!(dd->flags & QIB_HAS_SEND_DMA))
487 dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
488 QIB_SENDCTRL_FLUSH);
489}
490
491/*
492 * Force an update of in-memory copy of the pioavail registers, when
493 * needed for any of a variety of reasons.
494 * If already off, this routine is a nop, on the assumption that the
495 * caller (or set of callers) will "do the right thing".
496 * This is a per-device operation, so just the first port.
497 */
498void qib_force_pio_avail_update(struct qib_devdata *dd)
499{
500 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
501}
502
503void qib_hol_down(struct qib_pportdata *ppd)
504{
505 /*
506 * Cancel sends when the link goes DOWN so that we aren't doing it
507 * at INIT when we might be trying to send SMI packets.
508 */
509 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
510 qib_cancel_sends(ppd);
511}
512
513/*
514 * Link is at INIT.
515 * We start the HoL timer so we can detect stuck packets blocking SMP replies.
516 * Timer may already be running, so use mod_timer, not add_timer.
517 */
518void qib_hol_init(struct qib_pportdata *ppd)
519{
520 if (ppd->hol_state != QIB_HOL_INIT) {
521 ppd->hol_state = QIB_HOL_INIT;
522 mod_timer(&ppd->hol_timer,
523 jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
524 }
525}
526
527/*
528 * Link is up, continue any user processes, and ensure timer
529 * is a nop, if running. Let timer keep running, if set; it
530 * will nop when it sees the link is up.
531 */
532void qib_hol_up(struct qib_pportdata *ppd)
533{
534 ppd->hol_state = QIB_HOL_UP;
535}
536
537/*
538 * This is only called via the timer.
539 */
540void qib_hol_event(unsigned long opaque)
541{
542 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
543
544 /* If hardware error, etc, skip. */
545 if (!(ppd->dd->flags & QIB_INITTED))
546 return;
547
548 if (ppd->hol_state != QIB_HOL_UP) {
549 /*
550 * Try to flush sends in case a stuck packet is blocking
551 * SMP replies.
552 */
553 qib_hol_down(ppd);
554 mod_timer(&ppd->hol_timer,
555 jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
556 }
557}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
new file mode 100644
index 000000000000..6c7fe78cca64
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -0,0 +1,555 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include "qib.h"
36
37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_UC_##x
39
40/**
41 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
42 * @qp: a pointer to the QP
43 *
44 * Return 1 if constructed; otherwise, return 0.
45 */
46int qib_make_uc_req(struct qib_qp *qp)
47{
48 struct qib_other_headers *ohdr;
49 struct qib_swqe *wqe;
50 unsigned long flags;
51 u32 hwords;
52 u32 bth0;
53 u32 len;
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
55 int ret = 0;
56
57 spin_lock_irqsave(&qp->s_lock, flags);
58
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
61 goto bail;
62 /* We are in the error state, flush the work request. */
63 if (qp->s_last == qp->s_head)
64 goto bail;
65 /* If DMAs are in progress, we can't flush immediately. */
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp->s_flags |= QIB_S_WAIT_DMA;
68 goto bail;
69 }
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
72 goto done;
73 }
74
75 ohdr = &qp->s_hdr.u.oth;
76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
77 ohdr = &qp->s_hdr.u.l.oth;
78
79 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
80 hwords = 5;
81 bth0 = 0;
82
83 /* Get the next send request. */
84 wqe = get_swqe_ptr(qp, qp->s_cur);
85 qp->s_wqe = NULL;
86 switch (qp->s_state) {
87 default:
88 if (!(ib_qib_state_ops[qp->state] &
89 QIB_PROCESS_NEXT_SEND_OK))
90 goto bail;
91 /* Check if send work queue is empty. */
92 if (qp->s_cur == qp->s_head)
93 goto bail;
94 /*
95 * Start a new request.
96 */
97 wqe->psn = qp->s_next_psn;
98 qp->s_psn = qp->s_next_psn;
99 qp->s_sge.sge = wqe->sg_list[0];
100 qp->s_sge.sg_list = wqe->sg_list + 1;
101 qp->s_sge.num_sge = wqe->wr.num_sge;
102 qp->s_sge.total_len = wqe->length;
103 len = wqe->length;
104 qp->s_len = len;
105 switch (wqe->wr.opcode) {
106 case IB_WR_SEND:
107 case IB_WR_SEND_WITH_IMM:
108 if (len > pmtu) {
109 qp->s_state = OP(SEND_FIRST);
110 len = pmtu;
111 break;
112 }
113 if (wqe->wr.opcode == IB_WR_SEND)
114 qp->s_state = OP(SEND_ONLY);
115 else {
116 qp->s_state =
117 OP(SEND_ONLY_WITH_IMMEDIATE);
118 /* Immediate data comes after the BTH */
119 ohdr->u.imm_data = wqe->wr.ex.imm_data;
120 hwords += 1;
121 }
122 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
123 bth0 |= IB_BTH_SOLICITED;
124 qp->s_wqe = wqe;
125 if (++qp->s_cur >= qp->s_size)
126 qp->s_cur = 0;
127 break;
128
129 case IB_WR_RDMA_WRITE:
130 case IB_WR_RDMA_WRITE_WITH_IMM:
131 ohdr->u.rc.reth.vaddr =
132 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
133 ohdr->u.rc.reth.rkey =
134 cpu_to_be32(wqe->wr.wr.rdma.rkey);
135 ohdr->u.rc.reth.length = cpu_to_be32(len);
136 hwords += sizeof(struct ib_reth) / 4;
137 if (len > pmtu) {
138 qp->s_state = OP(RDMA_WRITE_FIRST);
139 len = pmtu;
140 break;
141 }
142 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
143 qp->s_state = OP(RDMA_WRITE_ONLY);
144 else {
145 qp->s_state =
146 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
147 /* Immediate data comes after the RETH */
148 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
149 hwords += 1;
150 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
151 bth0 |= IB_BTH_SOLICITED;
152 }
153 qp->s_wqe = wqe;
154 if (++qp->s_cur >= qp->s_size)
155 qp->s_cur = 0;
156 break;
157
158 default:
159 goto bail;
160 }
161 break;
162
163 case OP(SEND_FIRST):
164 qp->s_state = OP(SEND_MIDDLE);
165 /* FALLTHROUGH */
166 case OP(SEND_MIDDLE):
167 len = qp->s_len;
168 if (len > pmtu) {
169 len = pmtu;
170 break;
171 }
172 if (wqe->wr.opcode == IB_WR_SEND)
173 qp->s_state = OP(SEND_LAST);
174 else {
175 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
176 /* Immediate data comes after the BTH */
177 ohdr->u.imm_data = wqe->wr.ex.imm_data;
178 hwords += 1;
179 }
180 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
181 bth0 |= IB_BTH_SOLICITED;
182 qp->s_wqe = wqe;
183 if (++qp->s_cur >= qp->s_size)
184 qp->s_cur = 0;
185 break;
186
187 case OP(RDMA_WRITE_FIRST):
188 qp->s_state = OP(RDMA_WRITE_MIDDLE);
189 /* FALLTHROUGH */
190 case OP(RDMA_WRITE_MIDDLE):
191 len = qp->s_len;
192 if (len > pmtu) {
193 len = pmtu;
194 break;
195 }
196 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
197 qp->s_state = OP(RDMA_WRITE_LAST);
198 else {
199 qp->s_state =
200 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
201 /* Immediate data comes after the BTH */
202 ohdr->u.imm_data = wqe->wr.ex.imm_data;
203 hwords += 1;
204 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
205 bth0 |= IB_BTH_SOLICITED;
206 }
207 qp->s_wqe = wqe;
208 if (++qp->s_cur >= qp->s_size)
209 qp->s_cur = 0;
210 break;
211 }
212 qp->s_len -= len;
213 qp->s_hdrwords = hwords;
214 qp->s_cur_sge = &qp->s_sge;
215 qp->s_cur_size = len;
216 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
217 qp->s_next_psn++ & QIB_PSN_MASK);
218done:
219 ret = 1;
220 goto unlock;
221
222bail:
223 qp->s_flags &= ~QIB_S_BUSY;
224unlock:
225 spin_unlock_irqrestore(&qp->s_lock, flags);
226 return ret;
227}
228
229/**
230 * qib_uc_rcv - handle an incoming UC packet
231 * @ibp: the port the packet came in on
232 * @hdr: the header of the packet
233 * @has_grh: true if the packet has a GRH
234 * @data: the packet data
235 * @tlen: the length of the packet
236 * @qp: the QP for this packet.
237 *
238 * This is called from qib_qp_rcv() to process an incoming UC packet
239 * for the given QP.
240 * Called at interrupt level.
241 */
242void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
243 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
244{
245 struct qib_other_headers *ohdr;
246 unsigned long flags;
247 u32 opcode;
248 u32 hdrsize;
249 u32 psn;
250 u32 pad;
251 struct ib_wc wc;
252 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
253 struct ib_reth *reth;
254 int ret;
255
256 /* Check for GRH */
257 if (!has_grh) {
258 ohdr = &hdr->u.oth;
259 hdrsize = 8 + 12; /* LRH + BTH */
260 } else {
261 ohdr = &hdr->u.l.oth;
262 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
263 }
264
265 opcode = be32_to_cpu(ohdr->bth[0]);
266 spin_lock_irqsave(&qp->s_lock, flags);
267 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
268 goto sunlock;
269 spin_unlock_irqrestore(&qp->s_lock, flags);
270
271 psn = be32_to_cpu(ohdr->bth[2]);
272 opcode >>= 24;
273 memset(&wc, 0, sizeof wc);
274
275 /* Prevent simultaneous processing after APM on different CPUs */
276 spin_lock(&qp->r_lock);
277
278	/* Compare the PSN versus the expected PSN. */
279 if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
280 /*
281 * Handle a sequence error.
282 * Silently drop any current message.
283 */
284 qp->r_psn = psn;
285inv:
286 if (qp->r_state == OP(SEND_FIRST) ||
287 qp->r_state == OP(SEND_MIDDLE)) {
288 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
289 qp->r_sge.num_sge = 0;
290 } else
291 while (qp->r_sge.num_sge) {
292 atomic_dec(&qp->r_sge.sge.mr->refcount);
293 if (--qp->r_sge.num_sge)
294 qp->r_sge.sge = *qp->r_sge.sg_list++;
295 }
296 qp->r_state = OP(SEND_LAST);
297 switch (opcode) {
298 case OP(SEND_FIRST):
299 case OP(SEND_ONLY):
300 case OP(SEND_ONLY_WITH_IMMEDIATE):
301 goto send_first;
302
303 case OP(RDMA_WRITE_FIRST):
304 case OP(RDMA_WRITE_ONLY):
305 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
306 goto rdma_first;
307
308 default:
309 goto drop;
310 }
311 }
312
313 /* Check for opcode sequence errors. */
314 switch (qp->r_state) {
315 case OP(SEND_FIRST):
316 case OP(SEND_MIDDLE):
317 if (opcode == OP(SEND_MIDDLE) ||
318 opcode == OP(SEND_LAST) ||
319 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
320 break;
321 goto inv;
322
323 case OP(RDMA_WRITE_FIRST):
324 case OP(RDMA_WRITE_MIDDLE):
325 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
326 opcode == OP(RDMA_WRITE_LAST) ||
327 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
328 break;
329 goto inv;
330
331 default:
332 if (opcode == OP(SEND_FIRST) ||
333 opcode == OP(SEND_ONLY) ||
334 opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
335 opcode == OP(RDMA_WRITE_FIRST) ||
336 opcode == OP(RDMA_WRITE_ONLY) ||
337 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
338 break;
339 goto inv;
340 }
341
342 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
343 qp->r_flags |= QIB_R_COMM_EST;
344 if (qp->ibqp.event_handler) {
345 struct ib_event ev;
346
347 ev.device = qp->ibqp.device;
348 ev.element.qp = &qp->ibqp;
349 ev.event = IB_EVENT_COMM_EST;
350 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
351 }
352 }
353
354 /* OK, process the packet. */
355 switch (opcode) {
356 case OP(SEND_FIRST):
357 case OP(SEND_ONLY):
358 case OP(SEND_ONLY_WITH_IMMEDIATE):
359send_first:
360 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
361 qp->r_sge = qp->s_rdma_read_sge;
362 else {
363 ret = qib_get_rwqe(qp, 0);
364 if (ret < 0)
365 goto op_err;
366 if (!ret)
367 goto drop;
368 /*
369 * qp->s_rdma_read_sge will be the owner
370 * of the mr references.
371 */
372 qp->s_rdma_read_sge = qp->r_sge;
373 }
374 qp->r_rcv_len = 0;
375 if (opcode == OP(SEND_ONLY))
376 goto send_last;
377 else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
378 goto send_last_imm;
379 /* FALLTHROUGH */
380 case OP(SEND_MIDDLE):
381 /* Check for invalid length PMTU or posted rwqe len. */
382 if (unlikely(tlen != (hdrsize + pmtu + 4)))
383 goto rewind;
384 qp->r_rcv_len += pmtu;
385 if (unlikely(qp->r_rcv_len > qp->r_len))
386 goto rewind;
387 qib_copy_sge(&qp->r_sge, data, pmtu, 0);
388 break;
389
390 case OP(SEND_LAST_WITH_IMMEDIATE):
391send_last_imm:
392 wc.ex.imm_data = ohdr->u.imm_data;
393 hdrsize += 4;
394 wc.wc_flags = IB_WC_WITH_IMM;
395 /* FALLTHROUGH */
396 case OP(SEND_LAST):
397send_last:
398 /* Get the number of bytes the message was padded by. */
399 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
400 /* Check for invalid length. */
401 /* XXX LAST len should be >= 1 */
402 if (unlikely(tlen < (hdrsize + pad + 4)))
403 goto rewind;
404 /* Don't count the CRC. */
405 tlen -= (hdrsize + pad + 4);
406 wc.byte_len = tlen + qp->r_rcv_len;
407 if (unlikely(wc.byte_len > qp->r_len))
408 goto rewind;
409 wc.opcode = IB_WC_RECV;
410last_imm:
411 qib_copy_sge(&qp->r_sge, data, tlen, 0);
412 while (qp->s_rdma_read_sge.num_sge) {
413 atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
414 if (--qp->s_rdma_read_sge.num_sge)
415 qp->s_rdma_read_sge.sge =
416 *qp->s_rdma_read_sge.sg_list++;
417 }
418 wc.wr_id = qp->r_wr_id;
419 wc.status = IB_WC_SUCCESS;
420 wc.qp = &qp->ibqp;
421 wc.src_qp = qp->remote_qpn;
422 wc.slid = qp->remote_ah_attr.dlid;
423 wc.sl = qp->remote_ah_attr.sl;
424 /* Signal completion event if the solicited bit is set. */
425 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
426 (ohdr->bth[0] &
427 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
428 break;
429
430 case OP(RDMA_WRITE_FIRST):
431 case OP(RDMA_WRITE_ONLY):
432 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
433rdma_first:
434 if (unlikely(!(qp->qp_access_flags &
435 IB_ACCESS_REMOTE_WRITE))) {
436 goto drop;
437 }
438 reth = &ohdr->u.rc.reth;
439 hdrsize += sizeof(*reth);
440 qp->r_len = be32_to_cpu(reth->length);
441 qp->r_rcv_len = 0;
442 qp->r_sge.sg_list = NULL;
443 if (qp->r_len != 0) {
444 u32 rkey = be32_to_cpu(reth->rkey);
445 u64 vaddr = be64_to_cpu(reth->vaddr);
446 int ok;
447
448 /* Check rkey */
449 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
450 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
451 if (unlikely(!ok))
452 goto drop;
453 qp->r_sge.num_sge = 1;
454 } else {
455 qp->r_sge.num_sge = 0;
456 qp->r_sge.sge.mr = NULL;
457 qp->r_sge.sge.vaddr = NULL;
458 qp->r_sge.sge.length = 0;
459 qp->r_sge.sge.sge_length = 0;
460 }
461 if (opcode == OP(RDMA_WRITE_ONLY))
462 goto rdma_last;
463 else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
464 goto rdma_last_imm;
465 /* FALLTHROUGH */
466 case OP(RDMA_WRITE_MIDDLE):
467 /* Check for invalid length PMTU or posted rwqe len. */
468 if (unlikely(tlen != (hdrsize + pmtu + 4)))
469 goto drop;
470 qp->r_rcv_len += pmtu;
471 if (unlikely(qp->r_rcv_len > qp->r_len))
472 goto drop;
473 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
474 break;
475
476 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
477rdma_last_imm:
478 wc.ex.imm_data = ohdr->u.imm_data;
479 hdrsize += 4;
480 wc.wc_flags = IB_WC_WITH_IMM;
481
482 /* Get the number of bytes the message was padded by. */
483 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
484 /* Check for invalid length. */
485 /* XXX LAST len should be >= 1 */
486 if (unlikely(tlen < (hdrsize + pad + 4)))
487 goto drop;
488 /* Don't count the CRC. */
489 tlen -= (hdrsize + pad + 4);
490 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
491 goto drop;
492 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
493 while (qp->s_rdma_read_sge.num_sge) {
494 atomic_dec(&qp->s_rdma_read_sge.sge.mr->
495 refcount);
496 if (--qp->s_rdma_read_sge.num_sge)
497 qp->s_rdma_read_sge.sge =
498 *qp->s_rdma_read_sge.sg_list++;
499 }
500 else {
501 ret = qib_get_rwqe(qp, 1);
502 if (ret < 0)
503 goto op_err;
504 if (!ret)
505 goto drop;
506 }
507 wc.byte_len = qp->r_len;
508 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
509 goto last_imm;
510
511 case OP(RDMA_WRITE_LAST):
512rdma_last:
513 /* Get the number of bytes the message was padded by. */
514 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
515 /* Check for invalid length. */
516 /* XXX LAST len should be >= 1 */
517 if (unlikely(tlen < (hdrsize + pad + 4)))
518 goto drop;
519 /* Don't count the CRC. */
520 tlen -= (hdrsize + pad + 4);
521 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
522 goto drop;
523 qib_copy_sge(&qp->r_sge, data, tlen, 1);
524 while (qp->r_sge.num_sge) {
525 atomic_dec(&qp->r_sge.sge.mr->refcount);
526 if (--qp->r_sge.num_sge)
527 qp->r_sge.sge = *qp->r_sge.sg_list++;
528 }
529 break;
530
531 default:
532 /* Drop packet for unknown opcodes. */
533 goto drop;
534 }
535 qp->r_psn++;
536 qp->r_state = opcode;
537 spin_unlock(&qp->r_lock);
538 return;
539
540rewind:
541 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
542 qp->r_sge.num_sge = 0;
543drop:
544 ibp->n_pkt_drops++;
545 spin_unlock(&qp->r_lock);
546 return;
547
548op_err:
549 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
550 spin_unlock(&qp->r_lock);
551 return;
552
553sunlock:
554 spin_unlock_irqrestore(&qp->s_lock, flags);
555}
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
new file mode 100644
index 000000000000..c838cda73347
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -0,0 +1,607 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_smi.h>
35
36#include "qib.h"
37#include "qib_mad.h"
38
39/**
40 * qib_ud_loopback - handle send on loopback QPs
41 * @sqp: the sending QP
42 * @swqe: the send work request
43 *
44 * This is called from qib_make_ud_req() to forward a WQE addressed
45 * to the same HCA.
46 * Note that the receive interrupt handler may be calling qib_ud_rcv()
47 * while this is being called.
48 */
49static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
50{
51 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
52 struct qib_pportdata *ppd;
53 struct qib_qp *qp;
54 struct ib_ah_attr *ah_attr;
55 unsigned long flags;
56 struct qib_sge_state ssge;
57 struct qib_sge *sge;
58 struct ib_wc wc;
59 u32 length;
60
61 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
62 if (!qp) {
63 ibp->n_pkt_drops++;
64 return;
65 }
66 if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
67 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
68 ibp->n_pkt_drops++;
69 goto drop;
70 }
71
72 ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
73 ppd = ppd_from_ibp(ibp);
74
75 if (qp->ibqp.qp_num > 1) {
76 u16 pkey1;
77 u16 pkey2;
78 u16 lid;
79
80 pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
81 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
82 if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
83 lid = ppd->lid | (ah_attr->src_path_bits &
84 ((1 << ppd->lmc) - 1));
85 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
86 ah_attr->sl,
87 sqp->ibqp.qp_num, qp->ibqp.qp_num,
88 cpu_to_be16(lid),
89 cpu_to_be16(ah_attr->dlid));
90 goto drop;
91 }
92 }
93
94 /*
95 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
96 * Qkeys with the high order bit set mean use the
97 * qkey from the QP context instead of the WR (see 10.2.5).
98 */
99 if (qp->ibqp.qp_num) {
100 u32 qkey;
101
102 qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
103 sqp->qkey : swqe->wr.wr.ud.remote_qkey;
104 if (unlikely(qkey != qp->qkey)) {
105 u16 lid;
106
107 lid = ppd->lid | (ah_attr->src_path_bits &
108 ((1 << ppd->lmc) - 1));
109 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
110 ah_attr->sl,
111 sqp->ibqp.qp_num, qp->ibqp.qp_num,
112 cpu_to_be16(lid),
113 cpu_to_be16(ah_attr->dlid));
114 goto drop;
115 }
116 }
117
118 /*
119	 * A GRH is expected to precede the data even if not
120 * present on the wire.
121 */
122 length = swqe->length;
123 memset(&wc, 0, sizeof wc);
124 wc.byte_len = length + sizeof(struct ib_grh);
125
126 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
127 wc.wc_flags = IB_WC_WITH_IMM;
128 wc.ex.imm_data = swqe->wr.ex.imm_data;
129 }
130
131 spin_lock_irqsave(&qp->r_lock, flags);
132
133 /*
134 * Get the next work request entry to find where to put the data.
135 */
136 if (qp->r_flags & QIB_R_REUSE_SGE)
137 qp->r_flags &= ~QIB_R_REUSE_SGE;
138 else {
139 int ret;
140
141 ret = qib_get_rwqe(qp, 0);
142 if (ret < 0) {
143 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
144 goto bail_unlock;
145 }
146 if (!ret) {
147 if (qp->ibqp.qp_num == 0)
148 ibp->n_vl15_dropped++;
149 goto bail_unlock;
150 }
151 }
152 /* Silently drop packets which are too big. */
153 if (unlikely(wc.byte_len > qp->r_len)) {
154 qp->r_flags |= QIB_R_REUSE_SGE;
155 ibp->n_pkt_drops++;
156 goto bail_unlock;
157 }
158
159 if (ah_attr->ah_flags & IB_AH_GRH) {
160 qib_copy_sge(&qp->r_sge, &ah_attr->grh,
161 sizeof(struct ib_grh), 1);
162 wc.wc_flags |= IB_WC_GRH;
163 } else
164 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
165 ssge.sg_list = swqe->sg_list + 1;
166 ssge.sge = *swqe->sg_list;
167 ssge.num_sge = swqe->wr.num_sge;
168 sge = &ssge.sge;
169 while (length) {
170 u32 len = sge->length;
171
172 if (len > length)
173 len = length;
174 if (len > sge->sge_length)
175 len = sge->sge_length;
176 BUG_ON(len == 0);
177 qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
178 sge->vaddr += len;
179 sge->length -= len;
180 sge->sge_length -= len;
181 if (sge->sge_length == 0) {
182 if (--ssge.num_sge)
183 *sge = *ssge.sg_list++;
184 } else if (sge->length == 0 && sge->mr->lkey) {
185 if (++sge->n >= QIB_SEGSZ) {
186 if (++sge->m >= sge->mr->mapsz)
187 break;
188 sge->n = 0;
189 }
190 sge->vaddr =
191 sge->mr->map[sge->m]->segs[sge->n].vaddr;
192 sge->length =
193 sge->mr->map[sge->m]->segs[sge->n].length;
194 }
195 length -= len;
196 }
197 while (qp->r_sge.num_sge) {
198 atomic_dec(&qp->r_sge.sge.mr->refcount);
199 if (--qp->r_sge.num_sge)
200 qp->r_sge.sge = *qp->r_sge.sg_list++;
201 }
202 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
203 goto bail_unlock;
204 wc.wr_id = qp->r_wr_id;
205 wc.status = IB_WC_SUCCESS;
206 wc.opcode = IB_WC_RECV;
207 wc.qp = &qp->ibqp;
208 wc.src_qp = sqp->ibqp.qp_num;
209 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
210 swqe->wr.wr.ud.pkey_index : 0;
211 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
212 wc.sl = ah_attr->sl;
213 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
214 wc.port_num = qp->port_num;
215 /* Signal completion event if the solicited bit is set. */
216 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
217 swqe->wr.send_flags & IB_SEND_SOLICITED);
218 ibp->n_loop_pkts++;
219bail_unlock:
220 spin_unlock_irqrestore(&qp->r_lock, flags);
221drop:
222 if (atomic_dec_and_test(&qp->refcount))
223 wake_up(&qp->wait);
224}
225
226/**
227 * qib_make_ud_req - construct a UD request packet
228 * @qp: the QP
229 *
230 * Return 1 if constructed; otherwise, return 0.
231 */
232int qib_make_ud_req(struct qib_qp *qp)
233{
234 struct qib_other_headers *ohdr;
235 struct ib_ah_attr *ah_attr;
236 struct qib_pportdata *ppd;
237 struct qib_ibport *ibp;
238 struct qib_swqe *wqe;
239 unsigned long flags;
240 u32 nwords;
241 u32 extra_bytes;
242 u32 bth0;
243 u16 lrh0;
244 u16 lid;
245 int ret = 0;
246 int next_cur;
247
248 spin_lock_irqsave(&qp->s_lock, flags);
249
250 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
251 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
252 goto bail;
253 /* We are in the error state, flush the work request. */
254 if (qp->s_last == qp->s_head)
255 goto bail;
256 /* If DMAs are in progress, we can't flush immediately. */
257 if (atomic_read(&qp->s_dma_busy)) {
258 qp->s_flags |= QIB_S_WAIT_DMA;
259 goto bail;
260 }
261 wqe = get_swqe_ptr(qp, qp->s_last);
262 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
263 goto done;
264 }
265
266 if (qp->s_cur == qp->s_head)
267 goto bail;
268
269 wqe = get_swqe_ptr(qp, qp->s_cur);
270 next_cur = qp->s_cur + 1;
271 if (next_cur >= qp->s_size)
272 next_cur = 0;
273
274 /* Construct the header. */
275 ibp = to_iport(qp->ibqp.device, qp->port_num);
276 ppd = ppd_from_ibp(ibp);
277 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
278 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
279 if (ah_attr->dlid != QIB_PERMISSIVE_LID)
280 ibp->n_multicast_xmit++;
281 else
282 ibp->n_unicast_xmit++;
283 } else {
284 ibp->n_unicast_xmit++;
285 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
286 if (unlikely(lid == ppd->lid)) {
287 /*
288 * If DMAs are in progress, we can't generate
289 * a completion for the loopback packet since
290 * it would be out of order.
291 * XXX Instead of waiting, we could queue a
292 * zero length descriptor so we get a callback.
293 */
294 if (atomic_read(&qp->s_dma_busy)) {
295 qp->s_flags |= QIB_S_WAIT_DMA;
296 goto bail;
297 }
298 qp->s_cur = next_cur;
299 spin_unlock_irqrestore(&qp->s_lock, flags);
300 qib_ud_loopback(qp, wqe);
301 spin_lock_irqsave(&qp->s_lock, flags);
302 qib_send_complete(qp, wqe, IB_WC_SUCCESS);
303 goto done;
304 }
305 }
306
307 qp->s_cur = next_cur;
308 extra_bytes = -wqe->length & 3;
309 nwords = (wqe->length + extra_bytes) >> 2;
310
311 /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
312 qp->s_hdrwords = 7;
313 qp->s_cur_size = wqe->length;
314 qp->s_cur_sge = &qp->s_sge;
315 qp->s_srate = ah_attr->static_rate;
316 qp->s_wqe = wqe;
317 qp->s_sge.sge = wqe->sg_list[0];
318 qp->s_sge.sg_list = wqe->sg_list + 1;
319 qp->s_sge.num_sge = wqe->wr.num_sge;
320 qp->s_sge.total_len = wqe->length;
321
322 if (ah_attr->ah_flags & IB_AH_GRH) {
323 /* Header size in 32-bit words. */
324 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
325 &ah_attr->grh,
326 qp->s_hdrwords, nwords);
327 lrh0 = QIB_LRH_GRH;
328 ohdr = &qp->s_hdr.u.l.oth;
329 /*
330 * Don't worry about sending to locally attached multicast
331		 * QPs; the spec leaves what happens unspecified.
332 */
333 } else {
334 /* Header size in 32-bit words. */
335 lrh0 = QIB_LRH_BTH;
336 ohdr = &qp->s_hdr.u.oth;
337 }
338 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
339 qp->s_hdrwords++;
340 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
341 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
342 } else
343 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
344 lrh0 |= ah_attr->sl << 4;
345 if (qp->ibqp.qp_type == IB_QPT_SMI)
346 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
347 else
348 lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
349 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
350 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
351 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
352 lid = ppd->lid;
353 if (lid) {
354 lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
355 qp->s_hdr.lrh[3] = cpu_to_be16(lid);
356 } else
357 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
358 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
359 bth0 |= IB_BTH_SOLICITED;
360 bth0 |= extra_bytes << 20;
361 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
362 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
363 wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
364 ohdr->bth[0] = cpu_to_be32(bth0);
365 /*
366 * Use the multicast QP if the destination LID is a multicast LID.
367 */
368 ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
369 ah_attr->dlid != QIB_PERMISSIVE_LID ?
370 cpu_to_be32(QIB_MULTICAST_QPN) :
371 cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
372 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
373 /*
374 * Qkeys with the high order bit set mean use the
375 * qkey from the QP context instead of the WR (see 10.2.5).
376 */
377 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
378 qp->qkey : wqe->wr.wr.ud.remote_qkey);
379 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
380
381done:
382 ret = 1;
383 goto unlock;
384
385bail:
386 qp->s_flags &= ~QIB_S_BUSY;
387unlock:
388 spin_unlock_irqrestore(&qp->s_lock, flags);
389 return ret;
390}
391
392static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
393{
394 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
395 struct qib_devdata *dd = ppd->dd;
396 unsigned ctxt = ppd->hw_pidx;
397 unsigned i;
398
399 pkey &= 0x7fff; /* remove limited/full membership bit */
400
401 for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
402 if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
403 return i;
404
405 /*
406	 * Should not get here; this means hardware failed to validate pkeys.
407 * Punt and return index 0.
408 */
409 return 0;
410}
411
412/**
413 * qib_ud_rcv - receive an incoming UD packet
414 * @ibp: the port the packet came in on
415 * @hdr: the packet header
416 * @has_grh: true if the packet has a GRH
417 * @data: the packet data
418 * @tlen: the packet length
419 * @qp: the QP the packet came on
420 *
421 * This is called from qib_qp_rcv() to process an incoming UD packet
422 * for the given QP.
423 * Called at interrupt level.
424 */
425void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
426 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
427{
428 struct qib_other_headers *ohdr;
429 int opcode;
430 u32 hdrsize;
431 u32 pad;
432 struct ib_wc wc;
433 u32 qkey;
434 u32 src_qp;
435 u16 dlid;
436
437 /* Check for GRH */
438 if (!has_grh) {
439 ohdr = &hdr->u.oth;
440 hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
441 } else {
442 ohdr = &hdr->u.l.oth;
443 hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
444 }
445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
447
448 /* Get the number of bytes the message was padded by. */
449 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
450 if (unlikely(tlen < (hdrsize + pad + 4))) {
451 /* Drop incomplete packets. */
452 ibp->n_pkt_drops++;
453 goto bail;
454 }
455 tlen -= hdrsize + pad + 4;
456
457 /*
458 * Check that the permissive LID is only used on QP0
459 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
460 */
461 if (qp->ibqp.qp_num) {
462 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
463 hdr->lrh[3] == IB_LID_PERMISSIVE)) {
464 ibp->n_pkt_drops++;
465 goto bail;
466 }
467 if (qp->ibqp.qp_num > 1) {
468 u16 pkey1, pkey2;
469
470 pkey1 = be32_to_cpu(ohdr->bth[0]);
471 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
472 if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
473 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
474 pkey1,
475 (be16_to_cpu(hdr->lrh[0]) >> 4) &
476 0xF,
477 src_qp, qp->ibqp.qp_num,
478 hdr->lrh[3], hdr->lrh[1]);
479 goto bail;
480 }
481 }
482 if (unlikely(qkey != qp->qkey)) {
483 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
484 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
485 src_qp, qp->ibqp.qp_num,
486 hdr->lrh[3], hdr->lrh[1]);
487 goto bail;
488 }
489 /* Drop invalid MAD packets (see 13.5.3.1). */
490 if (unlikely(qp->ibqp.qp_num == 1 &&
491 (tlen != 256 ||
492 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
493 ibp->n_pkt_drops++;
494 goto bail;
495 }
496 } else {
497 struct ib_smp *smp;
498
499 /* Drop invalid MAD packets (see 13.5.3.1). */
500 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
501 ibp->n_pkt_drops++;
502 goto bail;
503 }
504 smp = (struct ib_smp *) data;
505 if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
506 hdr->lrh[3] == IB_LID_PERMISSIVE) &&
507 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
508 ibp->n_pkt_drops++;
509 goto bail;
510 }
511 }
512
513 /*
514	 * The opcode is in the low byte when it's in network order
515 * (top byte when in host order).
516 */
517 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
518 if (qp->ibqp.qp_num > 1 &&
519 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
520 wc.ex.imm_data = ohdr->u.ud.imm_data;
521 wc.wc_flags = IB_WC_WITH_IMM;
522 hdrsize += sizeof(u32);
523 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
524 wc.ex.imm_data = 0;
525 wc.wc_flags = 0;
526 } else {
527 ibp->n_pkt_drops++;
528 goto bail;
529 }
530
531 /*
532	 * A GRH is expected to precede the data even if not
533 * present on the wire.
534 */
535 wc.byte_len = tlen + sizeof(struct ib_grh);
536
537 /*
538 * We need to serialize getting a receive work queue entry and
539 * generating a completion for it against QPs sending to this QP
540 * locally.
541 */
542 spin_lock(&qp->r_lock);
543
544 /*
545 * Get the next work request entry to find where to put the data.
546 */
547 if (qp->r_flags & QIB_R_REUSE_SGE)
548 qp->r_flags &= ~QIB_R_REUSE_SGE;
549 else {
550 int ret;
551
552 ret = qib_get_rwqe(qp, 0);
553 if (ret < 0) {
554 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
555 goto bail_unlock;
556 }
557 if (!ret) {
558 if (qp->ibqp.qp_num == 0)
559 ibp->n_vl15_dropped++;
560 goto bail_unlock;
561 }
562 }
563 /* Silently drop packets which are too big. */
564 if (unlikely(wc.byte_len > qp->r_len)) {
565 qp->r_flags |= QIB_R_REUSE_SGE;
566 ibp->n_pkt_drops++;
567 goto bail_unlock;
568 }
569 if (has_grh) {
570 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
571 sizeof(struct ib_grh), 1);
572 wc.wc_flags |= IB_WC_GRH;
573 } else
574 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
575 qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
576 while (qp->r_sge.num_sge) {
577 atomic_dec(&qp->r_sge.sge.mr->refcount);
578 if (--qp->r_sge.num_sge)
579 qp->r_sge.sge = *qp->r_sge.sg_list++;
580 }
581 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
582 goto bail_unlock;
583 wc.wr_id = qp->r_wr_id;
584 wc.status = IB_WC_SUCCESS;
585 wc.opcode = IB_WC_RECV;
586 wc.vendor_err = 0;
587 wc.qp = &qp->ibqp;
588 wc.src_qp = src_qp;
589 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
590 qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
591 wc.slid = be16_to_cpu(hdr->lrh[3]);
592 wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
593 dlid = be16_to_cpu(hdr->lrh[1]);
594 /*
595 * Save the LMC lower bits if the destination LID is a unicast LID.
596 */
597 wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
598 dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
599 wc.port_num = qp->port_num;
600 /* Signal completion event if the solicited bit is set. */
601 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
602 (ohdr->bth[0] &
603 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
604bail_unlock:
605 spin_unlock(&qp->r_lock);
606bail:;
607}
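The length check near the top of qib_ud_rcv() boils down to fixed IBA header sizes (LRH 8, GRH 40, BTH 12, DETH 8 bytes), the pad count carried in the BTH, and the 4-byte ICRC. A standalone sketch of that arithmetic; ud_packet_long_enough is an illustrative name, not a driver function:

/* Standalone sketch (not driver code): minimum-length check for a UD packet. */
#include <stdio.h>
#include <stdint.h>

static int ud_packet_long_enough(uint32_t tlen, int has_grh, uint32_t bth0)
{
	uint32_t hdrsize = has_grh ? 8 + 40 + 12 + 8 : 8 + 12 + 8;
	uint32_t pad = (bth0 >> 20) & 3;	/* PadCnt field of the BTH */

	return tlen >= hdrsize + pad + 4;	/* + ICRC */
}

int main(void)
{
	/* with no GRH and no padding the floor is 28 + 4 = 32 bytes:
	 * a 64-byte packet passes, a 30-byte one is dropped */
	printf("%d %d\n",
	       ud_packet_long_enough(64, 0, 0),
	       ud_packet_long_enough(30, 0, 0));
	return 0;
}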
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 000000000000..d7a26c1d4f37
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/mm.h>
35#include <linux/device.h>
36
37#include "qib.h"
38
39static void __qib_release_user_pages(struct page **p, size_t num_pages,
40 int dirty)
41{
42 size_t i;
43
44 for (i = 0; i < num_pages; i++) {
45 if (dirty)
46 set_page_dirty_lock(p[i]);
47 put_page(p[i]);
48 }
49}
50
51/*
52 * Call with current->mm->mmap_sem held.
53 */
54static int __get_user_pages(unsigned long start_page, size_t num_pages,
55 struct page **p, struct vm_area_struct **vma)
56{
57 unsigned long lock_limit;
58 size_t got;
59 int ret;
60
61 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
62
63 if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
64 ret = -ENOMEM;
65 goto bail;
66 }
67
68 for (got = 0; got < num_pages; got += ret) {
69 ret = get_user_pages(current, current->mm,
70 start_page + got * PAGE_SIZE,
71 num_pages - got, 1, 1,
72 p + got, vma);
73 if (ret < 0)
74 goto bail_release;
75 }
76
77 current->mm->locked_vm += num_pages;
78
79 ret = 0;
80 goto bail;
81
82bail_release:
83 __qib_release_user_pages(p, got, 0);
84bail:
85 return ret;
86}
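The pinning limit applied above is simply RLIMIT_MEMLOCK expressed in pages. A userspace sketch, not driver code, showing how the same figure can be derived with getrlimit(); the output formatting is illustrative only:

/* Userspace sketch (not driver code): how many pages may be locked. */
#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	/* an unlimited rlimit means no page cap; the driver additionally
	 * bypasses the check entirely for tasks with CAP_IPC_LOCK */
	if (rl.rlim_cur == RLIM_INFINITY)
		printf("no pinning limit\n");
	else
		printf("may pin up to %llu pages of %ld bytes\n",
		       (unsigned long long)rl.rlim_cur /
		       (unsigned long long)page_size,
		       page_size);
	return 0;
}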
87
88/**
89 * qib_map_page - a safety wrapper around pci_map_page()
90 *
91 * A dma_addr of all 0's is interpreted by the chip as "disabled".
92 * Unfortunately, it can also be a valid dma_addr returned on some
93 * architectures.
94 *
95 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
96 * have to bother with retries or mapping a dummy page to ensure we
97 * don't just get the same mapping again.
98 *
99 * I'm sure we won't be so lucky with other iommus, so FIXME.
100 */
101dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
102 unsigned long offset, size_t size, int direction)
103{
104 dma_addr_t phys;
105
106 phys = pci_map_page(hwdev, page, offset, size, direction);
107
108 if (phys == 0) {
109 pci_unmap_page(hwdev, phys, size, direction);
110 phys = pci_map_page(hwdev, page, offset, size, direction);
111 /*
112 * FIXME: If we get 0 again, we should keep this page,
113 * map another, then free the 0 page.
114 */
115 }
116
117 return phys;
118}
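As a rough illustration of the retry idea described in the comment above, here is a standalone sketch built around a stand-in mapping callback; map_avoiding_zero and toy_map are hypothetical names, the real PCI DMA API is not used, and the unmap of the zero mapping that the real function performs is omitted:

/* Standalone sketch (not driver code): retry once if the mapping cookie is 0,
 * since the hardware would read a 0 address back as "disabled". */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t (*map_fn)(void *buf, size_t len);

static uint64_t map_avoiding_zero(map_fn map, void *buf, size_t len)
{
	uint64_t cookie = map(buf, len);

	/* relies, as the driver comment notes, on the mapper not handing out
	 * the same address twice in a row */
	if (cookie == 0)
		cookie = map(buf, len);
	return cookie;
}

/* toy mapper for the example: hands out 0 first, then 0x1000, 0x2000, ... */
static uint64_t toy_map(void *buf, size_t len)
{
	static uint64_t next;
	uint64_t out = next;

	(void)buf; (void)len;
	next += 0x1000;
	return out;
}

int main(void)
{
	char buf[16];

	printf("%#llx\n",
	       (unsigned long long)map_avoiding_zero(toy_map, buf, sizeof(buf)));
	return 0;
}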
119
120/**
121 * qib_get_user_pages - lock user pages into memory
122 * @start_page: the start page
123 * @num_pages: the number of pages
124 * @p: the output page structures
125 *
126 * This function takes a given start page (page aligned user virtual
127 * address) and pins it and the following specified number of pages. For
128 * now, num_pages is always 1, but that will probably change at some point
129 * (because the caller is doing expected sends on a single virtually contiguous
130 * buffer, so we can do all pages at once).
131 */
132int qib_get_user_pages(unsigned long start_page, size_t num_pages,
133 struct page **p)
134{
135 int ret;
136
137 down_write(&current->mm->mmap_sem);
138
139 ret = __get_user_pages(start_page, num_pages, p, NULL);
140
141 up_write(&current->mm->mmap_sem);
142
143 return ret;
144}
145
146void qib_release_user_pages(struct page **p, size_t num_pages)
147{
148 if (current->mm) /* during close after signal, mm can be NULL */
149 down_write(&current->mm->mmap_sem);
150
151 __qib_release_user_pages(p, num_pages, 1);
152
153 if (current->mm) {
154 current->mm->locked_vm -= num_pages;
155 up_write(&current->mm->mmap_sem);
156 }
157}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
new file mode 100644
index 000000000000..4c19e06b5e85
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -0,0 +1,897 @@
1/*
2 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/mm.h>
33#include <linux/types.h>
34#include <linux/device.h>
35#include <linux/dmapool.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/highmem.h>
39#include <linux/io.h>
40#include <linux/uio.h>
41#include <linux/rbtree.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44
45#include "qib.h"
46#include "qib_user_sdma.h"
47
48/* minimum size of header */
49#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
50/* expected size of headers (for dma_pool) */
51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52/* attempt to drain the queue for 5secs */
53#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
54
55struct qib_user_sdma_pkt {
56 u8 naddr; /* dimension of addr (1..3) ... */
57 u32 counter; /* sdma pkts queued counter for this entry */
58 u64 added; /* global descq number of entries */
59
60 struct {
61 u32 offset; /* offset for kvaddr, addr */
62 u32 length; /* length in page */
63 u8 put_page; /* should we put_page? */
64 u8 dma_mapped; /* is page dma_mapped? */
65 struct page *page; /* may be NULL (coherent mem) */
66 void *kvaddr; /* FIXME: only for pio hack */
67 dma_addr_t addr;
68 } addr[4]; /* max pages, any more and we coalesce */
69 struct list_head list; /* list element */
70};
71
72struct qib_user_sdma_queue {
73 /*
74 * pkts sent to dma engine are queued on this
75	 * list head. The elements of this
76	 * list are of type struct qib_user_sdma_pkt...
77 */
78 struct list_head sent;
79
80 /* headers with expected length are allocated from here... */
81 char header_cache_name[64];
82 struct dma_pool *header_cache;
83
84 /* packets are allocated from the slab cache... */
85 char pkt_slab_name[64];
86 struct kmem_cache *pkt_slab;
87
88 /* as packets go on the queued queue, they are counted... */
89 u32 counter;
90 u32 sent_counter;
91
92 /* dma page table */
93 struct rb_root dma_pages_root;
94
95 /* protect everything above... */
96 struct mutex lock;
97};
98
99struct qib_user_sdma_queue *
100qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
101{
102 struct qib_user_sdma_queue *pq =
103 kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
104
105 if (!pq)
106 goto done;
107
108 pq->counter = 0;
109 pq->sent_counter = 0;
110 INIT_LIST_HEAD(&pq->sent);
111
112 mutex_init(&pq->lock);
113
114 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
115 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
116 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
117 sizeof(struct qib_user_sdma_pkt),
118 0, 0, NULL);
119
120 if (!pq->pkt_slab)
121 goto err_kfree;
122
123 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
124 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
125 pq->header_cache = dma_pool_create(pq->header_cache_name,
126 dev,
127 QIB_USER_SDMA_EXP_HEADER_LENGTH,
128 4, 0);
129 if (!pq->header_cache)
130 goto err_slab;
131
132 pq->dma_pages_root = RB_ROOT;
133
134 goto done;
135
136err_slab:
137 kmem_cache_destroy(pq->pkt_slab);
138err_kfree:
139 kfree(pq);
140 pq = NULL;
141
142done:
143 return pq;
144}
145
146static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
147 int i, size_t offset, size_t len,
148 int put_page, int dma_mapped,
149 struct page *page,
150 void *kvaddr, dma_addr_t dma_addr)
151{
152 pkt->addr[i].offset = offset;
153 pkt->addr[i].length = len;
154 pkt->addr[i].put_page = put_page;
155 pkt->addr[i].dma_mapped = dma_mapped;
156 pkt->addr[i].page = page;
157 pkt->addr[i].kvaddr = kvaddr;
158 pkt->addr[i].addr = dma_addr;
159}
160
161static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
162 u32 counter, size_t offset,
163 size_t len, int dma_mapped,
164 struct page *page,
165 void *kvaddr, dma_addr_t dma_addr)
166{
167 pkt->naddr = 1;
168 pkt->counter = counter;
169 qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
170 kvaddr, dma_addr);
171}
172
173/* we have too many pages in the iovec; coalesce to a single page */
174static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
175 struct qib_user_sdma_pkt *pkt,
176 const struct iovec *iov,
177 unsigned long niov)
178{
179 int ret = 0;
180 struct page *page = alloc_page(GFP_KERNEL);
181 void *mpage_save;
182 char *mpage;
183 int i;
184 int len = 0;
185 dma_addr_t dma_addr;
186
187 if (!page) {
188 ret = -ENOMEM;
189 goto done;
190 }
191
192 mpage = kmap(page);
193 mpage_save = mpage;
194 for (i = 0; i < niov; i++) {
195 int cfur;
196
197 cfur = copy_from_user(mpage,
198 iov[i].iov_base, iov[i].iov_len);
199 if (cfur) {
200 ret = -EFAULT;
201 goto free_unmap;
202 }
203
204 mpage += iov[i].iov_len;
205 len += iov[i].iov_len;
206 }
207
208 dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
209 DMA_TO_DEVICE);
210 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
211 ret = -ENOMEM;
212 goto free_unmap;
213 }
214
215 qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
216 dma_addr);
217 pkt->naddr = 2;
218
219 goto done;
220
221free_unmap:
222 kunmap(page);
223 __free_page(page);
224done:
225 return ret;
226}
227
228/*
229 * How many pages in this iovec element?
230 */
231static int qib_user_sdma_num_pages(const struct iovec *iov)
232{
233 const unsigned long addr = (unsigned long) iov->iov_base;
234 const unsigned long len = iov->iov_len;
235 const unsigned long spage = addr & PAGE_MASK;
236 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
237
238 return 1 + ((epage - spage) >> PAGE_SHIFT);
239}
240
241/*
242 * Truncate length to page boundary.
243 */
244static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
245{
246 const unsigned long offset = addr & ~PAGE_MASK;
247
248 return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
249}
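A worked example of the two helpers above, assuming 4 KiB pages; num_pages and page_length here are standalone restatements for illustration, not the driver functions:

/* Standalone sketch (not driver code): iovec page count and first-fragment length. */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & EX_PAGE_MASK;
	unsigned long epage = (addr + len - 1) & EX_PAGE_MASK;

	return 1 + ((epage - spage) / EX_PAGE_SIZE);
}

static unsigned long page_length(unsigned long addr, unsigned long len)
{
	unsigned long offset = addr & ~EX_PAGE_MASK;

	return (offset + len > EX_PAGE_SIZE) ? EX_PAGE_SIZE - offset : len;
}

int main(void)
{
	/* a 3000-byte buffer starting 3072 bytes into a page spans 2 pages;
	 * only 1024 of its bytes fit in the first page */
	printf("pages=%lu first_frag=%lu\n",
	       num_pages(0x13000UL + 3072, 3000),
	       page_length(0x13000UL + 3072, 3000));
	return 0;
}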
250
251static void qib_user_sdma_free_pkt_frag(struct device *dev,
252 struct qib_user_sdma_queue *pq,
253 struct qib_user_sdma_pkt *pkt,
254 int frag)
255{
256 const int i = frag;
257
258 if (pkt->addr[i].page) {
259 if (pkt->addr[i].dma_mapped)
260 dma_unmap_page(dev,
261 pkt->addr[i].addr,
262 pkt->addr[i].length,
263 DMA_TO_DEVICE);
264
265 if (pkt->addr[i].kvaddr)
266 kunmap(pkt->addr[i].page);
267
268 if (pkt->addr[i].put_page)
269 put_page(pkt->addr[i].page);
270 else
271 __free_page(pkt->addr[i].page);
272 } else if (pkt->addr[i].kvaddr)
273 /* free coherent mem from cache... */
274 dma_pool_free(pq->header_cache,
275 pkt->addr[i].kvaddr, pkt->addr[i].addr);
276}
277
278/* return number of pages pinned... */
279static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
280 struct qib_user_sdma_pkt *pkt,
281 unsigned long addr, int tlen, int npages)
282{
283 struct page *pages[2];
284 int j;
285 int ret;
286
287 ret = get_user_pages(current, current->mm, addr,
288 npages, 0, 1, pages, NULL);
289
290 if (ret != npages) {
291 int i;
292
293 for (i = 0; i < ret; i++)
294 put_page(pages[i]);
295
296 ret = -ENOMEM;
297 goto done;
298 }
299
300 for (j = 0; j < npages; j++) {
301 /* map the pages... */
302 const int flen = qib_user_sdma_page_length(addr, tlen);
303 dma_addr_t dma_addr =
304 dma_map_page(&dd->pcidev->dev,
305 pages[j], 0, flen, DMA_TO_DEVICE);
306 unsigned long fofs = addr & ~PAGE_MASK;
307
308 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
309 ret = -ENOMEM;
310 goto done;
311 }
312
313 qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
314 pages[j], kmap(pages[j]), dma_addr);
315
316 pkt->naddr++;
317 addr += flen;
318 tlen -= flen;
319 }
320
321done:
322 return ret;
323}
324
325static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
326 struct qib_user_sdma_queue *pq,
327 struct qib_user_sdma_pkt *pkt,
328 const struct iovec *iov,
329 unsigned long niov)
330{
331 int ret = 0;
332 unsigned long idx;
333
334 for (idx = 0; idx < niov; idx++) {
335 const int npages = qib_user_sdma_num_pages(iov + idx);
336 const unsigned long addr = (unsigned long) iov[idx].iov_base;
337
338 ret = qib_user_sdma_pin_pages(dd, pkt, addr,
339 iov[idx].iov_len, npages);
340 if (ret < 0)
341 goto free_pkt;
342 }
343
344 goto done;
345
346free_pkt:
347 for (idx = 0; idx < pkt->naddr; idx++)
348 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
349
350done:
351 return ret;
352}
353
354static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
355 struct qib_user_sdma_queue *pq,
356 struct qib_user_sdma_pkt *pkt,
357 const struct iovec *iov,
358 unsigned long niov, int npages)
359{
360 int ret = 0;
361
362 if (npages >= ARRAY_SIZE(pkt->addr))
363 ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
364 else
365 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
366
367 return ret;
368}
369
370/* free a packet list -- return counter value of last packet */
371static void qib_user_sdma_free_pkt_list(struct device *dev,
372 struct qib_user_sdma_queue *pq,
373 struct list_head *list)
374{
375 struct qib_user_sdma_pkt *pkt, *pkt_next;
376
377 list_for_each_entry_safe(pkt, pkt_next, list, list) {
378 int i;
379
380 for (i = 0; i < pkt->naddr; i++)
381 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
382
383 kmem_cache_free(pq->pkt_slab, pkt);
384 }
385}
386
387/*
388 * copy headers, coalesce etc -- pq->lock must be held
389 *
390 * We queue all the packets to the list, returning the
391 * number of iovec entries consumed. The list must be empty
392 * initially, as we clean it if there is an error...
393 */
394static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
395 struct qib_user_sdma_queue *pq,
396 struct list_head *list,
397 const struct iovec *iov,
398 unsigned long niov,
399 int maxpkts)
400{
401 unsigned long idx = 0;
402 int ret = 0;
403 int npkts = 0;
404 struct page *page = NULL;
405 __le32 *pbc;
406 dma_addr_t dma_addr;
407 struct qib_user_sdma_pkt *pkt = NULL;
408 size_t len;
409 size_t nw;
410 u32 counter = pq->counter;
411 int dma_mapped = 0;
412
413 while (idx < niov && npkts < maxpkts) {
414 const unsigned long addr = (unsigned long) iov[idx].iov_base;
415 const unsigned long idx_save = idx;
416 unsigned pktnw;
417 unsigned pktnwc;
418 int nfrags = 0;
419 int npages = 0;
420 int cfur;
421
422 dma_mapped = 0;
423 len = iov[idx].iov_len;
424 nw = len >> 2;
425 page = NULL;
426
427 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
428 if (!pkt) {
429 ret = -ENOMEM;
430 goto free_list;
431 }
432
433 if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
434 len > PAGE_SIZE || len & 3 || addr & 3) {
435 ret = -EINVAL;
436 goto free_pkt;
437 }
438
439 if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
440 pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
441 &dma_addr);
442 else
443 pbc = NULL;
444
445 if (!pbc) {
446 page = alloc_page(GFP_KERNEL);
447 if (!page) {
448 ret = -ENOMEM;
449 goto free_pkt;
450 }
451 pbc = kmap(page);
452 }
453
454 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
455 if (cfur) {
456 ret = -EFAULT;
457 goto free_pbc;
458 }
459
460 /*
461		 * This assignment is a bit strange. It's because the
462		 * pbc counts the number of 32 bit words in the full
463 * packet _except_ the first word of the pbc itself...
464 */
465 pktnwc = nw - 1;
466
467 /*
468 * pktnw computation yields the number of 32 bit words
469 * that the caller has indicated in the PBC. note that
470 * this is one less than the total number of words that
471		 * go to the send DMA engine, as the first 32 bit word
472 * of the PBC itself is not counted. Armed with this count,
473 * we can verify that the packet is consistent with the
474 * iovec lengths.
475 */
476 pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
477 if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
478 ret = -EINVAL;
479 goto free_pbc;
480 }
481
482 idx++;
483 while (pktnwc < pktnw && idx < niov) {
484 const size_t slen = iov[idx].iov_len;
485 const unsigned long faddr =
486 (unsigned long) iov[idx].iov_base;
487
488 if (slen & 3 || faddr & 3 || !slen ||
489 slen > PAGE_SIZE) {
490 ret = -EINVAL;
491 goto free_pbc;
492 }
493
494 npages++;
495 if ((faddr & PAGE_MASK) !=
496 ((faddr + slen - 1) & PAGE_MASK))
497 npages++;
498
499 pktnwc += slen >> 2;
500 idx++;
501 nfrags++;
502 }
503
504 if (pktnwc != pktnw) {
505 ret = -EINVAL;
506 goto free_pbc;
507 }
508
509 if (page) {
510 dma_addr = dma_map_page(&dd->pcidev->dev,
511 page, 0, len, DMA_TO_DEVICE);
512 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
513 ret = -ENOMEM;
514 goto free_pbc;
515 }
516
517 dma_mapped = 1;
518 }
519
520 qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
521 page, pbc, dma_addr);
522
523 if (nfrags) {
524 ret = qib_user_sdma_init_payload(dd, pq, pkt,
525 iov + idx_save + 1,
526 nfrags, npages);
527 if (ret < 0)
528 goto free_pbc_dma;
529 }
530
531 counter++;
532 npkts++;
533
534 list_add_tail(&pkt->list, list);
535 }
536
537 ret = idx;
538 goto done;
539
540free_pbc_dma:
541 if (dma_mapped)
542 dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
543free_pbc:
544 if (page) {
545 kunmap(page);
546 __free_page(page);
547 } else
548 dma_pool_free(pq->header_cache, pbc, dma_addr);
549free_pkt:
550 kmem_cache_free(pq->pkt_slab, pkt);
551free_list:
552 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
553done:
554 return ret;
555}
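The pktnw/pktnwc bookkeeping in qib_user_sdma_queue_pkts() above amounts to checking that the dword count announced in the PBC matches the header and payload iovec lengths, excluding the first word of the PBC itself. A standalone sketch of just that check; EX_PBC_LENGTH_MASK is a made-up stand-in for QIB_PBC_LENGTH_MASK, whose actual value is defined elsewhere in the driver:

/* Standalone sketch (not driver code): PBC vs. iovec length consistency. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define EX_PBC_LENGTH_MASK 0x7ff	/* illustrative mask only */

static int pbc_consistent(uint32_t pbc0, size_t hdr_bytes,
			  const size_t *payload_bytes, size_t nfrags)
{
	uint32_t pktnw = pbc0 & EX_PBC_LENGTH_MASK;
	uint32_t pktnwc = (uint32_t)(hdr_bytes >> 2) - 1;	/* exclude PBC word 0 */
	size_t i;

	for (i = 0; i < nfrags; i++)
		pktnwc += (uint32_t)(payload_bytes[i] >> 2);
	return pktnwc == pktnw;
}

int main(void)
{
	/* a 64-byte header (16 words, 15 counted) plus a 2048-byte payload
	 * (512 words) must be announced by a PBC count of 527 */
	size_t payload = 2048;

	printf("%d\n", pbc_consistent(527, 64, &payload, 1));
	return 0;
}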
556
557static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
558 u32 c)
559{
560 pq->sent_counter = c;
561}
562
563/* try to clean out queue -- needs pq->lock */
564static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
565 struct qib_user_sdma_queue *pq)
566{
567 struct qib_devdata *dd = ppd->dd;
568 struct list_head free_list;
569 struct qib_user_sdma_pkt *pkt;
570 struct qib_user_sdma_pkt *pkt_prev;
571 int ret = 0;
572
573 INIT_LIST_HEAD(&free_list);
574
575 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
576 s64 descd = ppd->sdma_descq_removed - pkt->added;
577
578 if (descd < 0)
579 break;
580
581 list_move_tail(&pkt->list, &free_list);
582
583 /* one more packet cleaned */
584 ret++;
585 }
586
587 if (!list_empty(&free_list)) {
588 u32 counter;
589
590 pkt = list_entry(free_list.prev,
591 struct qib_user_sdma_pkt, list);
592 counter = pkt->counter;
593
594 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
595 qib_user_sdma_set_complete_counter(pq, counter);
596 }
597
598 return ret;
599}
600
601void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
602{
603 if (!pq)
604 return;
605
606 kmem_cache_destroy(pq->pkt_slab);
607 dma_pool_destroy(pq->header_cache);
608 kfree(pq);
609}
610
611/* clean descriptor queue, returns > 0 if some elements cleaned */
612static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
613{
614 int ret;
615 unsigned long flags;
616
617 spin_lock_irqsave(&ppd->sdma_lock, flags);
618 ret = qib_sdma_make_progress(ppd);
619 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
620
621 return ret;
622}
623
624/* we're in close, drain packets so that we can clean up successfully... */
625void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
626 struct qib_user_sdma_queue *pq)
627{
628 struct qib_devdata *dd = ppd->dd;
629 int i;
630
631 if (!pq)
632 return;
633
634 for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
635 mutex_lock(&pq->lock);
636 if (list_empty(&pq->sent)) {
637 mutex_unlock(&pq->lock);
638 break;
639 }
640 qib_user_sdma_hwqueue_clean(ppd);
641 qib_user_sdma_queue_clean(ppd, pq);
642 mutex_unlock(&pq->lock);
643 msleep(10);
644 }
645
646 if (!list_empty(&pq->sent)) {
647 struct list_head free_list;
648
649 qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
650 INIT_LIST_HEAD(&free_list);
651 mutex_lock(&pq->lock);
652 list_splice_init(&pq->sent, &free_list);
653 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
654 mutex_unlock(&pq->lock);
655 }
656}
657
658static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
659 u64 addr, u64 dwlen, u64 dwoffset)
660{
661 u8 tmpgen;
662
663 tmpgen = ppd->sdma_generation;
664
665 return cpu_to_le64(/* SDmaPhyAddr[31:0] */
666 ((addr & 0xfffffffcULL) << 32) |
667 /* SDmaGeneration[1:0] */
668 ((tmpgen & 3ULL) << 30) |
669 /* SDmaDwordCount[10:0] */
670 ((dwlen & 0x7ffULL) << 16) |
671 /* SDmaBufOffset[12:2] */
672 (dwoffset & 0x7ffULL));
673}
674
675static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
676{
677 return descq | cpu_to_le64(1ULL << 12);
678}
679
680static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
681{
682 /* last */ /* dma head */
683 return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
684}
685
686static inline __le64 qib_sdma_make_desc1(u64 addr)
687{
688 /* SDmaPhyAddr[47:32] */
689 return cpu_to_le64(addr >> 32);
690}
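A standalone sketch of the field packing performed by qib_sdma_make_desc0() and qib_sdma_make_desc1() above; the bit positions are copied from their comments, but the cpu_to_le64() conversion is omitted, so this only shows the host-order layout:

/* Standalone sketch (not driver code): SDMA descriptor word packing. */
#include <stdio.h>
#include <stdint.h>

static uint64_t make_desc0(uint8_t gen, uint64_t addr, uint64_t dwlen,
			   uint64_t dwoffset)
{
	return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0] */
	       ((gen & 3ULL) << 30) |		/* SDmaGeneration[1:0] */
	       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
	       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2] */
}

static uint64_t make_desc1(uint64_t addr)
{
	return addr >> 32;			/* SDmaPhyAddr[47:32] */
}

int main(void)
{
	uint64_t addr = 0x0000123456789000ULL;

	/* 64 dwords (256 bytes) at dword offset 0, generation 1 */
	printf("desc0=%#018llx desc1=%#010llx\n",
	       (unsigned long long)make_desc0(1, addr, 64, 0),
	       (unsigned long long)make_desc1(addr));
	return 0;
}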
691
692static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
693 struct qib_user_sdma_pkt *pkt, int idx,
694 unsigned ofs, u16 tail)
695{
696 const u64 addr = (u64) pkt->addr[idx].addr +
697 (u64) pkt->addr[idx].offset;
698 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
699 __le64 *descqp;
700 __le64 descq0;
701
702 descqp = &ppd->sdma_descq[tail].qw[0];
703
704 descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
705 if (idx == 0)
706 descq0 = qib_sdma_make_first_desc0(descq0);
707 if (idx == pkt->naddr - 1)
708 descq0 = qib_sdma_make_last_desc0(descq0);
709
710 descqp[0] = descq0;
711 descqp[1] = qib_sdma_make_desc1(addr);
712}
713
714/* pq->lock must be held, get packets on the wire... */
715static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
716 struct qib_user_sdma_queue *pq,
717 struct list_head *pktlist)
718{
719 struct qib_devdata *dd = ppd->dd;
720 int ret = 0;
721 unsigned long flags;
722 u16 tail;
723 u8 generation;
724 u64 descq_added;
725
726 if (list_empty(pktlist))
727 return 0;
728
729 if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
730 return -ECOMM;
731
732 spin_lock_irqsave(&ppd->sdma_lock, flags);
733
734 /* keep a copy for restoring purposes in case of problems */
735 generation = ppd->sdma_generation;
736 descq_added = ppd->sdma_descq_added;
737
738 if (unlikely(!__qib_sdma_running(ppd))) {
739 ret = -ECOMM;
740 goto unlock;
741 }
742
743 tail = ppd->sdma_descq_tail;
744 while (!list_empty(pktlist)) {
745 struct qib_user_sdma_pkt *pkt =
746 list_entry(pktlist->next, struct qib_user_sdma_pkt,
747 list);
748 int i;
749 unsigned ofs = 0;
750 u16 dtail = tail;
751
752 if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
753 goto unlock_check_tail;
754
755 for (i = 0; i < pkt->naddr; i++) {
756 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
757 ofs += pkt->addr[i].length >> 2;
758
759 if (++tail == ppd->sdma_descq_cnt) {
760 tail = 0;
761 ++ppd->sdma_generation;
762 }
763 }
764
765 if ((ofs << 2) > ppd->ibmaxlen) {
766 ret = -EMSGSIZE;
767 goto unlock;
768 }
769
770 /*
771 * If the packet is >= 2KB mtu equivalent, we have to use
772 * the large buffers, and have to mark each descriptor as
773 * part of a large buffer packet.
774 */
775 if (ofs > dd->piosize2kmax_dwords) {
776 for (i = 0; i < pkt->naddr; i++) {
777 ppd->sdma_descq[dtail].qw[0] |=
778 cpu_to_le64(1ULL << 14);
779 if (++dtail == ppd->sdma_descq_cnt)
780 dtail = 0;
781 }
782 }
783
784 ppd->sdma_descq_added += pkt->naddr;
785 pkt->added = ppd->sdma_descq_added;
786 list_move_tail(&pkt->list, &pq->sent);
787 ret++;
788 }
789
790unlock_check_tail:
791 /* advance the tail on the chip if necessary */
792 if (ppd->sdma_descq_tail != tail)
793 dd->f_sdma_update_tail(ppd, tail);
794
795unlock:
796 if (unlikely(ret < 0)) {
797 ppd->sdma_generation = generation;
798 ppd->sdma_descq_added = descq_added;
799 }
800 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
801
802 return ret;
803}
804
805int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
806 struct qib_user_sdma_queue *pq,
807 const struct iovec *iov,
808 unsigned long dim)
809{
810 struct qib_devdata *dd = rcd->dd;
811 struct qib_pportdata *ppd = rcd->ppd;
812 int ret = 0;
813 struct list_head list;
814 int npkts = 0;
815
816 INIT_LIST_HEAD(&list);
817
818 mutex_lock(&pq->lock);
819
820 /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
821 if (!qib_sdma_running(ppd))
822 goto done_unlock;
823
824 if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
825 qib_user_sdma_hwqueue_clean(ppd);
826 qib_user_sdma_queue_clean(ppd, pq);
827 }
828
829 while (dim) {
830 const int mxp = 8;
831
832 down_write(&current->mm->mmap_sem);
833 ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
834 up_write(&current->mm->mmap_sem);
835
836 if (ret <= 0)
837 goto done_unlock;
838 else {
839 dim -= ret;
840 iov += ret;
841 }
842
843 /* force packets onto the sdma hw queue... */
844 if (!list_empty(&list)) {
845 /*
846			 * Lazily clean hw queue. The 4 is a guess of about
847 * how many sdma descriptors a packet will take (it
848 * doesn't have to be perfect).
849 */
850 if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
851 qib_user_sdma_hwqueue_clean(ppd);
852 qib_user_sdma_queue_clean(ppd, pq);
853 }
854
855 ret = qib_user_sdma_push_pkts(ppd, pq, &list);
856 if (ret < 0)
857 goto done_unlock;
858 else {
859 npkts += ret;
860 pq->counter += ret;
861
862 if (!list_empty(&list))
863 goto done_unlock;
864 }
865 }
866 }
867
868done_unlock:
869 if (!list_empty(&list))
870 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
871 mutex_unlock(&pq->lock);
872
873 return (ret < 0) ? ret : npkts;
874}
875
876int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
877 struct qib_user_sdma_queue *pq)
878{
879 int ret = 0;
880
881 mutex_lock(&pq->lock);
882 qib_user_sdma_hwqueue_clean(ppd);
883 ret = qib_user_sdma_queue_clean(ppd, pq);
884 mutex_unlock(&pq->lock);
885
886 return ret;
887}
888
889u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
890{
891 return pq ? pq->sent_counter : 0;
892}
893
894u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
895{
896 return pq ? pq->counter : 0;
897}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
new file mode 100644
index 000000000000..ce8cbaf6a5c2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/device.h>
33
34struct qib_user_sdma_queue;
35
36struct qib_user_sdma_queue *
37qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
38void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
39
40int qib_user_sdma_writev(struct qib_ctxtdata *pd,
41 struct qib_user_sdma_queue *pq,
42 const struct iovec *iov,
43 unsigned long dim);
44
45int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
46 struct qib_user_sdma_queue *pq);
47
48void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
49 struct qib_user_sdma_queue *pq);
50
51u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
52u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
new file mode 100644
index 000000000000..cda8f4173d23
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -0,0 +1,2248 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <rdma/ib_mad.h>
36#include <rdma/ib_user_verbs.h>
37#include <linux/io.h>
38#include <linux/utsname.h>
39#include <linux/rculist.h>
40#include <linux/mm.h>
41
42#include "qib.h"
43#include "qib_common.h"
44
45static unsigned int ib_qib_qp_table_size = 251;
46module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
47MODULE_PARM_DESC(qp_table_size, "QP table size");
48
49unsigned int ib_qib_lkey_table_size = 16;
50module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
51 S_IRUGO);
52MODULE_PARM_DESC(lkey_table_size,
53 "LKEY table size in bits (2^n, 1 <= n <= 23)");
54
55static unsigned int ib_qib_max_pds = 0xFFFF;
56module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
57MODULE_PARM_DESC(max_pds,
58 "Maximum number of protection domains to support");
59
60static unsigned int ib_qib_max_ahs = 0xFFFF;
61module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
62MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
63
64unsigned int ib_qib_max_cqes = 0x2FFFF;
65module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
66MODULE_PARM_DESC(max_cqes,
67 "Maximum number of completion queue entries to support");
68
69unsigned int ib_qib_max_cqs = 0x1FFFF;
70module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
71MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
72
73unsigned int ib_qib_max_qp_wrs = 0x3FFF;
74module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
75MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
76
77unsigned int ib_qib_max_qps = 16384;
78module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
79MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
80
81unsigned int ib_qib_max_sges = 0x60;
82module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
83MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
84
85unsigned int ib_qib_max_mcast_grps = 16384;
86module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
87MODULE_PARM_DESC(max_mcast_grps,
88 "Maximum number of multicast groups to support");
89
90unsigned int ib_qib_max_mcast_qp_attached = 16;
91module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
92 uint, S_IRUGO);
93MODULE_PARM_DESC(max_mcast_qp_attached,
94 "Maximum number of attached QPs to support");
95
96unsigned int ib_qib_max_srqs = 1024;
97module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
98MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
99
100unsigned int ib_qib_max_srq_sges = 128;
101module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
102MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
103
104unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
105module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
106MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
107
108static unsigned int ib_qib_disable_sma;
109module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
110MODULE_PARM_DESC(disable_sma, "Disable the SMA");
111
112/*
113 * Note that it is OK to post send work requests in the SQE and ERR
114 * states; qib_do_send() will process them and generate error
115 * completions as per IB 1.2 C10-96.
116 */
117const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
118 [IB_QPS_RESET] = 0,
119 [IB_QPS_INIT] = QIB_POST_RECV_OK,
120 [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
121 [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
122 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
123 QIB_PROCESS_NEXT_SEND_OK,
124 [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
125 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
126 [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
127 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
128 [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
129 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
130};
131
132struct qib_ucontext {
133 struct ib_ucontext ibucontext;
134};
135
136static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
137 *ibucontext)
138{
139 return container_of(ibucontext, struct qib_ucontext, ibucontext);
140}
141
142/*
143 * Translate ib_wr_opcode into ib_wc_opcode.
144 */
145const enum ib_wc_opcode ib_qib_wc_opcode[] = {
146 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
147 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
148 [IB_WR_SEND] = IB_WC_SEND,
149 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
150 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
151 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
152 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
153};
154
155/*
156 * System image GUID.
157 */
158__be64 ib_qib_sys_image_guid;
159
160/**
161 * qib_copy_sge - copy data to SGE memory
162 * @ss: the SGE state
163 * @data: the data to copy
164 * @length: the length of the data
165 */
166void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
167{
168 struct qib_sge *sge = &ss->sge;
169
170 while (length) {
171 u32 len = sge->length;
172
173 if (len > length)
174 len = length;
175 if (len > sge->sge_length)
176 len = sge->sge_length;
177 BUG_ON(len == 0);
178 memcpy(sge->vaddr, data, len);
179 sge->vaddr += len;
180 sge->length -= len;
181 sge->sge_length -= len;
182 if (sge->sge_length == 0) {
183 if (release)
184 atomic_dec(&sge->mr->refcount);
185 if (--ss->num_sge)
186 *sge = *ss->sg_list++;
187 } else if (sge->length == 0 && sge->mr->lkey) {
188 if (++sge->n >= QIB_SEGSZ) {
189 if (++sge->m >= sge->mr->mapsz)
190 break;
191 sge->n = 0;
192 }
193 sge->vaddr =
194 sge->mr->map[sge->m]->segs[sge->n].vaddr;
195 sge->length =
196 sge->mr->map[sge->m]->segs[sge->n].length;
197 }
198 data += len;
199 length -= len;
200 }
201}
202
203/**
204 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
205 * @ss: the SGE state
206 * @length: the number of bytes to skip
207 */
208void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
209{
210 struct qib_sge *sge = &ss->sge;
211
212 while (length) {
213 u32 len = sge->length;
214
215 if (len > length)
216 len = length;
217 if (len > sge->sge_length)
218 len = sge->sge_length;
219 BUG_ON(len == 0);
220 sge->vaddr += len;
221 sge->length -= len;
222 sge->sge_length -= len;
223 if (sge->sge_length == 0) {
224 if (release)
225 atomic_dec(&sge->mr->refcount);
226 if (--ss->num_sge)
227 *sge = *ss->sg_list++;
228 } else if (sge->length == 0 && sge->mr->lkey) {
229 if (++sge->n >= QIB_SEGSZ) {
230 if (++sge->m >= sge->mr->mapsz)
231 break;
232 sge->n = 0;
233 }
234 sge->vaddr =
235 sge->mr->map[sge->m]->segs[sge->n].vaddr;
236 sge->length =
237 sge->mr->map[sge->m]->segs[sge->n].length;
238 }
239 length -= len;
240 }
241}
242
243/*
244 * Count the number of DMA descriptors needed to send length bytes of data.
245 * Don't modify the qib_sge_state to get the count.
246 * Return zero if any of the segments is not aligned.
247 */
248static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
249{
250 struct qib_sge *sg_list = ss->sg_list;
251 struct qib_sge sge = ss->sge;
252 u8 num_sge = ss->num_sge;
253 u32 ndesc = 1; /* count the header */
254
255 while (length) {
256 u32 len = sge.length;
257
258 if (len > length)
259 len = length;
260 if (len > sge.sge_length)
261 len = sge.sge_length;
262 BUG_ON(len == 0);
263 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
264 (len != length && (len & (sizeof(u32) - 1)))) {
265 ndesc = 0;
266 break;
267 }
268 ndesc++;
269 sge.vaddr += len;
270 sge.length -= len;
271 sge.sge_length -= len;
272 if (sge.sge_length == 0) {
273 if (--num_sge)
274 sge = *sg_list++;
275 } else if (sge.length == 0 && sge.mr->lkey) {
276 if (++sge.n >= QIB_SEGSZ) {
277 if (++sge.m >= sge.mr->mapsz)
278 break;
279 sge.n = 0;
280 }
281 sge.vaddr =
282 sge.mr->map[sge.m]->segs[sge.n].vaddr;
283 sge.length =
284 sge.mr->map[sge.m]->segs[sge.n].length;
285 }
286 length -= len;
287 }
288 return ndesc;
289}
290
291/*
292 * Copy from the SGEs to the data buffer.
293 */
294static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
295{
296 struct qib_sge *sge = &ss->sge;
297
298 while (length) {
299 u32 len = sge->length;
300
301 if (len > length)
302 len = length;
303 if (len > sge->sge_length)
304 len = sge->sge_length;
305 BUG_ON(len == 0);
306 memcpy(data, sge->vaddr, len);
307 sge->vaddr += len;
308 sge->length -= len;
309 sge->sge_length -= len;
310 if (sge->sge_length == 0) {
311 if (--ss->num_sge)
312 *sge = *ss->sg_list++;
313 } else if (sge->length == 0 && sge->mr->lkey) {
314 if (++sge->n >= QIB_SEGSZ) {
315 if (++sge->m >= sge->mr->mapsz)
316 break;
317 sge->n = 0;
318 }
319 sge->vaddr =
320 sge->mr->map[sge->m]->segs[sge->n].vaddr;
321 sge->length =
322 sge->mr->map[sge->m]->segs[sge->n].length;
323 }
324 data += len;
325 length -= len;
326 }
327}
328
329/**
330 * qib_post_one_send - post one RC, UC, or UD send work request
331 * @qp: the QP to post on
332 * @wr: the work request to send
333 */
334static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
335{
336 struct qib_swqe *wqe;
337 u32 next;
338 int i;
339 int j;
340 int acc;
341 int ret;
342 unsigned long flags;
343 struct qib_lkey_table *rkt;
344 struct qib_pd *pd;
345
346 spin_lock_irqsave(&qp->s_lock, flags);
347
348 /* Check that state is OK to post send. */
349 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
350 goto bail_inval;
351
352 /* IB spec says that num_sge == 0 is OK. */
353 if (wr->num_sge > qp->s_max_sge)
354 goto bail_inval;
355
356 /*
357 * Don't allow RDMA reads or atomic operations on UC or
358 * undefined operations.
359 * Make sure buffer is large enough to hold the result for atomics.
360 */
361 if (wr->opcode == IB_WR_FAST_REG_MR) {
362 if (qib_fast_reg_mr(qp, wr))
363 goto bail_inval;
364 } else if (qp->ibqp.qp_type == IB_QPT_UC) {
365 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
366 goto bail_inval;
367 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
368 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
369 if (wr->opcode != IB_WR_SEND &&
370 wr->opcode != IB_WR_SEND_WITH_IMM)
371 goto bail_inval;
372 /* Check UD destination address PD */
373 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
374 goto bail_inval;
375 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
376 goto bail_inval;
377 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
378 (wr->num_sge == 0 ||
379 wr->sg_list[0].length < sizeof(u64) ||
380 wr->sg_list[0].addr & (sizeof(u64) - 1)))
381 goto bail_inval;
382 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
383 goto bail_inval;
384
385 next = qp->s_head + 1;
386 if (next >= qp->s_size)
387 next = 0;
388 if (next == qp->s_last) {
389 ret = -ENOMEM;
390 goto bail;
391 }
392
393 rkt = &to_idev(qp->ibqp.device)->lk_table;
394 pd = to_ipd(qp->ibqp.pd);
395 wqe = get_swqe_ptr(qp, qp->s_head);
396 wqe->wr = *wr;
397 wqe->length = 0;
398 j = 0;
399 if (wr->num_sge) {
400 acc = wr->opcode >= IB_WR_RDMA_READ ?
401 IB_ACCESS_LOCAL_WRITE : 0;
402 for (i = 0; i < wr->num_sge; i++) {
403 u32 length = wr->sg_list[i].length;
404 int ok;
405
406 if (length == 0)
407 continue;
408 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
409 &wr->sg_list[i], acc);
410 if (!ok)
411 goto bail_inval_free;
412 wqe->length += length;
413 j++;
414 }
415 wqe->wr.num_sge = j;
416 }
417 if (qp->ibqp.qp_type == IB_QPT_UC ||
418 qp->ibqp.qp_type == IB_QPT_RC) {
419 if (wqe->length > 0x80000000U)
420 goto bail_inval_free;
421 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
422 qp->port_num - 1)->ibmtu)
423 goto bail_inval_free;
424 else
425 atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
426 wqe->ssn = qp->s_ssn++;
427 qp->s_head = next;
428
429 ret = 0;
430 goto bail;
431
432bail_inval_free:
433 while (j) {
434 struct qib_sge *sge = &wqe->sg_list[--j];
435
436 atomic_dec(&sge->mr->refcount);
437 }
438bail_inval:
439 ret = -EINVAL;
440bail:
441 spin_unlock_irqrestore(&qp->s_lock, flags);
442 return ret;
443}
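The s_head/s_last arithmetic in qib_post_one_send() above follows the usual circular-buffer convention in which one slot is always left empty, so that head == last can mean "empty" rather than "full". A minimal standalone sketch; ring_full is an illustrative name, not a driver helper:

/* Standalone sketch (not driver code): full test for a one-slot-spare ring. */
#include <stdio.h>

static int ring_full(unsigned head, unsigned last, unsigned size)
{
	unsigned next = head + 1;

	if (next >= size)
		next = 0;
	return next == last;	/* advancing head would catch up to last */
}

int main(void)
{
	/* with 4 slots, at most 3 entries can be outstanding */
	printf("%d %d\n", ring_full(2, 0, 4), ring_full(3, 0, 4));
	return 0;
}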
444
445/**
446 * qib_post_send - post a send on a QP
447 * @ibqp: the QP to post the send on
448 * @wr: the list of work requests to post
449 * @bad_wr: the first bad WR is put here
450 *
451 * This may be called from interrupt context.
452 */
453static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
454 struct ib_send_wr **bad_wr)
455{
456 struct qib_qp *qp = to_iqp(ibqp);
457 int err = 0;
458
459 for (; wr; wr = wr->next) {
460 err = qib_post_one_send(qp, wr);
461 if (err) {
462 *bad_wr = wr;
463 goto bail;
464 }
465 }
466
467 /* Try to do the send work in the caller's context. */
468 qib_do_send(&qp->s_work);
469
470bail:
471 return err;
472}
473
474/**
475 * qib_post_receive - post a receive on a QP
476 * @ibqp: the QP to post the receive on
477 * @wr: the WR to post
478 * @bad_wr: the first bad WR is put here
479 *
480 * This may be called from interrupt context.
481 */
482static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
483 struct ib_recv_wr **bad_wr)
484{
485 struct qib_qp *qp = to_iqp(ibqp);
486 struct qib_rwq *wq = qp->r_rq.wq;
487 unsigned long flags;
488 int ret;
489
490 /* Check that state is OK to post receive. */
491 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
492 *bad_wr = wr;
493 ret = -EINVAL;
494 goto bail;
495 }
496
497 for (; wr; wr = wr->next) {
498 struct qib_rwqe *wqe;
499 u32 next;
500 int i;
501
502 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
503 *bad_wr = wr;
504 ret = -EINVAL;
505 goto bail;
506 }
507
508 spin_lock_irqsave(&qp->r_rq.lock, flags);
509 next = wq->head + 1;
510 if (next >= qp->r_rq.size)
511 next = 0;
512 if (next == wq->tail) {
513 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
514 *bad_wr = wr;
515 ret = -ENOMEM;
516 goto bail;
517 }
518
519 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
520 wqe->wr_id = wr->wr_id;
521 wqe->num_sge = wr->num_sge;
522 for (i = 0; i < wr->num_sge; i++)
523 wqe->sg_list[i] = wr->sg_list[i];
524 /* Make sure queue entry is written before the head index. */
525 smp_wmb();
526 wq->head = next;
527 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
528 }
529 ret = 0;
530
531bail:
532 return ret;
533}
534
535/**
536 * qib_qp_rcv - processing an incoming packet on a QP
537 * @rcd: the context pointer
538 * @hdr: the packet header
539 * @has_grh: true if the packet has a GRH
540 * @data: the packet data
541 * @tlen: the packet length
542 * @qp: the QP the packet came on
543 *
544 * This is called from qib_ib_rcv() to process an incoming packet
545 * for the given QP.
546 * Called at interrupt level.
547 */
548static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
549 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
550{
551 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
552
553 /* Check for valid receive state. */
554 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
555 ibp->n_pkt_drops++;
556 return;
557 }
558
559 switch (qp->ibqp.qp_type) {
560 case IB_QPT_SMI:
561 case IB_QPT_GSI:
562 if (ib_qib_disable_sma)
563 break;
564 /* FALLTHROUGH */
565 case IB_QPT_UD:
566 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
567 break;
568
569 case IB_QPT_RC:
570 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
571 break;
572
573 case IB_QPT_UC:
574 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
575 break;
576
577 default:
578 break;
579 }
580}
581
582/**
583 * qib_ib_rcv - process an incoming packet
584 * @rcd: the context pointer
585 * @rhdr: the header of the packet
586 * @data: the packet payload
587 * @tlen: the packet length
588 *
589 * This is called from qib_kreceive() to process an incoming packet at
590 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
591 */
592void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
593{
594 struct qib_pportdata *ppd = rcd->ppd;
595 struct qib_ibport *ibp = &ppd->ibport_data;
596 struct qib_ib_header *hdr = rhdr;
597 struct qib_other_headers *ohdr;
598 struct qib_qp *qp;
599 u32 qp_num;
600 int lnh;
601 u8 opcode;
602 u16 lid;
603
604 /* 24 == LRH+BTH+CRC */
605 if (unlikely(tlen < 24))
606 goto drop;
607
608 /* Check for a valid destination LID (see ch. 7.11.1). */
609 lid = be16_to_cpu(hdr->lrh[1]);
610 if (lid < QIB_MULTICAST_LID_BASE) {
611 lid &= ~((1 << ppd->lmc) - 1);
612 if (unlikely(lid != ppd->lid))
613 goto drop;
614 }
615
616 /* Check for GRH */
617 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
618 if (lnh == QIB_LRH_BTH)
619 ohdr = &hdr->u.oth;
620 else if (lnh == QIB_LRH_GRH) {
621 u32 vtf;
622
623 ohdr = &hdr->u.l.oth;
624 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
625 goto drop;
626 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
627 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
628 goto drop;
629 } else
630 goto drop;
631
632 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
633 ibp->opstats[opcode & 0x7f].n_bytes += tlen;
634 ibp->opstats[opcode & 0x7f].n_packets++;
635
636 /* Get the destination QP number. */
637 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
638 if (qp_num == QIB_MULTICAST_QPN) {
639 struct qib_mcast *mcast;
640 struct qib_mcast_qp *p;
641
642 if (lnh != QIB_LRH_GRH)
643 goto drop;
644 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
645 if (mcast == NULL)
646 goto drop;
647 ibp->n_multicast_rcv++;
648 list_for_each_entry_rcu(p, &mcast->qp_list, list)
649 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
650 /*
651 * Notify qib_multicast_detach() if it is waiting for us
652 * to finish.
653 */
654 if (atomic_dec_return(&mcast->refcount) <= 1)
655 wake_up(&mcast->wait);
656 } else {
657 qp = qib_lookup_qpn(ibp, qp_num);
658 if (!qp)
659 goto drop;
660 ibp->n_unicast_rcv++;
661 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
662 /*
663 * Notify qib_destroy_qp() if it is waiting
664 * for us to finish.
665 */
666 if (atomic_dec_and_test(&qp->refcount))
667 wake_up(&qp->wait);
668 }
669 return;
670
671drop:
672 ibp->n_pkt_drops++;
673}
674
675/*
676 * This is called from a timer to check for QPs
677 * which need kernel memory in order to send a packet.
678 */
679static void mem_timer(unsigned long data)
680{
681 struct qib_ibdev *dev = (struct qib_ibdev *) data;
682 struct list_head *list = &dev->memwait;
683 struct qib_qp *qp = NULL;
684 unsigned long flags;
685
686 spin_lock_irqsave(&dev->pending_lock, flags);
687 if (!list_empty(list)) {
688 qp = list_entry(list->next, struct qib_qp, iowait);
689 list_del_init(&qp->iowait);
690 atomic_inc(&qp->refcount);
691 if (!list_empty(list))
692 mod_timer(&dev->mem_timer, jiffies + 1);
693 }
694 spin_unlock_irqrestore(&dev->pending_lock, flags);
695
696 if (qp) {
697 spin_lock_irqsave(&qp->s_lock, flags);
698 if (qp->s_flags & QIB_S_WAIT_KMEM) {
699 qp->s_flags &= ~QIB_S_WAIT_KMEM;
700 qib_schedule_send(qp);
701 }
702 spin_unlock_irqrestore(&qp->s_lock, flags);
703 if (atomic_dec_and_test(&qp->refcount))
704 wake_up(&qp->wait);
705 }
706}
707
708static void update_sge(struct qib_sge_state *ss, u32 length)
709{
710 struct qib_sge *sge = &ss->sge;
711
712 sge->vaddr += length;
713 sge->length -= length;
714 sge->sge_length -= length;
715 if (sge->sge_length == 0) {
716 if (--ss->num_sge)
717 *sge = *ss->sg_list++;
718 } else if (sge->length == 0 && sge->mr->lkey) {
719 if (++sge->n >= QIB_SEGSZ) {
720 if (++sge->m >= sge->mr->mapsz)
721 return;
722 sge->n = 0;
723 }
724 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
725 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
726 }
727}
728
729#ifdef __LITTLE_ENDIAN
730static inline u32 get_upper_bits(u32 data, u32 shift)
731{
732 return data >> shift;
733}
734
735static inline u32 set_upper_bits(u32 data, u32 shift)
736{
737 return data << shift;
738}
739
740static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
741{
742 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
743 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
744 return data;
745}
746#else
747static inline u32 get_upper_bits(u32 data, u32 shift)
748{
749 return data << shift;
750}
751
752static inline u32 set_upper_bits(u32 data, u32 shift)
753{
754 return data >> shift;
755}
756
757static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
758{
759 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
760 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
761 return data;
762}
763#endif
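On a little-endian host, the three helpers above shift partial source words so that bytes left over from one read can be merged with the next read before a complete 32-bit word is written to the PIO buffer. A standalone sketch of one such merge step, with made-up byte values; only the little-endian variants are shown:

/* Standalone sketch (not driver code): stitching unaligned bytes into words. */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

static uint32_t get_upper_bits(uint32_t data, uint32_t shift) { return data >> shift; }
static uint32_t set_upper_bits(uint32_t data, uint32_t shift) { return data << shift; }
static uint32_t clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
	data <<= (4 - n) * BITS_PER_BYTE;
	data >>= (4 - n - off) * BITS_PER_BYTE;
	return data;
}

int main(void)
{
	/* one byte (0xAA) is already buffered at byte offset 0; the next
	 * source word is 0xDDCCBB99, so three of its bytes complete the
	 * outgoing word and the fourth is carried into the next one */
	uint32_t carry = clear_upper_bytes(0xAA, 1, 0);		/* 0x000000AA */
	uint32_t word = carry | set_upper_bits(0xDDCCBB99, 1 * BITS_PER_BYTE);
	uint32_t next = get_upper_bits(0xDDCCBB99, (4 - 1) * BITS_PER_BYTE);

	printf("word=%#010x next_carry=%#04x\n", word, next);	/* 0xccbb99aa 0xdd */
	return 0;
}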
764
765static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
766 u32 length, unsigned flush_wc)
767{
768 u32 extra = 0;
769 u32 data = 0;
770 u32 last;
771
772 while (1) {
773 u32 len = ss->sge.length;
774 u32 off;
775
776 if (len > length)
777 len = length;
778 if (len > ss->sge.sge_length)
779 len = ss->sge.sge_length;
780 BUG_ON(len == 0);
781 /* If the source address is not aligned, try to align it. */
782 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
783 if (off) {
784 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
785 ~(sizeof(u32) - 1));
786 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
787 u32 y;
788
789 y = sizeof(u32) - off;
790 if (len > y)
791 len = y;
792 if (len + extra >= sizeof(u32)) {
793 data |= set_upper_bits(v, extra *
794 BITS_PER_BYTE);
795 len = sizeof(u32) - extra;
796 if (len == length) {
797 last = data;
798 break;
799 }
800 __raw_writel(data, piobuf);
801 piobuf++;
802 extra = 0;
803 data = 0;
804 } else {
805 /* Clear unused upper bytes */
806 data |= clear_upper_bytes(v, len, extra);
807 if (len == length) {
808 last = data;
809 break;
810 }
811 extra += len;
812 }
813 } else if (extra) {
814 /* Source address is aligned. */
815 u32 *addr = (u32 *) ss->sge.vaddr;
816 int shift = extra * BITS_PER_BYTE;
817 int ushift = 32 - shift;
818 u32 l = len;
819
820 while (l >= sizeof(u32)) {
821 u32 v = *addr;
822
823 data |= set_upper_bits(v, shift);
824 __raw_writel(data, piobuf);
825 data = get_upper_bits(v, ushift);
826 piobuf++;
827 addr++;
828 l -= sizeof(u32);
829 }
830 /*
831			 * We still have 'l' (less than a full dword) bytes of this chunk left over.
832 */
833 if (l) {
834 u32 v = *addr;
835
836 if (l + extra >= sizeof(u32)) {
837 data |= set_upper_bits(v, shift);
838 len -= l + extra - sizeof(u32);
839 if (len == length) {
840 last = data;
841 break;
842 }
843 __raw_writel(data, piobuf);
844 piobuf++;
845 extra = 0;
846 data = 0;
847 } else {
848 /* Clear unused upper bytes */
849 data |= clear_upper_bytes(v, l, extra);
850 if (len == length) {
851 last = data;
852 break;
853 }
854 extra += l;
855 }
856 } else if (len == length) {
857 last = data;
858 break;
859 }
860 } else if (len == length) {
861 u32 w;
862
863 /*
864 * Need to round up for the last dword in the
865 * packet.
866 */
867 w = (len + 3) >> 2;
868 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
869 piobuf += w - 1;
870 last = ((u32 *) ss->sge.vaddr)[w - 1];
871 break;
872 } else {
873 u32 w = len >> 2;
874
875 qib_pio_copy(piobuf, ss->sge.vaddr, w);
876 piobuf += w;
877
878 extra = len & (sizeof(u32) - 1);
879 if (extra) {
880 u32 v = ((u32 *) ss->sge.vaddr)[w];
881
882 /* Clear unused upper bytes */
883 data = clear_upper_bytes(v, extra, 0);
884 }
885 }
886 update_sge(ss, len);
887 length -= len;
888 }
889 /* Update address before sending packet. */
890 update_sge(ss, length);
891 if (flush_wc) {
892		/* must flush everything written so far before the trigger word */
893 qib_flush_wc();
894 __raw_writel(last, piobuf);
895 /* be sure trigger word is written */
896 qib_flush_wc();
897 } else
898 __raw_writel(last, piobuf);
899}
900
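/*
 * Take a verbs tx request off the per-device free list.  If none are
 * available, queue the QP on dev->txwait with QIB_S_WAIT_TX set, clear
 * QIB_S_BUSY and return NULL with *retp = -EBUSY; qib_put_txreq() will
 * wake the QP once a request is returned to the list.
 */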
901static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
902 struct qib_qp *qp, int *retp)
903{
904 struct qib_verbs_txreq *tx;
905 unsigned long flags;
906
907 spin_lock_irqsave(&qp->s_lock, flags);
908 spin_lock(&dev->pending_lock);
909
910 if (!list_empty(&dev->txreq_free)) {
911 struct list_head *l = dev->txreq_free.next;
912
913 list_del(l);
914 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
915 *retp = 0;
916 } else {
917 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
918 list_empty(&qp->iowait)) {
919 dev->n_txwait++;
920 qp->s_flags |= QIB_S_WAIT_TX;
921 list_add_tail(&qp->iowait, &dev->txwait);
922 }
923 tx = NULL;
924 qp->s_flags &= ~QIB_S_BUSY;
925 *retp = -EBUSY;
926 }
927
928 spin_unlock(&dev->pending_lock);
929 spin_unlock_irqrestore(&qp->s_lock, flags);
930
931 return tx;
932}
933
934void qib_put_txreq(struct qib_verbs_txreq *tx)
935{
936 struct qib_ibdev *dev;
937 struct qib_qp *qp;
938 unsigned long flags;
939
940 qp = tx->qp;
941 dev = to_idev(qp->ibqp.device);
942
943 if (atomic_dec_and_test(&qp->refcount))
944 wake_up(&qp->wait);
945 if (tx->mr) {
946 atomic_dec(&tx->mr->refcount);
947 tx->mr = NULL;
948 }
949 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
950 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
951 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
952 tx->txreq.addr, tx->hdr_dwords << 2,
953 DMA_TO_DEVICE);
954 kfree(tx->align_buf);
955 }
956
957 spin_lock_irqsave(&dev->pending_lock, flags);
958
959 /* Put struct back on free list */
960 list_add(&tx->txreq.list, &dev->txreq_free);
961
962 if (!list_empty(&dev->txwait)) {
963 /* Wake up first QP wanting a free struct */
964 qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
965 list_del_init(&qp->iowait);
966 atomic_inc(&qp->refcount);
967 spin_unlock_irqrestore(&dev->pending_lock, flags);
968
969 spin_lock_irqsave(&qp->s_lock, flags);
970 if (qp->s_flags & QIB_S_WAIT_TX) {
971 qp->s_flags &= ~QIB_S_WAIT_TX;
972 qib_schedule_send(qp);
973 }
974 spin_unlock_irqrestore(&qp->s_lock, flags);
975
976 if (atomic_dec_and_test(&qp->refcount))
977 wake_up(&qp->wait);
978 } else
979 spin_unlock_irqrestore(&dev->pending_lock, flags);
980}
981
982/*
983 * This is called when there are send DMA descriptors that might be
984 * available.
985 *
986 * This is called with ppd->sdma_lock held.
987 */
988void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
989{
990 struct qib_qp *qp, *nqp;
991 struct qib_qp *qps[20];
992 struct qib_ibdev *dev;
993 unsigned i, n;
994
995 n = 0;
996 dev = &ppd->dd->verbs_dev;
997 spin_lock(&dev->pending_lock);
998
999 /* Search wait list for first QP wanting DMA descriptors. */
1000 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
1001 if (qp->port_num != ppd->port)
1002 continue;
1003 if (n == ARRAY_SIZE(qps))
1004 break;
1005 if (qp->s_tx->txreq.sg_count > avail)
1006 break;
1007 avail -= qp->s_tx->txreq.sg_count;
1008 list_del_init(&qp->iowait);
1009 atomic_inc(&qp->refcount);
1010 qps[n++] = qp;
1011 }
1012
1013 spin_unlock(&dev->pending_lock);
1014
1015 for (i = 0; i < n; i++) {
1016 qp = qps[i];
1017 spin_lock(&qp->s_lock);
1018 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
1019 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
1020 qib_schedule_send(qp);
1021 }
1022 spin_unlock(&qp->s_lock);
1023 if (atomic_dec_and_test(&qp->refcount))
1024 wake_up(&qp->wait);
1025 }
1026}
1027
1028/*
1029 * This is called with ppd->sdma_lock held.
1030 */
1031static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1032{
1033 struct qib_verbs_txreq *tx =
1034 container_of(cookie, struct qib_verbs_txreq, txreq);
1035 struct qib_qp *qp = tx->qp;
1036
1037 spin_lock(&qp->s_lock);
1038 if (tx->wqe)
1039 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1040 else if (qp->ibqp.qp_type == IB_QPT_RC) {
1041 struct qib_ib_header *hdr;
1042
1043 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1044 hdr = &tx->align_buf->hdr;
1045 else {
1046 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1047
1048 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1049 }
1050 qib_rc_send_complete(qp, hdr);
1051 }
1052 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1053 if (qp->state == IB_QPS_RESET)
1054 wake_up(&qp->wait_dma);
1055 else if (qp->s_flags & QIB_S_WAIT_DMA) {
1056 qp->s_flags &= ~QIB_S_WAIT_DMA;
1057 qib_schedule_send(qp);
1058 }
1059 }
1060 spin_unlock(&qp->s_lock);
1061
1062 qib_put_txreq(tx);
1063}
1064
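/*
 * Called when kernel memory for a send could not be allocated.  Queue
 * the QP on dev->memwait (arming mem_timer if the list was empty), set
 * QIB_S_WAIT_KMEM and return -EBUSY; mem_timer() later clears the flag
 * and reschedules the send.
 */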
1065static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
1066{
1067 unsigned long flags;
1068 int ret = 0;
1069
1070 spin_lock_irqsave(&qp->s_lock, flags);
1071 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1072 spin_lock(&dev->pending_lock);
1073 if (list_empty(&qp->iowait)) {
1074 if (list_empty(&dev->memwait))
1075 mod_timer(&dev->mem_timer, jiffies + 1);
1076 qp->s_flags |= QIB_S_WAIT_KMEM;
1077 list_add_tail(&qp->iowait, &dev->memwait);
1078 }
1079 spin_unlock(&dev->pending_lock);
1080 qp->s_flags &= ~QIB_S_BUSY;
1081 ret = -EBUSY;
1082 }
1083 spin_unlock_irqrestore(&qp->s_lock, flags);
1084
1085 return ret;
1086}
1087
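/*
 * Post a send via the SDMA engine.  When the payload fits in the
 * descriptor queue, the PBC and header come from the pre-mapped
 * dev->pio_hdrs array; otherwise the header and payload are copied to a
 * kmalloc'ed bounce buffer that is DMA-mapped for the transfer and
 * freed on completion (QIB_SDMA_TXREQ_F_FREEBUF).
 */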
1088static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
1089 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1090 u32 plen, u32 dwords)
1091{
1092 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1093 struct qib_devdata *dd = dd_from_dev(dev);
1094 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1095 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1096 struct qib_verbs_txreq *tx;
1097 struct qib_pio_header *phdr;
1098 u32 control;
1099 u32 ndesc;
1100 int ret;
1101
1102 tx = qp->s_tx;
1103 if (tx) {
1104 qp->s_tx = NULL;
1105 /* resend previously constructed packet */
1106 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1107 goto bail;
1108 }
1109
1110 tx = get_txreq(dev, qp, &ret);
1111 if (!tx)
1112 goto bail;
1113
1114 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1115 be16_to_cpu(hdr->lrh[0]) >> 12);
1116 tx->qp = qp;
1117 atomic_inc(&qp->refcount);
1118 tx->wqe = qp->s_wqe;
1119 tx->mr = qp->s_rdma_mr;
1120 if (qp->s_rdma_mr)
1121 qp->s_rdma_mr = NULL;
1122 tx->txreq.callback = sdma_complete;
1123 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1124 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1125 else
1126 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1127 if (plen + 1 > dd->piosize2kmax_dwords)
1128 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1129
1130 if (len) {
1131 /*
1132 * Don't try to DMA if it takes more descriptors than
1133 * the queue holds.
1134 */
1135 ndesc = qib_count_sge(ss, len);
1136 if (ndesc >= ppd->sdma_descq_cnt)
1137 ndesc = 0;
1138 } else
1139 ndesc = 1;
1140 if (ndesc) {
1141 phdr = &dev->pio_hdrs[tx->hdr_inx];
1142 phdr->pbc[0] = cpu_to_le32(plen);
1143 phdr->pbc[1] = cpu_to_le32(control);
1144 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1145 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1146 tx->txreq.sg_count = ndesc;
1147 tx->txreq.addr = dev->pio_hdrs_phys +
1148 tx->hdr_inx * sizeof(struct qib_pio_header);
1149 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1150 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1151 goto bail;
1152 }
1153
1154 /* Allocate a buffer and copy the header and payload to it. */
1155 tx->hdr_dwords = plen + 1;
1156 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1157 if (!phdr)
1158 goto err_tx;
1159 phdr->pbc[0] = cpu_to_le32(plen);
1160 phdr->pbc[1] = cpu_to_le32(control);
1161 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1162 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1163
1164 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1165 tx->hdr_dwords << 2, DMA_TO_DEVICE);
1166 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1167 goto map_err;
1168 tx->align_buf = phdr;
1169 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1170 tx->txreq.sg_count = 1;
1171 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1172 goto unaligned;
1173
1174map_err:
1175 kfree(phdr);
1176err_tx:
1177 qib_put_txreq(tx);
1178 ret = wait_kmem(dev, qp);
1179unaligned:
1180 ibp->n_unaligned++;
1181bail:
1182 return ret;
1183}
1184
1185/*
1186 * If the QP is no longer in a send-capable state (e.g. it has moved
1187 * to the error state), return zero so the send work request is flushed.
1188 */
1189static int no_bufs_available(struct qib_qp *qp)
1190{
1191 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1192 struct qib_devdata *dd;
1193 unsigned long flags;
1194 int ret = 0;
1195
1196 /*
1197	 * Note that as soon as the PIO-avail interrupt is enabled via
1198	 * dd->f_wantpiobuf_intr(), and possibly before that call returns,
1199	 * qib_ib_piobufavail() could be called.  Therefore, put the QP on
1200	 * the I/O wait list before enabling the interrupt.
1201 */
1202 spin_lock_irqsave(&qp->s_lock, flags);
1203 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1204 spin_lock(&dev->pending_lock);
1205 if (list_empty(&qp->iowait)) {
1206 dev->n_piowait++;
1207 qp->s_flags |= QIB_S_WAIT_PIO;
1208 list_add_tail(&qp->iowait, &dev->piowait);
1209 dd = dd_from_dev(dev);
1210 dd->f_wantpiobuf_intr(dd, 1);
1211 }
1212 spin_unlock(&dev->pending_lock);
1213 qp->s_flags &= ~QIB_S_BUSY;
1214 ret = -EBUSY;
1215 }
1216 spin_unlock_irqrestore(&qp->s_lock, flags);
1217 return ret;
1218}
1219
1220static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1221 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1222 u32 plen, u32 dwords)
1223{
1224 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1225 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1226 u32 *hdr = (u32 *) ibhdr;
1227 u32 __iomem *piobuf_orig;
1228 u32 __iomem *piobuf;
1229 u64 pbc;
1230 unsigned long flags;
1231 unsigned flush_wc;
1232 u32 control;
1233 u32 pbufn;
1234
1235 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1236 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1237 pbc = ((u64) control << 32) | plen;
1238 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1239 if (unlikely(piobuf == NULL))
1240 return no_bufs_available(qp);
1241
1242 /*
1243 * Write the pbc.
1244 * We have to flush after the PBC for correctness on some cpus
1245	 * or the WC buffer can be written out of order.
1246 */
1247 writeq(pbc, piobuf);
1248 piobuf_orig = piobuf;
1249 piobuf += 2;
1250
1251 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1252 if (len == 0) {
1253 /*
1254 * If there is just the header portion, must flush before
1255 * writing last word of header for correctness, and after
1256 * the last header word (trigger word).
1257 */
1258 if (flush_wc) {
1259 qib_flush_wc();
1260 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1261 qib_flush_wc();
1262 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1263 qib_flush_wc();
1264 } else
1265 qib_pio_copy(piobuf, hdr, hdrwords);
1266 goto done;
1267 }
1268
1269 if (flush_wc)
1270 qib_flush_wc();
1271 qib_pio_copy(piobuf, hdr, hdrwords);
1272 piobuf += hdrwords;
1273
1274 /* The common case is aligned and contained in one segment. */
1275 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1276 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1277 u32 *addr = (u32 *) ss->sge.vaddr;
1278
1279 /* Update address before sending packet. */
1280 update_sge(ss, len);
1281 if (flush_wc) {
1282 qib_pio_copy(piobuf, addr, dwords - 1);
1283			/* must flush everything written so far before the trigger word */
1284 qib_flush_wc();
1285 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1286 /* be sure trigger word is written */
1287 qib_flush_wc();
1288 } else
1289 qib_pio_copy(piobuf, addr, dwords);
1290 goto done;
1291 }
1292 copy_io(piobuf, ss, len, flush_wc);
1293done:
1294 if (dd->flags & QIB_USE_SPCL_TRIG) {
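		/*
		 * Chips flagged QIB_USE_SPCL_TRIG need a magic "special
		 * trigger" word written near the end of the buffer (dword
		 * 1023 for 2K buffers, 2047 for 4K) to complete the send.
		 */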
1295 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1296 qib_flush_wc();
1297 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1298 }
1299 qib_sendbuf_done(dd, pbufn);
1300 if (qp->s_rdma_mr) {
1301 atomic_dec(&qp->s_rdma_mr->refcount);
1302 qp->s_rdma_mr = NULL;
1303 }
1304 if (qp->s_wqe) {
1305 spin_lock_irqsave(&qp->s_lock, flags);
1306 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1307 spin_unlock_irqrestore(&qp->s_lock, flags);
1308 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1309 spin_lock_irqsave(&qp->s_lock, flags);
1310 qib_rc_send_complete(qp, ibhdr);
1311 spin_unlock_irqrestore(&qp->s_lock, flags);
1312 }
1313 return 0;
1314}
1315
1316/**
1317 * qib_verbs_send - send a packet
1318 * @qp: the QP to send on
1319 * @hdr: the packet header
1320 * @hdrwords: the number of 32-bit words in the header
1321 * @ss: the SGE to send
1322 * @len: the length of the packet in bytes
1323 *
1324 * Return zero if packet is sent or queued OK.
1325 * Otherwise, return non-zero and clear the QIB_S_BUSY bit in qp->s_flags.
1326 */
1327int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
1328 u32 hdrwords, struct qib_sge_state *ss, u32 len)
1329{
1330 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1331 u32 plen;
1332 int ret;
1333 u32 dwords = (len + 3) >> 2;
1334
1335 /*
1336 * Calculate the send buffer trigger address.
1337 * The +1 counts for the pbc control dword following the pbc length.
1338 */
1339 plen = hdrwords + dwords + 1;
1340
1341 /*
1342 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1343 * can defer SDMA restart until link goes ACTIVE without
1344 * worrying about just how we got there.
1345 */
1346 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1347 !(dd->flags & QIB_HAS_SEND_DMA))
1348 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1349 plen, dwords);
1350 else
1351 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1352 plen, dwords);
1353
1354 return ret;
1355}
1356
1357int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1358 u64 *rwords, u64 *spkts, u64 *rpkts,
1359 u64 *xmit_wait)
1360{
1361 int ret;
1362 struct qib_devdata *dd = ppd->dd;
1363
1364 if (!(dd->flags & QIB_PRESENT)) {
1365 /* no hardware, freeze, etc. */
1366 ret = -EINVAL;
1367 goto bail;
1368 }
1369 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1370 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1371 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1372 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1373 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1374
1375 ret = 0;
1376
1377bail:
1378 return ret;
1379}
1380
1381/**
1382 * qib_get_counters - get various chip counters
1383 * @ppd: the physical port to get counters for
1384 * @cntrs: counters are placed here
1385 *
1386 * Return the counters needed by recv_pma_get_portcounters().
1387 */
1388int qib_get_counters(struct qib_pportdata *ppd,
1389 struct qib_verbs_counters *cntrs)
1390{
1391 int ret;
1392
1393 if (!(ppd->dd->flags & QIB_PRESENT)) {
1394 /* no hardware, freeze, etc. */
1395 ret = -EINVAL;
1396 goto bail;
1397 }
1398 cntrs->symbol_error_counter =
1399 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1400 cntrs->link_error_recovery_counter =
1401 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1402 /*
1403 * The link downed counter counts when the other side downs the
1404 * connection. We add in the number of times we downed the link
1405 * due to local link integrity errors to compensate.
1406 */
1407 cntrs->link_downed_counter =
1408 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1409 cntrs->port_rcv_errors =
1410 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1411 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1412 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1413 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1414 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1415 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1416 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1417 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1418 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1419 cntrs->port_rcv_errors +=
1420 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1421 cntrs->port_rcv_errors +=
1422 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1423 cntrs->port_rcv_remphys_errors =
1424 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1425 cntrs->port_xmit_discards =
1426 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1427 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1428 QIBPORTCNTR_WORDSEND);
1429 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1430 QIBPORTCNTR_WORDRCV);
1431 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1432 QIBPORTCNTR_PKTSEND);
1433 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1434 QIBPORTCNTR_PKTRCV);
1435 cntrs->local_link_integrity_errors =
1436 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1437 cntrs->excessive_buffer_overrun_errors =
1438 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1439 cntrs->vl15_dropped =
1440 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1441
1442 ret = 0;
1443
1444bail:
1445 return ret;
1446}
1447
1448/**
1449 * qib_ib_piobufavail - callback when a PIO buffer is available
1450 * @dd: the device pointer
1451 *
1452 * This is called from qib_intr() at interrupt level when a PIO buffer is
1453 * available after qib_verbs_send() returned an error that no buffers were
1454 * available. Disable the interrupt if there are no more QPs waiting.
1455 */
1456void qib_ib_piobufavail(struct qib_devdata *dd)
1457{
1458 struct qib_ibdev *dev = &dd->verbs_dev;
1459 struct list_head *list;
1460 struct qib_qp *qps[5];
1461 struct qib_qp *qp;
1462 unsigned long flags;
1463 unsigned i, n;
1464
1465 list = &dev->piowait;
1466 n = 0;
1467
1468 /*
1469 * Note: checking that the piowait list is empty and clearing
1470 * the buffer available interrupt needs to be atomic or we
1471 * could end up with QPs on the wait list with the interrupt
1472 * disabled.
1473 */
1474 spin_lock_irqsave(&dev->pending_lock, flags);
1475 while (!list_empty(list)) {
1476 if (n == ARRAY_SIZE(qps))
1477 goto full;
1478 qp = list_entry(list->next, struct qib_qp, iowait);
1479 list_del_init(&qp->iowait);
1480 atomic_inc(&qp->refcount);
1481 qps[n++] = qp;
1482 }
1483 dd->f_wantpiobuf_intr(dd, 0);
1484full:
1485 spin_unlock_irqrestore(&dev->pending_lock, flags);
1486
1487 for (i = 0; i < n; i++) {
1488 qp = qps[i];
1489
1490 spin_lock_irqsave(&qp->s_lock, flags);
1491 if (qp->s_flags & QIB_S_WAIT_PIO) {
1492 qp->s_flags &= ~QIB_S_WAIT_PIO;
1493 qib_schedule_send(qp);
1494 }
1495 spin_unlock_irqrestore(&qp->s_lock, flags);
1496
1497 /* Notify qib_destroy_qp() if it is waiting. */
1498 if (atomic_dec_and_test(&qp->refcount))
1499 wake_up(&qp->wait);
1500 }
1501}
1502
1503static int qib_query_device(struct ib_device *ibdev,
1504 struct ib_device_attr *props)
1505{
1506 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1507 struct qib_ibdev *dev = to_idev(ibdev);
1508
1509 memset(props, 0, sizeof(*props));
1510
1511 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1512 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1513 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1514 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1515 props->page_size_cap = PAGE_SIZE;
1516 props->vendor_id =
1517 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1518 props->vendor_part_id = dd->deviceid;
1519 props->hw_ver = dd->minrev;
1520 props->sys_image_guid = ib_qib_sys_image_guid;
1521 props->max_mr_size = ~0ULL;
1522 props->max_qp = ib_qib_max_qps;
1523 props->max_qp_wr = ib_qib_max_qp_wrs;
1524 props->max_sge = ib_qib_max_sges;
1525 props->max_cq = ib_qib_max_cqs;
1526 props->max_ah = ib_qib_max_ahs;
1527 props->max_cqe = ib_qib_max_cqes;
1528 props->max_mr = dev->lk_table.max;
1529 props->max_fmr = dev->lk_table.max;
1530 props->max_map_per_fmr = 32767;
1531 props->max_pd = ib_qib_max_pds;
1532 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1533 props->max_qp_init_rd_atom = 255;
1534 /* props->max_res_rd_atom */
1535 props->max_srq = ib_qib_max_srqs;
1536 props->max_srq_wr = ib_qib_max_srq_wrs;
1537 props->max_srq_sge = ib_qib_max_srq_sges;
1538 /* props->local_ca_ack_delay */
1539 props->atomic_cap = IB_ATOMIC_GLOB;
1540 props->max_pkeys = qib_get_npkeys(dd);
1541 props->max_mcast_grp = ib_qib_max_mcast_grps;
1542 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1543 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1544 props->max_mcast_grp;
1545
1546 return 0;
1547}
1548
1549static int qib_query_port(struct ib_device *ibdev, u8 port,
1550 struct ib_port_attr *props)
1551{
1552 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1553 struct qib_ibport *ibp = to_iport(ibdev, port);
1554 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1555 enum ib_mtu mtu;
1556 u16 lid = ppd->lid;
1557
1558 memset(props, 0, sizeof(*props));
1559 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1560 props->lmc = ppd->lmc;
1561 props->sm_lid = ibp->sm_lid;
1562 props->sm_sl = ibp->sm_sl;
1563 props->state = dd->f_iblink_state(ppd->lastibcstat);
1564 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1565 props->port_cap_flags = ibp->port_cap_flags;
1566 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1567 props->max_msg_sz = 0x80000000;
1568 props->pkey_tbl_len = qib_get_npkeys(dd);
1569 props->bad_pkey_cntr = ibp->pkey_violations;
1570 props->qkey_viol_cntr = ibp->qkey_violations;
1571 props->active_width = ppd->link_width_active;
1572 /* See rate_show() */
1573 props->active_speed = ppd->link_speed_active;
1574 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1575 props->init_type_reply = 0;
1576
1577 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1578 switch (ppd->ibmtu) {
1579 case 4096:
1580 mtu = IB_MTU_4096;
1581 break;
1582 case 2048:
1583 mtu = IB_MTU_2048;
1584 break;
1585 case 1024:
1586 mtu = IB_MTU_1024;
1587 break;
1588 case 512:
1589 mtu = IB_MTU_512;
1590 break;
1591 case 256:
1592 mtu = IB_MTU_256;
1593 break;
1594 default:
1595 mtu = IB_MTU_2048;
1596 }
1597 props->active_mtu = mtu;
1598 props->subnet_timeout = ibp->subnet_timeout;
1599
1600 return 0;
1601}
1602
1603static int qib_modify_device(struct ib_device *device,
1604 int device_modify_mask,
1605 struct ib_device_modify *device_modify)
1606{
1607 struct qib_devdata *dd = dd_from_ibdev(device);
1608 unsigned i;
1609 int ret;
1610
1611 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1612 IB_DEVICE_MODIFY_NODE_DESC)) {
1613 ret = -EOPNOTSUPP;
1614 goto bail;
1615 }
1616
1617 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1618 memcpy(device->node_desc, device_modify->node_desc, 64);
1619 for (i = 0; i < dd->num_pports; i++) {
1620 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1621
1622 qib_node_desc_chg(ibp);
1623 }
1624 }
1625
1626 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1627 ib_qib_sys_image_guid =
1628 cpu_to_be64(device_modify->sys_image_guid);
1629 for (i = 0; i < dd->num_pports; i++) {
1630 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1631
1632 qib_sys_guid_chg(ibp);
1633 }
1634 }
1635
1636 ret = 0;
1637
1638bail:
1639 return ret;
1640}
1641
1642static int qib_modify_port(struct ib_device *ibdev, u8 port,
1643 int port_modify_mask, struct ib_port_modify *props)
1644{
1645 struct qib_ibport *ibp = to_iport(ibdev, port);
1646 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1647
1648 ibp->port_cap_flags |= props->set_port_cap_mask;
1649 ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1650 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1651 qib_cap_mask_chg(ibp);
1652 if (port_modify_mask & IB_PORT_SHUTDOWN)
1653 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1654 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1655 ibp->qkey_violations = 0;
1656 return 0;
1657}
1658
1659static int qib_query_gid(struct ib_device *ibdev, u8 port,
1660 int index, union ib_gid *gid)
1661{
1662 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1663 int ret = 0;
1664
1665 if (!port || port > dd->num_pports)
1666 ret = -EINVAL;
1667 else {
1668 struct qib_ibport *ibp = to_iport(ibdev, port);
1669 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1670
1671 gid->global.subnet_prefix = ibp->gid_prefix;
1672 if (index == 0)
1673 gid->global.interface_id = ppd->guid;
1674 else if (index < QIB_GUIDS_PER_PORT)
1675 gid->global.interface_id = ibp->guids[index - 1];
1676 else
1677 ret = -EINVAL;
1678 }
1679
1680 return ret;
1681}
1682
1683static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1684 struct ib_ucontext *context,
1685 struct ib_udata *udata)
1686{
1687 struct qib_ibdev *dev = to_idev(ibdev);
1688 struct qib_pd *pd;
1689 struct ib_pd *ret;
1690
1691 /*
1692 * This is actually totally arbitrary. Some correctness tests
1693 * assume there's a maximum number of PDs that can be allocated.
1694 * We don't actually have this limit, but we fail the test if
1695 * we allow allocations of more than we report for this value.
1696 */
1697
1698 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1699 if (!pd) {
1700 ret = ERR_PTR(-ENOMEM);
1701 goto bail;
1702 }
1703
1704 spin_lock(&dev->n_pds_lock);
1705 if (dev->n_pds_allocated == ib_qib_max_pds) {
1706 spin_unlock(&dev->n_pds_lock);
1707 kfree(pd);
1708 ret = ERR_PTR(-ENOMEM);
1709 goto bail;
1710 }
1711
1712 dev->n_pds_allocated++;
1713 spin_unlock(&dev->n_pds_lock);
1714
1715 /* ib_alloc_pd() will initialize pd->ibpd. */
1716 pd->user = udata != NULL;
1717
1718 ret = &pd->ibpd;
1719
1720bail:
1721 return ret;
1722}
1723
1724static int qib_dealloc_pd(struct ib_pd *ibpd)
1725{
1726 struct qib_pd *pd = to_ipd(ibpd);
1727 struct qib_ibdev *dev = to_idev(ibpd->device);
1728
1729 spin_lock(&dev->n_pds_lock);
1730 dev->n_pds_allocated--;
1731 spin_unlock(&dev->n_pds_lock);
1732
1733 kfree(pd);
1734
1735 return 0;
1736}
1737
1738int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1739{
1740 /* A multicast address requires a GRH (see ch. 8.4.1). */
1741 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
1742 ah_attr->dlid != QIB_PERMISSIVE_LID &&
1743 !(ah_attr->ah_flags & IB_AH_GRH))
1744 goto bail;
1745 if ((ah_attr->ah_flags & IB_AH_GRH) &&
1746 ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
1747 goto bail;
1748 if (ah_attr->dlid == 0)
1749 goto bail;
1750 if (ah_attr->port_num < 1 ||
1751 ah_attr->port_num > ibdev->phys_port_cnt)
1752 goto bail;
1753 if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
1754 ib_rate_to_mult(ah_attr->static_rate) < 0)
1755 goto bail;
1756 if (ah_attr->sl > 15)
1757 goto bail;
1758 return 0;
1759bail:
1760 return -EINVAL;
1761}
1762
1763/**
1764 * qib_create_ah - create an address handle
1765 * @pd: the protection domain
1766 * @ah_attr: the attributes of the AH
1767 *
1768 * This may be called from interrupt context.
1769 */
1770static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1771 struct ib_ah_attr *ah_attr)
1772{
1773 struct qib_ah *ah;
1774 struct ib_ah *ret;
1775 struct qib_ibdev *dev = to_idev(pd->device);
1776 unsigned long flags;
1777
1778 if (qib_check_ah(pd->device, ah_attr)) {
1779 ret = ERR_PTR(-EINVAL);
1780 goto bail;
1781 }
1782
1783 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1784 if (!ah) {
1785 ret = ERR_PTR(-ENOMEM);
1786 goto bail;
1787 }
1788
1789 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1790 if (dev->n_ahs_allocated == ib_qib_max_ahs) {
1791 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1792 kfree(ah);
1793 ret = ERR_PTR(-ENOMEM);
1794 goto bail;
1795 }
1796
1797 dev->n_ahs_allocated++;
1798 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1799
1800 /* ib_create_ah() will initialize ah->ibah. */
1801 ah->attr = *ah_attr;
1802 atomic_set(&ah->refcount, 0);
1803
1804 ret = &ah->ibah;
1805
1806bail:
1807 return ret;
1808}
1809
1810/**
1811 * qib_destroy_ah - destroy an address handle
1812 * @ibah: the AH to destroy
1813 *
1814 * This may be called from interrupt context.
1815 */
1816static int qib_destroy_ah(struct ib_ah *ibah)
1817{
1818 struct qib_ibdev *dev = to_idev(ibah->device);
1819 struct qib_ah *ah = to_iah(ibah);
1820 unsigned long flags;
1821
1822 if (atomic_read(&ah->refcount) != 0)
1823 return -EBUSY;
1824
1825 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1826 dev->n_ahs_allocated--;
1827 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1828
1829 kfree(ah);
1830
1831 return 0;
1832}
1833
1834static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1835{
1836 struct qib_ah *ah = to_iah(ibah);
1837
1838 if (qib_check_ah(ibah->device, ah_attr))
1839 return -EINVAL;
1840
1841 ah->attr = *ah_attr;
1842
1843 return 0;
1844}
1845
1846static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1847{
1848 struct qib_ah *ah = to_iah(ibah);
1849
1850 *ah_attr = ah->attr;
1851
1852 return 0;
1853}
1854
1855/**
1856 * qib_get_npkeys - return the size of the PKEY table for context 0
1857 * @dd: the qlogic_ib device
1858 */
1859unsigned qib_get_npkeys(struct qib_devdata *dd)
1860{
1861 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1862}
1863
1864/*
1865 * Return the indexed PKEY from the port PKEY table.
1866 * No need to validate rcd[ctxt]; the port is setup if we are here.
1867 */
1868unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1869{
1870 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1871 struct qib_devdata *dd = ppd->dd;
1872 unsigned ctxt = ppd->hw_pidx;
1873 unsigned ret;
1874
1875 /* dd->rcd null if mini_init or some init failures */
1876 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1877 ret = 0;
1878 else
1879 ret = dd->rcd[ctxt]->pkeys[index];
1880
1881 return ret;
1882}
1883
1884static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1885 u16 *pkey)
1886{
1887 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1888 int ret;
1889
1890 if (index >= qib_get_npkeys(dd)) {
1891 ret = -EINVAL;
1892 goto bail;
1893 }
1894
1895 *pkey = qib_get_pkey(to_iport(ibdev, port), index);
1896 ret = 0;
1897
1898bail:
1899 return ret;
1900}
1901
1902/**
1903 * qib_alloc_ucontext - allocate a ucontext
1904 * @ibdev: the infiniband device
1905 * @udata: not used by the QLogic_IB driver
1906 */
1907
1908static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1909 struct ib_udata *udata)
1910{
1911 struct qib_ucontext *context;
1912 struct ib_ucontext *ret;
1913
1914 context = kmalloc(sizeof *context, GFP_KERNEL);
1915 if (!context) {
1916 ret = ERR_PTR(-ENOMEM);
1917 goto bail;
1918 }
1919
1920 ret = &context->ibucontext;
1921
1922bail:
1923 return ret;
1924}
1925
1926static int qib_dealloc_ucontext(struct ib_ucontext *context)
1927{
1928 kfree(to_iucontext(context));
1929 return 0;
1930}
1931
1932static void init_ibport(struct qib_pportdata *ppd)
1933{
1934 struct qib_verbs_counters cntrs;
1935 struct qib_ibport *ibp = &ppd->ibport_data;
1936
1937 spin_lock_init(&ibp->lock);
1938 /* Set the prefix to the default value (see ch. 4.1.1) */
1939 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
1940 ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1941 ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1942 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1943 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1944 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1945 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1946 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1947 ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1948 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1949 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1950 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1951 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1952 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1953
1954 /* Snapshot current HW counters to "clear" them. */
1955 qib_get_counters(ppd, &cntrs);
1956 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1957 ibp->z_link_error_recovery_counter =
1958 cntrs.link_error_recovery_counter;
1959 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1960 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1961 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1962 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1963 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1964 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1965 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1966 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1967 ibp->z_local_link_integrity_errors =
1968 cntrs.local_link_integrity_errors;
1969 ibp->z_excessive_buffer_overrun_errors =
1970 cntrs.excessive_buffer_overrun_errors;
1971 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1972}
1973
1974/**
1975 * qib_register_ib_device - register our device with the infiniband core
1976 * @dd: the device data structure
1977 * Return 0 on success, otherwise a negative errno.
1978 */
1979int qib_register_ib_device(struct qib_devdata *dd)
1980{
1981 struct qib_ibdev *dev = &dd->verbs_dev;
1982 struct ib_device *ibdev = &dev->ibdev;
1983 struct qib_pportdata *ppd = dd->pport;
1984 unsigned i, lk_tab_size;
1985 int ret;
1986
1987 dev->qp_table_size = ib_qib_qp_table_size;
1988 dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
1989 GFP_KERNEL);
1990 if (!dev->qp_table) {
1991 ret = -ENOMEM;
1992 goto err_qpt;
1993 }
1994
1995 for (i = 0; i < dd->num_pports; i++)
1996 init_ibport(ppd + i);
1997
1998 /* Only need to initialize non-zero fields. */
1999 spin_lock_init(&dev->qpt_lock);
2000 spin_lock_init(&dev->n_pds_lock);
2001 spin_lock_init(&dev->n_ahs_lock);
2002 spin_lock_init(&dev->n_cqs_lock);
2003 spin_lock_init(&dev->n_qps_lock);
2004 spin_lock_init(&dev->n_srqs_lock);
2005 spin_lock_init(&dev->n_mcast_grps_lock);
2006 init_timer(&dev->mem_timer);
2007 dev->mem_timer.function = mem_timer;
2008 dev->mem_timer.data = (unsigned long) dev;
2009
2010 qib_init_qpn_table(dd, &dev->qpn_table);
2011
2012 /*
2013 * The top ib_qib_lkey_table_size bits are used to index the
2014 * table. The lower 8 bits can be owned by the user (copied from
2015 * the LKEY). The remaining bits act as a generation number or tag.
2016 */
2017 spin_lock_init(&dev->lk_table.lock);
2018 dev->lk_table.max = 1 << ib_qib_lkey_table_size;
2019 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2020 dev->lk_table.table = (struct qib_mregion **)
2021 __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
2022 if (dev->lk_table.table == NULL) {
2023 ret = -ENOMEM;
2024 goto err_lk;
2025 }
2026 memset(dev->lk_table.table, 0, lk_tab_size);
2027 INIT_LIST_HEAD(&dev->pending_mmaps);
2028 spin_lock_init(&dev->pending_lock);
2029 dev->mmap_offset = PAGE_SIZE;
2030 spin_lock_init(&dev->mmap_offset_lock);
2031 INIT_LIST_HEAD(&dev->piowait);
2032 INIT_LIST_HEAD(&dev->dmawait);
2033 INIT_LIST_HEAD(&dev->txwait);
2034 INIT_LIST_HEAD(&dev->memwait);
2035 INIT_LIST_HEAD(&dev->txreq_free);
2036
2037 if (ppd->sdma_descq_cnt) {
2038 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
2039 ppd->sdma_descq_cnt *
2040 sizeof(struct qib_pio_header),
2041 &dev->pio_hdrs_phys,
2042 GFP_KERNEL);
2043 if (!dev->pio_hdrs) {
2044 ret = -ENOMEM;
2045 goto err_hdrs;
2046 }
2047 }
2048
2049 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2050 struct qib_verbs_txreq *tx;
2051
2052 tx = kzalloc(sizeof *tx, GFP_KERNEL);
2053 if (!tx) {
2054 ret = -ENOMEM;
2055 goto err_tx;
2056 }
2057 tx->hdr_inx = i;
2058 list_add(&tx->txreq.list, &dev->txreq_free);
2059 }
2060
2061 /*
2062 * The system image GUID is supposed to be the same for all
2063 * IB HCAs in a single system but since there can be other
2064 * device types in the system, we can't be sure this is unique.
2065 */
2066 if (!ib_qib_sys_image_guid)
2067 ib_qib_sys_image_guid = ppd->guid;
2068
2069 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2070 ibdev->owner = THIS_MODULE;
2071 ibdev->node_guid = ppd->guid;
2072 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2073 ibdev->uverbs_cmd_mask =
2074 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2075 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2076 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2077 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2078 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2079 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2080 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
2081 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2082 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2083 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2084 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2085 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2086 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2087 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2088 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2089 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2090 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2091 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2092 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2093 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2094 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2095 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2096 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2097 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2098 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2099 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2100 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2101 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2102 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2103 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2104 ibdev->node_type = RDMA_NODE_IB_CA;
2105 ibdev->phys_port_cnt = dd->num_pports;
2106 ibdev->num_comp_vectors = 1;
2107 ibdev->dma_device = &dd->pcidev->dev;
2108 ibdev->query_device = qib_query_device;
2109 ibdev->modify_device = qib_modify_device;
2110 ibdev->query_port = qib_query_port;
2111 ibdev->modify_port = qib_modify_port;
2112 ibdev->query_pkey = qib_query_pkey;
2113 ibdev->query_gid = qib_query_gid;
2114 ibdev->alloc_ucontext = qib_alloc_ucontext;
2115 ibdev->dealloc_ucontext = qib_dealloc_ucontext;
2116 ibdev->alloc_pd = qib_alloc_pd;
2117 ibdev->dealloc_pd = qib_dealloc_pd;
2118 ibdev->create_ah = qib_create_ah;
2119 ibdev->destroy_ah = qib_destroy_ah;
2120 ibdev->modify_ah = qib_modify_ah;
2121 ibdev->query_ah = qib_query_ah;
2122 ibdev->create_srq = qib_create_srq;
2123 ibdev->modify_srq = qib_modify_srq;
2124 ibdev->query_srq = qib_query_srq;
2125 ibdev->destroy_srq = qib_destroy_srq;
2126 ibdev->create_qp = qib_create_qp;
2127 ibdev->modify_qp = qib_modify_qp;
2128 ibdev->query_qp = qib_query_qp;
2129 ibdev->destroy_qp = qib_destroy_qp;
2130 ibdev->post_send = qib_post_send;
2131 ibdev->post_recv = qib_post_receive;
2132 ibdev->post_srq_recv = qib_post_srq_receive;
2133 ibdev->create_cq = qib_create_cq;
2134 ibdev->destroy_cq = qib_destroy_cq;
2135 ibdev->resize_cq = qib_resize_cq;
2136 ibdev->poll_cq = qib_poll_cq;
2137 ibdev->req_notify_cq = qib_req_notify_cq;
2138 ibdev->get_dma_mr = qib_get_dma_mr;
2139 ibdev->reg_phys_mr = qib_reg_phys_mr;
2140 ibdev->reg_user_mr = qib_reg_user_mr;
2141 ibdev->dereg_mr = qib_dereg_mr;
2142 ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
2143 ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
2144 ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
2145 ibdev->alloc_fmr = qib_alloc_fmr;
2146 ibdev->map_phys_fmr = qib_map_phys_fmr;
2147 ibdev->unmap_fmr = qib_unmap_fmr;
2148 ibdev->dealloc_fmr = qib_dealloc_fmr;
2149 ibdev->attach_mcast = qib_multicast_attach;
2150 ibdev->detach_mcast = qib_multicast_detach;
2151 ibdev->process_mad = qib_process_mad;
2152 ibdev->mmap = qib_mmap;
2153 ibdev->dma_ops = &qib_dma_mapping_ops;
2154
2155 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2156 QIB_IDSTR " %s", init_utsname()->nodename);
2157
2158 ret = ib_register_device(ibdev, qib_create_port_files);
2159 if (ret)
2160 goto err_reg;
2161
2162 ret = qib_create_agents(dev);
2163 if (ret)
2164 goto err_agents;
2165
2166 if (qib_verbs_register_sysfs(dd))
2167 goto err_class;
2168
2169 goto bail;
2170
2171err_class:
2172 qib_free_agents(dev);
2173err_agents:
2174 ib_unregister_device(ibdev);
2175err_reg:
2176err_tx:
2177 while (!list_empty(&dev->txreq_free)) {
2178 struct list_head *l = dev->txreq_free.next;
2179 struct qib_verbs_txreq *tx;
2180
2181 list_del(l);
2182 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2183 kfree(tx);
2184 }
2185 if (ppd->sdma_descq_cnt)
2186 dma_free_coherent(&dd->pcidev->dev,
2187 ppd->sdma_descq_cnt *
2188 sizeof(struct qib_pio_header),
2189 dev->pio_hdrs, dev->pio_hdrs_phys);
2190err_hdrs:
2191 free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
2192err_lk:
2193 kfree(dev->qp_table);
2194err_qpt:
2195 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2196bail:
2197 return ret;
2198}
2199
2200void qib_unregister_ib_device(struct qib_devdata *dd)
2201{
2202 struct qib_ibdev *dev = &dd->verbs_dev;
2203 struct ib_device *ibdev = &dev->ibdev;
2204 u32 qps_inuse;
2205 unsigned lk_tab_size;
2206
2207 qib_verbs_unregister_sysfs(dd);
2208
2209 qib_free_agents(dev);
2210
2211 ib_unregister_device(ibdev);
2212
2213 if (!list_empty(&dev->piowait))
2214 qib_dev_err(dd, "piowait list not empty!\n");
2215 if (!list_empty(&dev->dmawait))
2216 qib_dev_err(dd, "dmawait list not empty!\n");
2217 if (!list_empty(&dev->txwait))
2218 qib_dev_err(dd, "txwait list not empty!\n");
2219 if (!list_empty(&dev->memwait))
2220 qib_dev_err(dd, "memwait list not empty!\n");
2221 if (dev->dma_mr)
2222 qib_dev_err(dd, "DMA MR not NULL!\n");
2223
2224 qps_inuse = qib_free_all_qps(dd);
2225 if (qps_inuse)
2226 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2227 qps_inuse);
2228
2229 del_timer_sync(&dev->mem_timer);
2230 qib_free_qpn_table(&dev->qpn_table);
2231 while (!list_empty(&dev->txreq_free)) {
2232 struct list_head *l = dev->txreq_free.next;
2233 struct qib_verbs_txreq *tx;
2234
2235 list_del(l);
2236 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2237 kfree(tx);
2238 }
2239 if (dd->pport->sdma_descq_cnt)
2240 dma_free_coherent(&dd->pcidev->dev,
2241 dd->pport->sdma_descq_cnt *
2242 sizeof(struct qib_pio_header),
2243 dev->pio_hdrs, dev->pio_hdrs_phys);
2244 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2245 free_pages((unsigned long) dev->lk_table.table,
2246 get_order(lk_tab_size));
2247 kfree(dev->qp_table);
2248}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
new file mode 100644
index 000000000000..bd57c1273225
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -0,0 +1,1100 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef QIB_VERBS_H
36#define QIB_VERBS_H
37
38#include <linux/types.h>
39#include <linux/spinlock.h>
40#include <linux/kernel.h>
41#include <linux/interrupt.h>
42#include <linux/kref.h>
43#include <linux/workqueue.h>
44#include <rdma/ib_pack.h>
45#include <rdma/ib_user_verbs.h>
46
47struct qib_ctxtdata;
48struct qib_pportdata;
49struct qib_devdata;
50struct qib_verbs_txreq;
51
52#define QIB_MAX_RDMA_ATOMIC 16
53#define QIB_GUIDS_PER_PORT 5
54
55#define QPN_MAX (1 << 24)
56#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
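/* QPNMAP_ENTRIES: PAGE_SIZE-sized bitmap chunks needed for one bit per QPN */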
57
58/*
59 * Increment this value if any changes that break userspace ABI
60 * compatibility are made.
61 */
62#define QIB_UVERBS_ABI_VERSION 2
63
64/*
65 * Define an ib_cq_notify value that is not valid so we know when CQ
66 * notifications are armed.
67 */
68#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
69
70#define IB_SEQ_NAK (3 << 29)
71
72/* AETH NAK opcode values */
73#define IB_RNR_NAK 0x20
74#define IB_NAK_PSN_ERROR 0x60
75#define IB_NAK_INVALID_REQUEST 0x61
76#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
77#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
78#define IB_NAK_INVALID_RD_REQUEST 0x64
79
80/* Flags for checking QP state (see ib_qib_state_ops[]) */
81#define QIB_POST_SEND_OK 0x01
82#define QIB_POST_RECV_OK 0x02
83#define QIB_PROCESS_RECV_OK 0x04
84#define QIB_PROCESS_SEND_OK 0x08
85#define QIB_PROCESS_NEXT_SEND_OK 0x10
86#define QIB_FLUSH_SEND 0x20
87#define QIB_FLUSH_RECV 0x40
88#define QIB_PROCESS_OR_FLUSH_SEND \
89 (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
90
91/* IB Performance Manager status values */
92#define IB_PMA_SAMPLE_STATUS_DONE 0x00
93#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
94#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
95
96/* Mandatory IB performance counter select values. */
97#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
98#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
99#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
100#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
101#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
102
103#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
104
105#define IB_BTH_REQ_ACK (1 << 31)
106#define IB_BTH_SOLICITED (1 << 23)
107#define IB_BTH_MIG_REQ (1 << 22)
108
109/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
110#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
111
112#define IB_GRH_VERSION 6
113#define IB_GRH_VERSION_MASK 0xF
114#define IB_GRH_VERSION_SHIFT 28
115#define IB_GRH_TCLASS_MASK 0xFF
116#define IB_GRH_TCLASS_SHIFT 20
117#define IB_GRH_FLOW_MASK 0xFFFFF
118#define IB_GRH_FLOW_SHIFT 0
119#define IB_GRH_NEXT_HDR 0x1B
120
121#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
122
123/* Values for set/get portinfo VLCap OperationalVLs */
124#define IB_VL_VL0 1
125#define IB_VL_VL0_1 2
126#define IB_VL_VL0_3 3
127#define IB_VL_VL0_7 4
128#define IB_VL_VL0_14 5
129
130static inline int qib_num_vls(int vls)
131{
132 switch (vls) {
133 default:
134 case IB_VL_VL0:
135 return 1;
136 case IB_VL_VL0_1:
137 return 2;
138 case IB_VL_VL0_3:
139 return 4;
140 case IB_VL_VL0_7:
141 return 8;
142 case IB_VL_VL0_14:
143 return 15;
144 }
145}
146
147struct ib_reth {
148 __be64 vaddr;
149 __be32 rkey;
150 __be32 length;
151} __attribute__ ((packed));
152
153struct ib_atomic_eth {
154 __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
155 __be32 rkey;
156 __be64 swap_data;
157 __be64 compare_data;
158} __attribute__ ((packed));
159
160struct qib_other_headers {
161 __be32 bth[3];
162 union {
163 struct {
164 __be32 deth[2];
165 __be32 imm_data;
166 } ud;
167 struct {
168 struct ib_reth reth;
169 __be32 imm_data;
170 } rc;
171 struct {
172 __be32 aeth;
173 __be32 atomic_ack_eth[2];
174 } at;
175 __be32 imm_data;
176 __be32 aeth;
177 struct ib_atomic_eth atomic_eth;
178 } u;
179} __attribute__ ((packed));
180
181/*
182 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
183 * long (72 w/ imm_data). Only the first 56 bytes of the IB header
184 * will be in the eager header buffer. The remaining 12 or 16 bytes
185 * are in the data buffer.
186 */
187struct qib_ib_header {
188 __be16 lrh[4];
189 union {
190 struct {
191 struct ib_grh grh;
192 struct qib_other_headers oth;
193 } l;
194 struct qib_other_headers oth;
195 } u;
196} __attribute__ ((packed));
197
198struct qib_pio_header {
199 __le32 pbc[2];
200 struct qib_ib_header hdr;
201} __attribute__ ((packed));
202
203/*
204 * There is one struct qib_mcast for each multicast GID.
205 * All attached QPs are then stored as a list of
206 * struct qib_mcast_qp.
207 */
208struct qib_mcast_qp {
209 struct list_head list;
210 struct qib_qp *qp;
211};
212
213struct qib_mcast {
214 struct rb_node rb_node;
215 union ib_gid mgid;
216 struct list_head qp_list;
217 wait_queue_head_t wait;
218 atomic_t refcount;
219 int n_attached;
220};
221
222/* Protection domain */
223struct qib_pd {
224 struct ib_pd ibpd;
225 int user; /* non-zero if created from user space */
226};
227
228/* Address Handle */
229struct qib_ah {
230 struct ib_ah ibah;
231 struct ib_ah_attr attr;
232 atomic_t refcount;
233};
234
235/*
236 * This structure is used by qib_mmap() to validate an offset
237 * when an mmap() request is made. The vm_area_struct then uses
238 * this as its vm_private_data.
239 */
240struct qib_mmap_info {
241 struct list_head pending_mmaps;
242 struct ib_ucontext *context;
243 void *obj;
244 __u64 offset;
245 struct kref ref;
246 unsigned size;
247};
248
249/*
250 * This structure is used to contain the head pointer, tail pointer,
251 * and completion queue entries as a single memory allocation so
252 * it can be mmap'ed into user space.
253 */
254struct qib_cq_wc {
255 u32 head; /* index of next entry to fill */
256 u32 tail; /* index of next ib_poll_cq() entry */
257 union {
258 /* these are actually size ibcq.cqe + 1 */
259 struct ib_uverbs_wc uqueue[0];
260 struct ib_wc kqueue[0];
261 };
262};
263
264/*
265 * The completion queue structure.
266 */
267struct qib_cq {
268 struct ib_cq ibcq;
269 struct work_struct comptask;
270 spinlock_t lock; /* protect changes in this struct */
271 u8 notify;
272 u8 triggered;
273 struct qib_cq_wc *queue;
274 struct qib_mmap_info *ip;
275};
276
277/*
278 * A segment is a linear region of low physical memory.
279 * XXX Maybe we should use phys addr here and kmap()/kunmap().
280 * Used by the verbs layer.
281 */
282struct qib_seg {
283 void *vaddr;
284 size_t length;
285};
286
287/* The number of qib_segs that fit in a page. */
288#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
289
290struct qib_segarray {
291 struct qib_seg segs[QIB_SEGSZ];
292};
293
294struct qib_mregion {
295 struct ib_pd *pd; /* shares refcnt of ibmr.pd */
296 u64 user_base; /* User's address for this region */
297 u64 iova; /* IB start address of this region */
298 size_t length;
299 u32 lkey;
300 u32 offset; /* offset (bytes) to start of region */
301 int access_flags;
302 u32 max_segs; /* number of qib_segs in all the arrays */
303 u32 mapsz; /* size of the map array */
304 atomic_t refcount;
305 struct qib_segarray *map[0]; /* the segments */
306};
307
308/*
309 * These keep track of the copy progress within a memory region.
310 * Used by the verbs layer.
311 */
312struct qib_sge {
313 struct qib_mregion *mr;
314 void *vaddr; /* kernel virtual address of segment */
315 u32 sge_length; /* length of the SGE */
316 u32 length; /* remaining length of the segment */
317 u16 m; /* current index: mr->map[m] */
318 u16 n; /* current index: mr->map[m]->segs[n] */
319};
320
321/* Memory region */
322struct qib_mr {
323 struct ib_mr ibmr;
324 struct ib_umem *umem;
325 struct qib_mregion mr; /* must be last */
326};
327
328/*
329 * Send work request queue entry.
330 * The size of the sg_list is determined when the QP is created and stored
331 * in qp->s_max_sge.
332 */
333struct qib_swqe {
334 struct ib_send_wr wr; /* don't use wr.sg_list */
335 u32 psn; /* first packet sequence number */
336 u32 lpsn; /* last packet sequence number */
337 u32 ssn; /* send sequence number */
338 u32 length; /* total length of data in sg_list */
339 struct qib_sge sg_list[0];
340};
341
342/*
343 * Receive work request queue entry.
344 * The size of the sg_list is determined when the QP (or SRQ) is created
345 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
346 */
347struct qib_rwqe {
348 u64 wr_id;
349 u8 num_sge;
350 struct ib_sge sg_list[0];
351};
352
353/*
354 * This structure is used to contain the head pointer, tail pointer,
355 * and receive work queue entries as a single memory allocation so
356 * it can be mmap'ed into user space.
357 * Note that the wq array elements are variable size so you can't
358 * just index into the array to get the N'th element;
359 * use get_rwqe_ptr() instead.
360 */
361struct qib_rwq {
362 u32 head; /* new work requests posted to the head */
363	u32 tail;               /* receive operations pull requests from here */
364 struct qib_rwqe wq[0];
365};
366
367struct qib_rq {
368 struct qib_rwq *wq;
369 spinlock_t lock; /* protect changes in this struct */
370 u32 size; /* size of RWQE array */
371 u8 max_sge;
372};
373
374struct qib_srq {
375 struct ib_srq ibsrq;
376 struct qib_rq rq;
377 struct qib_mmap_info *ip;
378 /* send signal when number of RWQEs < limit */
379 u32 limit;
380};
381
382struct qib_sge_state {
383 struct qib_sge *sg_list; /* next SGE to be used if any */
384 struct qib_sge sge; /* progress state for the current SGE */
385 u32 total_len;
386 u8 num_sge;
387};
388
389/*
390 * This structure holds the information that the send tasklet needs
391 * to send a RDMA read response or atomic operation.
392 */
393struct qib_ack_entry {
394 u8 opcode;
395 u8 sent;
396 u32 psn;
397 u32 lpsn;
398 union {
399 struct qib_sge rdma_sge;
400 u64 atomic_data;
401 };
402};
403
404/*
405 * Variables prefixed with s_ are for the requester (sender).
406 * Variables prefixed with r_ are for the responder (receiver).
407 * Variables prefixed with ack_ are for responder replies.
408 *
409 * Common variables are protected by both r_rq.lock and s_lock, taken in
410 * that order; this only happens in modify_qp() or when changing the QP 'state'.
411 */
412struct qib_qp {
413 struct ib_qp ibqp;
414 struct qib_qp *next; /* link list for QPN hash table */
415 struct qib_qp *timer_next; /* link list for qib_ib_timer() */
416 struct list_head iowait; /* link for wait PIO buf */
417	struct list_head rspwait;       /* link for waiting to respond */
418 struct ib_ah_attr remote_ah_attr;
419 struct ib_ah_attr alt_ah_attr;
420 struct qib_ib_header s_hdr; /* next packet header to send */
421 atomic_t refcount;
422 wait_queue_head_t wait;
423 wait_queue_head_t wait_dma;
424 struct timer_list s_timer;
425 struct work_struct s_work;
426 struct qib_mmap_info *ip;
427 struct qib_sge_state *s_cur_sge;
428 struct qib_verbs_txreq *s_tx;
429 struct qib_mregion *s_rdma_mr;
430 struct qib_sge_state s_sge; /* current send request data */
431 struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
432 struct qib_sge_state s_ack_rdma_sge;
433 struct qib_sge_state s_rdma_read_sge;
434 struct qib_sge_state r_sge; /* current receive data */
435 spinlock_t r_lock; /* used for APM */
436 spinlock_t s_lock;
437 atomic_t s_dma_busy;
438 unsigned processor_id; /* Processor ID QP is bound to */
439 u32 s_flags;
440 u32 s_cur_size; /* size of send packet in bytes */
441 u32 s_len; /* total length of s_sge */
442 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
443 u32 s_next_psn; /* PSN for next request */
444 u32 s_last_psn; /* last response PSN processed */
445 u32 s_sending_psn; /* lowest PSN that is being sent */
446 u32 s_sending_hpsn; /* highest PSN that is being sent */
447 u32 s_psn; /* current packet sequence number */
448 u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
449 u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
450 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
451 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
452 u64 r_wr_id; /* ID for current receive WQE */
453 unsigned long r_aflags;
454 u32 r_len; /* total length of r_sge */
455 u32 r_rcv_len; /* receive data len processed */
456 u32 r_psn; /* expected rcv packet sequence number */
457 u32 r_msn; /* message sequence number */
458 u16 s_hdrwords; /* size of s_hdr in 32 bit words */
459 u16 s_rdma_ack_cnt;
460 u8 state; /* QP state */
461 u8 s_state; /* opcode of last packet sent */
462 u8 s_ack_state; /* opcode of packet to ACK */
463 u8 s_nak_state; /* non-zero if NAK is pending */
464 u8 r_state; /* opcode of last packet received */
465 u8 r_nak_state; /* non-zero if NAK is pending */
466 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
467 u8 r_flags;
468 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
469 u8 r_head_ack_queue; /* index into s_ack_queue[] */
470 u8 qp_access_flags;
471 u8 s_max_sge; /* size of s_wq->sg_list */
472 u8 s_retry_cnt; /* number of times to retry */
473 u8 s_rnr_retry_cnt;
474 u8 s_retry; /* requester retry counter */
475 u8 s_rnr_retry; /* requester RNR retry counter */
476 u8 s_pkey_index; /* PKEY index to use */
477 u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
478 u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
479 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
480 u8 s_tail_ack_queue; /* index into s_ack_queue[] */
481 u8 s_srate;
482 u8 s_draining;
483 u8 s_mig_state;
484 u8 timeout; /* Timeout for this QP */
485 u8 alt_timeout; /* Alternate path timeout for this QP */
486 u8 port_num;
487 enum ib_mtu path_mtu;
488 u32 remote_qpn;
489 u32 qkey; /* QKEY for this QP (for UD or RD) */
490 u32 s_size; /* send work queue size */
491 u32 s_head; /* new entries added here */
492 u32 s_tail; /* next entry to process */
493 u32 s_cur; /* current work queue entry */
494 u32 s_acked; /* last un-ACK'ed entry */
495 u32 s_last; /* last completed entry */
496 u32 s_ssn; /* SSN of tail entry */
497 u32 s_lsn; /* limit sequence number (credit) */
498 struct qib_swqe *s_wq; /* send work queue */
499 struct qib_swqe *s_wqe;
500 struct qib_rq r_rq; /* receive work queue */
501 struct qib_sge r_sg_list[0]; /* verified SGEs */
502};
503
504/*
505 * Atomic bit definitions for r_aflags.
506 */
507#define QIB_R_WRID_VALID 0
508#define QIB_R_REWIND_SGE 1
509
510/*
511 * Bit definitions for r_flags.
512 */
513#define QIB_R_REUSE_SGE 0x01
514#define QIB_R_RDMAR_SEQ 0x02
515#define QIB_R_RSP_NAK 0x04
516#define QIB_R_RSP_SEND 0x08
517#define QIB_R_COMM_EST 0x10
518
519/*
520 * Bit definitions for s_flags.
521 *
522 * QIB_S_SIGNAL_REQ_WR - set if send WRs signal completion only when requested
523 * QIB_S_BUSY - send tasklet is processing the QP
524 * QIB_S_TIMER - the RC retry timer is active
525 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
526 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
527 * before processing the next SWQE
528 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
529 * before processing the next SWQE
530 * QIB_S_WAIT_RNR - waiting for RNR timeout
531 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
532 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
533 * next send completion entry not via send DMA
534 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
535 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
536 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
537 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
538 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
539 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
540 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
541 */
542#define QIB_S_SIGNAL_REQ_WR 0x0001
543#define QIB_S_BUSY 0x0002
544#define QIB_S_TIMER 0x0004
545#define QIB_S_RESP_PENDING 0x0008
546#define QIB_S_ACK_PENDING 0x0010
547#define QIB_S_WAIT_FENCE 0x0020
548#define QIB_S_WAIT_RDMAR 0x0040
549#define QIB_S_WAIT_RNR 0x0080
550#define QIB_S_WAIT_SSN_CREDIT 0x0100
551#define QIB_S_WAIT_DMA 0x0200
552#define QIB_S_WAIT_PIO 0x0400
553#define QIB_S_WAIT_TX 0x0800
554#define QIB_S_WAIT_DMA_DESC 0x1000
555#define QIB_S_WAIT_KMEM 0x2000
556#define QIB_S_WAIT_PSN 0x4000
557#define QIB_S_WAIT_ACK 0x8000
558#define QIB_S_SEND_ONE 0x10000
559#define QIB_S_UNLIMITED_CREDIT 0x20000
560
561/*
562 * Wait flags that would prevent any packet type from being sent.
563 */
564#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
565 QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
566
567/*
568 * Wait flags that would prevent send work requests from making progress.
569 */
570#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
571 QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
572 QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
573
574#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
575
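/*
 * Editorial sketch (not part of the driver): the wait flags above are
 * expected to be tested and cleared under s_lock when the blocked resource
 * becomes available, roughly along these lines (assuming a QP that was
 * waiting for a PIO buffer):
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	if (qp->s_flags & QIB_S_WAIT_PIO) {
 *		qp->s_flags &= ~QIB_S_WAIT_PIO;
 *		qib_schedule_send(qp);
 *	}
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */
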
576#define QIB_PSN_CREDIT 16
577
578/*
579 * Since struct qib_swqe is not a fixed size, we can't simply index into
580 * struct qib_qp.s_wq. This function does the array index computation.
581 */
582static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
583 unsigned n)
584{
585 return (struct qib_swqe *)((char *)qp->s_wq +
586 (sizeof(struct qib_swqe) +
587 qp->s_max_sge *
588 sizeof(struct qib_sge)) * n);
589}
590
591/*
592 * Since struct qib_rwqe is not a fixed size, we can't simply index into
593 * struct qib_rwq.wq. This function does the array index computation.
594 */
595static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
596{
597 return (struct qib_rwqe *)
598 ((char *) rq->wq->wq +
599 (sizeof(struct qib_rwqe) +
600 rq->max_sge * sizeof(struct ib_sge)) * n);
601}
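
/*
 * Editorial sketch (not part of the driver): a consumer of the shared ring
 * would typically use get_rwqe_ptr() with the head/tail indices, e.g. to
 * pull the next receive WQE (rq->lock assumed held, ring assumed non-empty):
 *
 *	u32 tail = rq->wq->tail;
 *	struct qib_rwqe *wqe = get_rwqe_ptr(rq, tail);
 *
 *	if (++tail >= rq->size)
 *		tail = 0;
 *	rq->wq->tail = tail;
 */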
602
603/*
604 * QPN-map pages start out as NULL; they are allocated upon
605 * first use and are never deallocated. This way,
606 * large bitmaps are not allocated unless large numbers of QPs are used.
607 */
608struct qpn_map {
609 void *page;
610};
611
612struct qib_qpn_table {
613 spinlock_t lock; /* protect changes in this struct */
614 unsigned flags; /* flags for QP0/1 allocated for each port */
615 u32 last; /* last QP number allocated */
616 u32 nmaps; /* size of the map table */
617 u16 limit;
618 u16 mask;
619 /* bit map of free QP numbers other than 0/1 */
620 struct qpn_map map[QPNMAP_ENTRIES];
621};
622
623struct qib_lkey_table {
624 spinlock_t lock; /* protect changes in this struct */
625 u32 next; /* next unused index (speeds search) */
626 u32 gen; /* generation count */
627 u32 max; /* size of the table */
628 struct qib_mregion **table;
629};
630
631struct qib_opcode_stats {
632 u64 n_packets; /* number of packets */
633 u64 n_bytes; /* total number of bytes */
634};
635
636struct qib_ibport {
637 struct qib_qp *qp0;
638 struct qib_qp *qp1;
639 struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
640 struct qib_ah *sm_ah;
641 struct qib_ah *smi_ah;
642 struct rb_root mcast_tree;
643 spinlock_t lock; /* protect changes in this struct */
644
645 /* non-zero when timer is set */
646 unsigned long mkey_lease_timeout;
647 unsigned long trap_timeout;
648 __be64 gid_prefix; /* in network order */
649 __be64 mkey;
650 __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
651 u64 tid; /* TID for traps */
652 u64 n_unicast_xmit; /* total unicast packets sent */
653 u64 n_unicast_rcv; /* total unicast packets received */
654 u64 n_multicast_xmit; /* total multicast packets sent */
655 u64 n_multicast_rcv; /* total multicast packets received */
656 u64 z_symbol_error_counter; /* starting count for PMA */
657 u64 z_link_error_recovery_counter; /* starting count for PMA */
658 u64 z_link_downed_counter; /* starting count for PMA */
659 u64 z_port_rcv_errors; /* starting count for PMA */
660 u64 z_port_rcv_remphys_errors; /* starting count for PMA */
661 u64 z_port_xmit_discards; /* starting count for PMA */
662 u64 z_port_xmit_data; /* starting count for PMA */
663 u64 z_port_rcv_data; /* starting count for PMA */
664 u64 z_port_xmit_packets; /* starting count for PMA */
665 u64 z_port_rcv_packets; /* starting count for PMA */
666 u32 z_local_link_integrity_errors; /* starting count for PMA */
667 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
668 u32 z_vl15_dropped; /* starting count for PMA */
669 u32 n_rc_resends;
670 u32 n_rc_acks;
671 u32 n_rc_qacks;
672 u32 n_rc_delayed_comp;
673 u32 n_seq_naks;
674 u32 n_rdma_seq;
675 u32 n_rnr_naks;
676 u32 n_other_naks;
677 u32 n_loop_pkts;
678 u32 n_pkt_drops;
679 u32 n_vl15_dropped;
680 u32 n_rc_timeouts;
681 u32 n_dmawait;
682 u32 n_unaligned;
683 u32 n_rc_dupreq;
684 u32 n_rc_seqnak;
685 u32 port_cap_flags;
686 u32 pma_sample_start;
687 u32 pma_sample_interval;
688 __be16 pma_counter_select[5];
689 u16 pma_tag;
690 u16 pkey_violations;
691 u16 qkey_violations;
692 u16 mkey_violations;
693 u16 mkey_lease_period;
694 u16 sm_lid;
695 u16 repress_traps;
696 u8 sm_sl;
697 u8 mkeyprot;
698 u8 subnet_timeout;
699 u8 vl_high_limit;
700 u8 sl_to_vl[16];
701
702 struct qib_opcode_stats opstats[128];
703};
704
705struct qib_ibdev {
706 struct ib_device ibdev;
707 struct list_head pending_mmaps;
708 spinlock_t mmap_offset_lock; /* protect mmap_offset */
709 u32 mmap_offset;
710 struct qib_mregion *dma_mr;
711
712 /* QP numbers are shared by all IB ports */
713 struct qib_qpn_table qpn_table;
714 struct qib_lkey_table lk_table;
715 struct list_head piowait; /* list for wait PIO buf */
716 struct list_head dmawait; /* list for wait DMA */
717 struct list_head txwait; /* list for wait qib_verbs_txreq */
718 struct list_head memwait; /* list for wait kernel memory */
719 struct list_head txreq_free;
720 struct timer_list mem_timer;
721 struct qib_qp **qp_table;
722 struct qib_pio_header *pio_hdrs;
723 dma_addr_t pio_hdrs_phys;
724 /* list of QPs waiting for RNR timer */
725 spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
726 unsigned qp_table_size; /* size of the hash table */
727 spinlock_t qpt_lock;
728
729 u32 n_piowait;
730 u32 n_txwait;
731
732 u32 n_pds_allocated; /* number of PDs allocated for device */
733 spinlock_t n_pds_lock;
734 u32 n_ahs_allocated; /* number of AHs allocated for device */
735 spinlock_t n_ahs_lock;
736 u32 n_cqs_allocated; /* number of CQs allocated for device */
737 spinlock_t n_cqs_lock;
738 u32 n_qps_allocated; /* number of QPs allocated for device */
739 spinlock_t n_qps_lock;
740 u32 n_srqs_allocated; /* number of SRQs allocated for device */
741 spinlock_t n_srqs_lock;
742 u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
743 spinlock_t n_mcast_grps_lock;
744};
745
746struct qib_verbs_counters {
747 u64 symbol_error_counter;
748 u64 link_error_recovery_counter;
749 u64 link_downed_counter;
750 u64 port_rcv_errors;
751 u64 port_rcv_remphys_errors;
752 u64 port_xmit_discards;
753 u64 port_xmit_data;
754 u64 port_rcv_data;
755 u64 port_xmit_packets;
756 u64 port_rcv_packets;
757 u32 local_link_integrity_errors;
758 u32 excessive_buffer_overrun_errors;
759 u32 vl15_dropped;
760};
761
762static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
763{
764 return container_of(ibmr, struct qib_mr, ibmr);
765}
766
767static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
768{
769 return container_of(ibpd, struct qib_pd, ibpd);
770}
771
772static inline struct qib_ah *to_iah(struct ib_ah *ibah)
773{
774 return container_of(ibah, struct qib_ah, ibah);
775}
776
777static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
778{
779 return container_of(ibcq, struct qib_cq, ibcq);
780}
781
782static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
783{
784 return container_of(ibsrq, struct qib_srq, ibsrq);
785}
786
787static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
788{
789 return container_of(ibqp, struct qib_qp, ibqp);
790}
791
792static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
793{
794 return container_of(ibdev, struct qib_ibdev, ibdev);
795}
796
797/*
798 * Send if not busy or waiting for I/O and either
799 * a RC response is pending or we can process send work requests.
800 */
801static inline int qib_send_ok(struct qib_qp *qp)
802{
803 return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
804 (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
805 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
806}
807
808extern struct workqueue_struct *qib_wq;
809extern struct workqueue_struct *qib_cq_wq;
810
811/*
812 * This must be called with s_lock held.
813 */
814static inline void qib_schedule_send(struct qib_qp *qp)
815{
816 if (qib_send_ok(qp)) {
817 if (qp->processor_id == smp_processor_id())
818 queue_work(qib_wq, &qp->s_work);
819 else
820 queue_work_on(qp->processor_id,
821 qib_wq, &qp->s_work);
822 }
823}
824
825static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
826{
827 u16 p1 = pkey1 & 0x7FFF;
828 u16 p2 = pkey2 & 0x7FFF;
829
830 /*
831 * Low 15 bits must be non-zero and match, and
832 * one of the two must be a full member.
833 */
834 return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
835}
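
/*
 * Editorial example (not from the source): with the full-membership bit in
 * bit 15, qib_pkey_ok(0xFFFF, 0x7FFF) is true (one full and one limited
 * member of the same partition), while qib_pkey_ok(0x7FFF, 0x7FFF) is false
 * because two limited members may not communicate.
 */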
836
837void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
838 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
839void qib_cap_mask_chg(struct qib_ibport *ibp);
840void qib_sys_guid_chg(struct qib_ibport *ibp);
841void qib_node_desc_chg(struct qib_ibport *ibp);
842int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
843 struct ib_wc *in_wc, struct ib_grh *in_grh,
844 struct ib_mad *in_mad, struct ib_mad *out_mad);
845int qib_create_agents(struct qib_ibdev *dev);
846void qib_free_agents(struct qib_ibdev *dev);
847
848/*
849 * Compare the lower 24 bits of the two values.
850 * Compare the lower 24 bits of the two values, treating them as circular
851 * (modulo 2^24) sequence numbers. Returns an integer <, ==, or > zero.
852static inline int qib_cmp24(u32 a, u32 b)
853{
854 return (((int) a) - ((int) b)) << 8;
855}
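
/*
 * Editorial example (not from the source): because only the low 24 bits
 * participate, the comparison wraps correctly for PSNs, e.g.
 * qib_cmp24(0x000001, 0xFFFFFF) > 0 (1 is "after" 0xFFFFFF across the wrap)
 * and qib_cmp24(0xFFFFFF, 0x000001) < 0.
 */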
856
857struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
858
859int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
860 u64 *rwords, u64 *spkts, u64 *rpkts,
861 u64 *xmit_wait);
862
863int qib_get_counters(struct qib_pportdata *ppd,
864 struct qib_verbs_counters *cntrs);
865
866int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
867
868int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
869
870int qib_mcast_tree_empty(struct qib_ibport *ibp);
871
872__be32 qib_compute_aeth(struct qib_qp *qp);
873
874struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
875
876struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
877 struct ib_qp_init_attr *init_attr,
878 struct ib_udata *udata);
879
880int qib_destroy_qp(struct ib_qp *ibqp);
881
882int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
883
884int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
885 int attr_mask, struct ib_udata *udata);
886
887int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
888 int attr_mask, struct ib_qp_init_attr *init_attr);
889
890unsigned qib_free_all_qps(struct qib_devdata *dd);
891
892void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
893
894void qib_free_qpn_table(struct qib_qpn_table *qpt);
895
896void qib_get_credit(struct qib_qp *qp, u32 aeth);
897
898unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
899
900void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
901
902void qib_put_txreq(struct qib_verbs_txreq *tx);
903
904int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
905 u32 hdrwords, struct qib_sge_state *ss, u32 len);
906
907void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
908 int release);
909
910void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
911
912void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
913 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
914
915void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
916 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
917
918int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
919
920void qib_rc_rnr_retry(unsigned long arg);
921
922void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
923
924void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
925
926int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
927
928void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
929 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
930
931int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
932
933int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
934
935int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
936 struct qib_sge *isge, struct ib_sge *sge, int acc);
937
938int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
939 u32 len, u64 vaddr, u32 rkey, int acc);
940
941int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
942 struct ib_recv_wr **bad_wr);
943
944struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
945 struct ib_srq_init_attr *srq_init_attr,
946 struct ib_udata *udata);
947
948int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
949 enum ib_srq_attr_mask attr_mask,
950 struct ib_udata *udata);
951
952int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
953
954int qib_destroy_srq(struct ib_srq *ibsrq);
955
956void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
957
958int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
959
960struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
961 int comp_vector, struct ib_ucontext *context,
962 struct ib_udata *udata);
963
964int qib_destroy_cq(struct ib_cq *ibcq);
965
966int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
967
968int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
969
970struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
971
972struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
973 struct ib_phys_buf *buffer_list,
974 int num_phys_buf, int acc, u64 *iova_start);
975
976struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
977 u64 virt_addr, int mr_access_flags,
978 struct ib_udata *udata);
979
980int qib_dereg_mr(struct ib_mr *ibmr);
981
982struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
983
984struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
985 struct ib_device *ibdev, int page_list_len);
986
987void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
988
989int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
990
991struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
992 struct ib_fmr_attr *fmr_attr);
993
994int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
995 int list_len, u64 iova);
996
997int qib_unmap_fmr(struct list_head *fmr_list);
998
999int qib_dealloc_fmr(struct ib_fmr *ibfmr);
1000
1001void qib_release_mmap_info(struct kref *ref);
1002
1003struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
1004 struct ib_ucontext *context,
1005 void *obj);
1006
1007void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
1008 u32 size, void *obj);
1009
1010int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
1011
1012int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
1013
1014void qib_migrate_qp(struct qib_qp *qp);
1015
1016int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
1017 int has_grh, struct qib_qp *qp, u32 bth0);
1018
1019u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
1020 struct ib_global_route *grh, u32 hwords, u32 nwords);
1021
1022void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
1023 u32 bth0, u32 bth2);
1024
1025void qib_do_send(struct work_struct *work);
1026
1027void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
1028 enum ib_wc_status status);
1029
1030void qib_send_rc_ack(struct qib_qp *qp);
1031
1032int qib_make_rc_req(struct qib_qp *qp);
1033
1034int qib_make_uc_req(struct qib_qp *qp);
1035
1036int qib_make_ud_req(struct qib_qp *qp);
1037
1038int qib_register_ib_device(struct qib_devdata *);
1039
1040void qib_unregister_ib_device(struct qib_devdata *);
1041
1042void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
1043
1044void qib_ib_piobufavail(struct qib_devdata *);
1045
1046unsigned qib_get_npkeys(struct qib_devdata *);
1047
1048unsigned qib_get_pkey(struct qib_ibport *, unsigned);
1049
1050extern const enum ib_wc_opcode ib_qib_wc_opcode[];
1051
1052/*
1053 * Below are the HCA-independent IB PhysPortState values returned
1054 * by the f_ibphys_portstate() routine.
1055 */
1056#define IB_PHYSPORTSTATE_SLEEP 1
1057#define IB_PHYSPORTSTATE_POLL 2
1058#define IB_PHYSPORTSTATE_DISABLED 3
1059#define IB_PHYSPORTSTATE_CFG_TRAIN 4
1060#define IB_PHYSPORTSTATE_LINKUP 5
1061#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
1062#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
1063#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
1064#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
1065#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
1066#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
1067#define IB_PHYSPORTSTATE_CFG_ENH 0x10
1068#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
1069
1070extern const int ib_qib_state_ops[];
1071
1072extern __be64 ib_qib_sys_image_guid; /* in network order */
1073
1074extern unsigned int ib_qib_lkey_table_size;
1075
1076extern unsigned int ib_qib_max_cqes;
1077
1078extern unsigned int ib_qib_max_cqs;
1079
1080extern unsigned int ib_qib_max_qp_wrs;
1081
1082extern unsigned int ib_qib_max_qps;
1083
1084extern unsigned int ib_qib_max_sges;
1085
1086extern unsigned int ib_qib_max_mcast_grps;
1087
1088extern unsigned int ib_qib_max_mcast_qp_attached;
1089
1090extern unsigned int ib_qib_max_srqs;
1091
1092extern unsigned int ib_qib_max_srq_sges;
1093
1094extern unsigned int ib_qib_max_srq_wrs;
1095
1096extern const u32 ib_qib_rnr_table[];
1097
1098extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
1099
1100#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
new file mode 100644
index 000000000000..dabb697b1c2a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -0,0 +1,368 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/rculist.h>
35
36#include "qib.h"
37
38/**
39 * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
40 * @qp: the QP to link
41 */
42static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
43{
44 struct qib_mcast_qp *mqp;
45
46 mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
47 if (!mqp)
48 goto bail;
49
50 mqp->qp = qp;
51 atomic_inc(&qp->refcount);
52
53bail:
54 return mqp;
55}
56
57static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
58{
59 struct qib_qp *qp = mqp->qp;
60
61 /* Notify qib_destroy_qp() if it is waiting. */
62 if (atomic_dec_and_test(&qp->refcount))
63 wake_up(&qp->wait);
64
65 kfree(mqp);
66}
67
68/**
69 * qib_mcast_alloc - allocate the multicast GID structure
70 * @mgid: the multicast GID
71 *
72 * A list of QPs will be attached to this structure.
73 */
74static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
75{
76 struct qib_mcast *mcast;
77
78 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
79 if (!mcast)
80 goto bail;
81
82 mcast->mgid = *mgid;
83 INIT_LIST_HEAD(&mcast->qp_list);
84 init_waitqueue_head(&mcast->wait);
85 atomic_set(&mcast->refcount, 0);
86 mcast->n_attached = 0;
87
88bail:
89 return mcast;
90}
91
92static void qib_mcast_free(struct qib_mcast *mcast)
93{
94 struct qib_mcast_qp *p, *tmp;
95
96 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
97 qib_mcast_qp_free(p);
98
99 kfree(mcast);
100}
101
102/**
103 * qib_mcast_find - search the global table for the given multicast GID
104 * @ibp: the IB port structure
105 * @mgid: the multicast GID to search for
106 *
107 * Returns NULL if not found.
108 *
109 * The caller is responsible for decrementing the reference count if found.
110 */
111struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
112{
113 struct rb_node *n;
114 unsigned long flags;
115 struct qib_mcast *mcast;
116
117 spin_lock_irqsave(&ibp->lock, flags);
118 n = ibp->mcast_tree.rb_node;
119 while (n) {
120 int ret;
121
122 mcast = rb_entry(n, struct qib_mcast, rb_node);
123
124 ret = memcmp(mgid->raw, mcast->mgid.raw,
125 sizeof(union ib_gid));
126 if (ret < 0)
127 n = n->rb_left;
128 else if (ret > 0)
129 n = n->rb_right;
130 else {
131 atomic_inc(&mcast->refcount);
132 spin_unlock_irqrestore(&ibp->lock, flags);
133 goto bail;
134 }
135 }
136 spin_unlock_irqrestore(&ibp->lock, flags);
137
138 mcast = NULL;
139
140bail:
141 return mcast;
142}
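
/*
 * Editorial sketch (not part of the driver): a caller of qib_mcast_find()
 * owns one reference on success and must drop it when done, e.g.:
 *
 *	struct qib_mcast *mcast = qib_mcast_find(ibp, mgid);
 *
 *	if (mcast) {
 *		... deliver the packet to each QP on mcast->qp_list ...
 *		if (atomic_dec_return(&mcast->refcount) <= 1)
 *			wake_up(&mcast->wait);
 *	}
 */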
143
144/**
145 * qib_mcast_add - insert mcast GID into table and attach QP struct
146 * @mcast: the mcast GID table
147 * @mqp: the QP to attach
148 *
149 * Return zero if both were added. Return EEXIST if the GID was already in
150 * the table but the QP was added. Return ESRCH if the QP was already
151 * attached and neither was added; return ENOMEM if a resource limit was hit.
152 */
153static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
154 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
155{
156 struct rb_node **n = &ibp->mcast_tree.rb_node;
157 struct rb_node *pn = NULL;
158 int ret;
159
160 spin_lock_irq(&ibp->lock);
161
162 while (*n) {
163 struct qib_mcast *tmcast;
164 struct qib_mcast_qp *p;
165
166 pn = *n;
167 tmcast = rb_entry(pn, struct qib_mcast, rb_node);
168
169 ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
170 sizeof(union ib_gid));
171 if (ret < 0) {
172 n = &pn->rb_left;
173 continue;
174 }
175 if (ret > 0) {
176 n = &pn->rb_right;
177 continue;
178 }
179
180 /* Search the QP list to see if this is already there. */
181 list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
182 if (p->qp == mqp->qp) {
183 ret = ESRCH;
184 goto bail;
185 }
186 }
187 if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
188 ret = ENOMEM;
189 goto bail;
190 }
191
192 tmcast->n_attached++;
193
194 list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
195 ret = EEXIST;
196 goto bail;
197 }
198
199 spin_lock(&dev->n_mcast_grps_lock);
200 if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
201 spin_unlock(&dev->n_mcast_grps_lock);
202 ret = ENOMEM;
203 goto bail;
204 }
205
206 dev->n_mcast_grps_allocated++;
207 spin_unlock(&dev->n_mcast_grps_lock);
208
209 mcast->n_attached++;
210
211 list_add_tail_rcu(&mqp->list, &mcast->qp_list);
212
213 atomic_inc(&mcast->refcount);
214 rb_link_node(&mcast->rb_node, pn, n);
215 rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
216
217 ret = 0;
218
219bail:
220 spin_unlock_irq(&ibp->lock);
221
222 return ret;
223}
224
225int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
226{
227 struct qib_qp *qp = to_iqp(ibqp);
228 struct qib_ibdev *dev = to_idev(ibqp->device);
229 struct qib_ibport *ibp;
230 struct qib_mcast *mcast;
231 struct qib_mcast_qp *mqp;
232 int ret;
233
234 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
235 ret = -EINVAL;
236 goto bail;
237 }
238
239 /*
240 * Allocate the data structures now since it's better to do so outside of
241 * the spin lock, and they will most likely be needed.
242 */
243 mcast = qib_mcast_alloc(gid);
244 if (mcast == NULL) {
245 ret = -ENOMEM;
246 goto bail;
247 }
248 mqp = qib_mcast_qp_alloc(qp);
249 if (mqp == NULL) {
250 qib_mcast_free(mcast);
251 ret = -ENOMEM;
252 goto bail;
253 }
254 ibp = to_iport(ibqp->device, qp->port_num);
255 switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
256 case ESRCH:
257 /* Neither was used: OK to attach the same QP twice. */
258 qib_mcast_qp_free(mqp);
259 qib_mcast_free(mcast);
260 break;
261
262 case EEXIST: /* The mcast wasn't used */
263 qib_mcast_free(mcast);
264 break;
265
266 case ENOMEM:
267 /* Exceeded the maximum number of mcast groups. */
268 qib_mcast_qp_free(mqp);
269 qib_mcast_free(mcast);
270 ret = -ENOMEM;
271 goto bail;
272
273 default:
274 break;
275 }
276
277 ret = 0;
278
279bail:
280 return ret;
281}
282
283int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
284{
285 struct qib_qp *qp = to_iqp(ibqp);
286 struct qib_ibdev *dev = to_idev(ibqp->device);
287 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
288 struct qib_mcast *mcast = NULL;
289 struct qib_mcast_qp *p, *tmp;
290 struct rb_node *n;
291 int last = 0;
292 int ret;
293
294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
295 ret = -EINVAL;
296 goto bail;
297 }
298
299 spin_lock_irq(&ibp->lock);
300
301 /* Find the GID in the mcast table. */
302 n = ibp->mcast_tree.rb_node;
303 while (1) {
304 if (n == NULL) {
305 spin_unlock_irq(&ibp->lock);
306 ret = -EINVAL;
307 goto bail;
308 }
309
310 mcast = rb_entry(n, struct qib_mcast, rb_node);
311 ret = memcmp(gid->raw, mcast->mgid.raw,
312 sizeof(union ib_gid));
313 if (ret < 0)
314 n = n->rb_left;
315 else if (ret > 0)
316 n = n->rb_right;
317 else
318 break;
319 }
320
321 /* Search the QP list. */
322 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
323 if (p->qp != qp)
324 continue;
325 /*
326 * We found it, so remove it, but don't poison the forward
327 * link until we are sure there are no list walkers.
328 */
329 list_del_rcu(&p->list);
330 mcast->n_attached--;
331
332 /* If this was the last attached QP, remove the GID too. */
333 if (list_empty(&mcast->qp_list)) {
334 rb_erase(&mcast->rb_node, &ibp->mcast_tree);
335 last = 1;
336 }
337 break;
338 }
339
340 spin_unlock_irq(&ibp->lock);
341
342 if (p) {
343 /*
344 * Wait for any list walkers to finish before freeing the
345 * list element.
346 */
347 wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
348 qib_mcast_qp_free(p);
349 }
350 if (last) {
351 atomic_dec(&mcast->refcount);
352 wait_event(mcast->wait, !atomic_read(&mcast->refcount));
353 qib_mcast_free(mcast);
354 spin_lock_irq(&dev->n_mcast_grps_lock);
355 dev->n_mcast_grps_allocated--;
356 spin_unlock_irq(&dev->n_mcast_grps_lock);
357 }
358
359 ret = 0;
360
361bail:
362 return ret;
363}
364
365int qib_mcast_tree_empty(struct qib_ibport *ibp)
366{
367 return ibp->mcast_tree.rb_node == NULL;
368}
diff --git a/drivers/infiniband/hw/qib/qib_wc_ppc64.c b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
new file mode 100644
index 000000000000..673cf4c22ebd
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file is conditionally built on PowerPC only. Otherwise weak symbol
35 * versions of the functions exported from here are used.
36 */
37
38#include "qib.h"
39
40/**
41 * qib_enable_wc - enable write combining for MMIO writes to the device
42 * @dd: qlogic_ib device
43 *
44 * Nothing to do on PowerPC, so just return without error.
45 */
46int qib_enable_wc(struct qib_devdata *dd)
47{
48 return 0;
49}
50
51/**
52 * qib_unordered_wc - indicate whether write combining is unordered
53 *
54 * Because our performance depends on our ability to do write
55 * combining mmio writes in the most efficient way, we need to
56 * know if we are on a processor that may reorder stores when
57 * write combining.
58 */
59int qib_unordered_wc(void)
60{
61 return 1;
62}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
new file mode 100644
index 000000000000..561b8bca4060
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/*
35 * This file is conditionally built on x86_64 only. Otherwise weak symbol
36 * versions of the functions exported from here are used.
37 */
38
39#include <linux/pci.h>
40#include <asm/mtrr.h>
41#include <asm/processor.h>
42
43#include "qib.h"
44
45/**
46 * qib_enable_wc - enable write combining for MMIO writes to the device
47 * @dd: qlogic_ib device
48 *
49 * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
50 * write combining.
51 */
52int qib_enable_wc(struct qib_devdata *dd)
53{
54 int ret = 0;
55 u64 pioaddr, piolen;
56 unsigned bits;
57 const unsigned long addr = pci_resource_start(dd->pcidev, 0);
58 const size_t len = pci_resource_len(dd->pcidev, 0);
59
60 /*
61 * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
62 * chip. Linux (possibly the hardware) requires it to be on a power
63 * of 2 address matching the length (which has to be a power of 2).
64 * For rev1, that means the base address, for rev2, it will be just
65 * the PIO buffers themselves.
66 * For chips with two sets of buffers, the calculations are
67 * somewhat more complicated; we need to sum, and the piobufbase
68 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
69 * The buffers are still packed, so a single range covers both.
70 */
71 if (dd->piobcnt2k && dd->piobcnt4k) {
72 /* 2 sizes for chip */
73 unsigned long pio2kbase, pio4kbase;
74 pio2kbase = dd->piobufbase & 0xffffffffUL;
75 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
76 if (pio2kbase < pio4kbase) {
77 /* all current chips */
78 pioaddr = addr + pio2kbase;
79 piolen = pio4kbase - pio2kbase +
80 dd->piobcnt4k * dd->align4k;
81 } else {
82 pioaddr = addr + pio4kbase;
83 piolen = pio2kbase - pio4kbase +
84 dd->piobcnt2k * dd->palign;
85 }
86 } else { /* single buffer size (2K, currently) */
87 pioaddr = addr + dd->piobufbase;
88 piolen = dd->piobcnt2k * dd->palign +
89 dd->piobcnt4k * dd->align4k;
90 }
91
92 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
93 /* do nothing */ ;
94
95 if (piolen != (1ULL << bits)) {
96 piolen >>= bits;
97 while (piolen >>= 1)
98 bits++;
99 piolen = 1ULL << (bits + 1);
100 }
101 if (pioaddr & (piolen - 1)) {
102 u64 atmp;
103 atmp = pioaddr & ~(piolen - 1);
104 if (atmp < addr || (atmp + piolen) > (addr + len)) {
105 qib_dev_err(dd, "No way to align address/size "
106 "(%llx/%llx), no WC mtrr\n",
107 (unsigned long long) atmp,
108 (unsigned long long) piolen << 1);
109 ret = -ENODEV;
110 } else {
111 pioaddr = atmp;
112 piolen <<= 1;
113 }
114 }
115
116 if (!ret) {
117 int cookie;
118
119 cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
120 if (cookie < 0) {
121 {
122 qib_devinfo(dd->pcidev,
123 "mtrr_add() WC for PIO bufs "
124 "failed (%d)\n",
125 cookie);
126 ret = -EINVAL;
127 }
128 } else {
129 dd->wc_cookie = cookie;
130 dd->wc_base = (unsigned long) pioaddr;
131 dd->wc_len = (unsigned long) piolen;
132 }
133 }
134
135 return ret;
136}
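
/*
 * Editorial example (not from the source): a PIO span of 0x3000 bytes is
 * first rounded up to the next power of two (0x4000) by the loops above;
 * if its start address is then only 0x1000-aligned within that size, the
 * base is rounded down to the enclosing 0x4000 boundary and the length is
 * doubled to 0x8000 so the resulting MTRR still covers the whole span.
 */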
137
138/**
139 * qib_disable_wc - disable write combining for MMIO writes to the device
140 * @dd: qlogic_ib device
141 */
142void qib_disable_wc(struct qib_devdata *dd)
143{
144 if (dd->wc_cookie) {
145 int r;
146
147 r = mtrr_del(dd->wc_cookie, dd->wc_base,
148 dd->wc_len);
149 if (r < 0)
150 qib_devinfo(dd->pcidev,
151 "mtrr_del(%lx, %lx, %lx) failed: %d\n",
152 dd->wc_cookie, dd->wc_base,
153 dd->wc_len, r);
154 dd->wc_cookie = 0; /* even on failure */
155 }
156}
157
158/**
159 * qib_unordered_wc - indicate whether write combining is unordered
160 *
161 * Because our performance depends on our ability to do write combining mmio
162 * writes in the most efficient way, we need to know if we are on an Intel
163 * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
164 * the order completed, and so no special flushing is required to get
165 * correct ordering. Intel processors, however, will flush write buffers
166 * out in "random" orders, and so explicit ordering is needed at times.
167 */
168int qib_unordered_wc(void)
169{
170 return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
171}
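
/*
 * Editorial sketch (not part of the driver): callers that care about store
 * ordering are expected to use this as a predicate, e.g. (assuming a
 * qib_flush_wc() style barrier helper is provided elsewhere in the driver):
 *
 *	if (qib_unordered_wc())
 *		qib_flush_wc();
 */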