author     Arthur Jones <arthur.jones@qlogic.com>    2007-07-19 14:32:49 -0400
committer  Roland Dreier <rolandd@cisco.com>         2007-07-21 00:19:43 -0400
commit     bd631048116df40837667a72c578b170c906dd30 (patch)
tree       35b62c8982a7f1f817e27e8364dbff490baf5aeb /drivers/infiniband/hw
parent     f5b404317b79823ec643dfbb71d62f65a48cc178 (diff)
IB/ipath: Remove ipath_layer dead code
The ipath_layer.[ch] code was an attempt to provide a single interface for the ipath verbs and ipath_ether code to use.  As verbs functionality increased, the layer's functionality became insufficient and the verbs code broke away to interface directly to the driver.  The failed attempt to get ipath_ether upstream was the final nail in the coffin, and now it sits quietly in a dark kernel.org corner waiting for someone to notice the smell and send it along to its final resting place.

Roland Dreier was that someone -- this patch expands on his work...

Signed-off-by: Arthur Jones <arthur.jones@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
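[Editorial note: the sketch below shows roughly how a layered client such as ipath_ether would have hooked into the interface this patch deletes.  Only the ipath_layer_register()/ipath_layer_open() signatures and the IPATH_LAYER_INT_* flags come from the removed ipath_layer.h shown further down; the callback names, bodies, and receive opcode are hypothetical illustrations, not code from any real driver.]

/*
 * Hypothetical sketch of a client of the removed layer interface.
 * Signatures are taken from the deleted ipath_layer.h; everything
 * named my_* and MY_* here is illustrative only.
 */
#include <linux/module.h>
#include <linux/skbuff.h>

#include "ipath_layer.h"

#define MY_RCV_OPCODE 0x49              /* placeholder receive opcode */

static void *my_add_one(int unit, struct ipath_devdata *dd)
{
        u32 pktmax;

        /* Per-device setup; the pointer returned here becomes l_arg. */
        if (ipath_layer_open(dd, &pktmax) < 0)
                return NULL;
        return dd;
}

static void my_remove_one(void *arg)
{
        /* Undo whatever my_add_one() set up for this device. */
}

static int my_intr(void *arg, u32 events)
{
        /* React to link/LID changes signalled by the core driver. */
        if (events & IPATH_LAYER_INT_IF_UP)
                pr_debug("link came up\n");
        if (events & IPATH_LAYER_INT_LID)
                pr_debug("LID assigned\n");
        return 0;
}

static int my_rcv(void *arg, void *hdr, struct sk_buff *skb)
{
        dev_kfree_skb(skb);             /* consume the received packet */
        return 0;
}

static int my_rcv_lid(void *arg, void *hdr)
{
        return 0;
}

static int __init my_client_init(void)
{
        /* Register the callbacks; attaches to every initialized unit. */
        return ipath_layer_register(my_add_one, my_remove_one, my_intr,
                                    my_rcv, MY_RCV_OPCODE, my_rcv_lid);
}

static void __exit my_client_exit(void)
{
        ipath_layer_unregister();
}

module_init(my_client_init);
module_exit(my_client_exit);
MODULE_LICENSE("Dual BSD/GPL");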
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile       |   1 -
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c  | 365 -
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.h  |  71 -
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h  |   2 -
4 files changed, 0 insertions, 439 deletions
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index ec2e603ea24..fe673882686 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -14,7 +14,6 @@ ib_ipath-y := \
 	ipath_init_chip.o \
 	ipath_intr.o \
 	ipath_keys.o \
-	ipath_layer.o \
 	ipath_mad.o \
 	ipath_mmap.o \
 	ipath_mr.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
deleted file mode 100644
index 82616b779e2..00000000000
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * These are the routines used by layered drivers, currently just the
- * layered ethernet driver and verbs layer.
- */
-
-#include <linux/io.h>
-#include <asm/byteorder.h>
-
-#include "ipath_kernel.h"
-#include "ipath_layer.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-/* Acquire before ipath_devs_lock. */
-static DEFINE_MUTEX(ipath_layer_mutex);
-
-u16 ipath_layer_rcv_opcode;
-
-static int (*layer_intr)(void *, u32);
-static int (*layer_rcv)(void *, void *, struct sk_buff *);
-static int (*layer_rcv_lid)(void *, void *);
-
-static void *(*layer_add_one)(int, struct ipath_devdata *);
-static void (*layer_remove_one)(void *);
-
-int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
-{
-        int ret = -ENODEV;
-
-        if (dd->ipath_layer.l_arg && layer_intr)
-                ret = layer_intr(dd->ipath_layer.l_arg, arg);
-
-        return ret;
-}
-
-int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
-{
-        int ret;
-
-        mutex_lock(&ipath_layer_mutex);
-
-        ret = __ipath_layer_intr(dd, arg);
-
-        mutex_unlock(&ipath_layer_mutex);
-
-        return ret;
-}
-
-int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
-                      struct sk_buff *skb)
-{
-        int ret = -ENODEV;
-
-        if (dd->ipath_layer.l_arg && layer_rcv)
-                ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
-
-        return ret;
-}
-
-int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
-{
-        int ret = -ENODEV;
-
-        if (dd->ipath_layer.l_arg && layer_rcv_lid)
-                ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
-
-        return ret;
-}
-
-void ipath_layer_lid_changed(struct ipath_devdata *dd)
-{
-        mutex_lock(&ipath_layer_mutex);
-
-        if (dd->ipath_layer.l_arg && layer_intr)
-                layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
-
-        mutex_unlock(&ipath_layer_mutex);
-}
-
-void ipath_layer_add(struct ipath_devdata *dd)
-{
-        mutex_lock(&ipath_layer_mutex);
-
-        if (layer_add_one)
-                dd->ipath_layer.l_arg =
-                        layer_add_one(dd->ipath_unit, dd);
-
-        mutex_unlock(&ipath_layer_mutex);
-}
-
-void ipath_layer_remove(struct ipath_devdata *dd)
-{
-        mutex_lock(&ipath_layer_mutex);
-
-        if (dd->ipath_layer.l_arg && layer_remove_one) {
-                layer_remove_one(dd->ipath_layer.l_arg);
-                dd->ipath_layer.l_arg = NULL;
-        }
-
-        mutex_unlock(&ipath_layer_mutex);
-}
-
-int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
-                         void (*l_remove)(void *),
-                         int (*l_intr)(void *, u32),
-                         int (*l_rcv)(void *, void *, struct sk_buff *),
-                         u16 l_rcv_opcode,
-                         int (*l_rcv_lid)(void *, void *))
-{
-        struct ipath_devdata *dd, *tmp;
-        unsigned long flags;
-
-        mutex_lock(&ipath_layer_mutex);
-
-        layer_add_one = l_add;
-        layer_remove_one = l_remove;
-        layer_intr = l_intr;
-        layer_rcv = l_rcv;
-        layer_rcv_lid = l_rcv_lid;
-        ipath_layer_rcv_opcode = l_rcv_opcode;
-
-        spin_lock_irqsave(&ipath_devs_lock, flags);
-
-        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-                if (!(dd->ipath_flags & IPATH_INITTED))
-                        continue;
-
-                if (dd->ipath_layer.l_arg)
-                        continue;
-
-                spin_unlock_irqrestore(&ipath_devs_lock, flags);
-                dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
-                spin_lock_irqsave(&ipath_devs_lock, flags);
-        }
-
-        spin_unlock_irqrestore(&ipath_devs_lock, flags);
-        mutex_unlock(&ipath_layer_mutex);
-
-        return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_register);
-
-void ipath_layer_unregister(void)
-{
-        struct ipath_devdata *dd, *tmp;
-        unsigned long flags;
-
-        mutex_lock(&ipath_layer_mutex);
-        spin_lock_irqsave(&ipath_devs_lock, flags);
-
-        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-                if (dd->ipath_layer.l_arg && layer_remove_one) {
-                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
-                        layer_remove_one(dd->ipath_layer.l_arg);
-                        spin_lock_irqsave(&ipath_devs_lock, flags);
-                        dd->ipath_layer.l_arg = NULL;
-                }
-        }
-
-        spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-        layer_add_one = NULL;
-        layer_remove_one = NULL;
-        layer_intr = NULL;
-        layer_rcv = NULL;
-        layer_rcv_lid = NULL;
-
-        mutex_unlock(&ipath_layer_mutex);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_unregister);
-
-int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
-{
-        int ret;
-        u32 intval = 0;
-
-        mutex_lock(&ipath_layer_mutex);
-
-        if (!dd->ipath_layer.l_arg) {
-                ret = -EINVAL;
-                goto bail;
-        }
-
-        ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
-
-        if (ret < 0)
-                goto bail;
-
-        *pktmax = dd->ipath_ibmaxlen;
-
-        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
-                intval |= IPATH_LAYER_INT_IF_UP;
-        if (dd->ipath_lid)
-                intval |= IPATH_LAYER_INT_LID;
-        if (dd->ipath_mlid)
-                intval |= IPATH_LAYER_INT_BCAST;
-        /*
-         * do this on open, in case low level is already up and
-         * just layered driver was reloaded, etc.
-         */
-        if (intval)
-                layer_intr(dd->ipath_layer.l_arg, intval);
-
-        ret = 0;
-bail:
-        mutex_unlock(&ipath_layer_mutex);
-
-        return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_open);
-
-u16 ipath_layer_get_lid(struct ipath_devdata *dd)
-{
-        return dd->ipath_lid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
-
-/**
- * ipath_layer_get_mac - get the MAC address
- * @dd: the infinipath device
- * @mac: the MAC is put here
- *
- * This is the EUID-64 OUI octets (top 3), then
- * skip the next 2 (which should both be zero or 0xff).
- * The returned MAC is in network order
- * mac points to at least 6 bytes of buffer
- * We assume that by the time the LID is set, that the GUID is as valid
- * as it's ever going to be, rather than adding yet another status bit.
- */
-
-int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
-{
-        u8 *guid;
-
-        guid = (u8 *) &dd->ipath_guid;
-
-        mac[0] = guid[0];
-        mac[1] = guid[1];
-        mac[2] = guid[2];
-        mac[3] = guid[5];
-        mac[4] = guid[6];
-        mac[5] = guid[7];
-        if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
-                ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
-                          "%x %x\n", guid[3], guid[4]);
-        return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
-
-u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
-{
-        return dd->ipath_mlid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
-
-int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
-{
-        int ret = 0;
-        u32 __iomem *piobuf;
-        u32 plen, *uhdr;
-        size_t count;
-        __be16 vlsllnh;
-
-        if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
-                ipath_dbg("send while not open\n");
-                ret = -EINVAL;
-        } else
-                if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
-                    dd->ipath_lid == 0) {
-                        /*
-                         * lid check is for when sma hasn't yet configured
-                         */
-                        ret = -ENETDOWN;
-                        ipath_cdbg(VERBOSE, "send while not ready, "
-                                   "mylid=%u, flags=0x%x\n",
-                                   dd->ipath_lid, dd->ipath_flags);
-                }
-
-        vlsllnh = *((__be16 *) hdr);
-        if (vlsllnh != htons(IPATH_LRH_BTH)) {
-                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
-                          "not sending\n", be16_to_cpu(vlsllnh),
-                          IPATH_LRH_BTH);
-                ret = -EINVAL;
-        }
-        if (ret)
-                goto done;
-
-        /* Get a PIO buffer to use. */
-        piobuf = ipath_getpiobuf(dd, NULL);
-        if (piobuf == NULL) {
-                ret = -EBUSY;
-                goto done;
-        }
-
-        plen = (sizeof(*hdr) >> 2); /* actual length */
-        ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
-
-        writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
-        ipath_flush_wc();
-        piobuf += 2;
-        uhdr = (u32 *)hdr;
-        count = plen-1; /* amount we can copy before trigger word */
-        __iowrite32_copy(piobuf, uhdr, count);
-        ipath_flush_wc();
-        __raw_writel(uhdr[count], piobuf + count);
-        ipath_flush_wc(); /* ensure it's sent, now */
-
-        ipath_stats.sps_ether_spkts++; /* ether packet sent */
-
-done:
-        return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
-
-int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
-{
-        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                         dd->ipath_sendctrl);
-        return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
deleted file mode 100644
index 415709c4d85..00000000000
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_LAYER_H
-#define _IPATH_LAYER_H
-
-/*
- * This header file is for symbols shared between the infinipath driver
- * and drivers layered upon it (such as ipath).
- */
-
-struct sk_buff;
-struct ipath_devdata;
-struct ether_header;
-
-int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
-                         void (*l_remove)(void *),
-                         int (*l_intr)(void *, u32),
-                         int (*l_rcv)(void *, void *,
-                                      struct sk_buff *),
-                         u16 rcv_opcode,
-                         int (*l_rcv_lid)(void *, void *));
-void ipath_layer_unregister(void);
-int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
-u16 ipath_layer_get_lid(struct ipath_devdata *dd);
-int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
-u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
-int ipath_layer_send_hdr(struct ipath_devdata *dd,
-                         struct ether_header *hdr);
-int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
-
-/* ipath_ether interrupt values */
-#define IPATH_LAYER_INT_IF_UP 0x2
-#define IPATH_LAYER_INT_IF_DOWN 0x4
-#define IPATH_LAYER_INT_LID 0x8
-#define IPATH_LAYER_INT_SEND_CONTINUE 0x10
-#define IPATH_LAYER_INT_BCAST 0x40
-
-extern unsigned ipath_debug; /* debugging bit mask */
-
-#endif /* _IPATH_LAYER_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 9bbe81967f1..1a24c6a4a81 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -42,8 +42,6 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_user_verbs.h>
 
-#include "ipath_layer.h"
-
 #define IPATH_MAX_RDMA_ATOMIC 4
 
 #define QPN_MAX (1 << 24)