aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/ipath/ipath_layer.c
diff options
context:
space:
mode:
authorBryan O'Sullivan <bos@pathscale.com>2006-03-29 18:23:32 -0500
committerRoland Dreier <rolandd@cisco.com>2006-03-31 16:14:20 -0500
commit889ab795a34247c8085e65648051e34f9fec952c (patch)
tree7bfe86ff3105a92a68d9ebfed29c501f016db843 /drivers/infiniband/hw/ipath/ipath_layer.c
parent7f510b46e4771cfb89af134b3aa827d46125a2ce (diff)
IB/ipath: layering interfaces used by higher-level driver code
The layering interfaces are used to implement the Infiniband protocols and the ethernet emulation driver. Signed-off-by: Bryan O'Sullivan <bos@pathscale.com> Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_layer.c')
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.c1515
1 files changed, 1515 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
new file mode 100644
index 000000000000..2cabf6340572
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -0,0 +1,1515 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * These are the routines used by layered drivers, currently just the
35 * layered ethernet driver and verbs layer.
36 */
37
38#include <linux/io.h>
39#include <linux/pci.h>
40#include <asm/byteorder.h>
41
42#include "ipath_kernel.h"
43#include "ips_common.h"
44#include "ipath_layer.h"
45
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

/* Opcode the layered (ethernet) driver wants routed to its l_rcv hook. */
u16 ipath_layer_rcv_opcode;
/* Callbacks installed by ipath_layer_register(); NULL until registration. */
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
/* Callbacks installed by ipath_verbs_register(); NULL until registration. */
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);
/* Nonzero once ipath_verbs_register() has run. */
int ipath_verbs_registered;

/* Per-device attach/detach hooks for the two layered clients. */
static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);
62
63int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
64{
65 int ret = -ENODEV;
66
67 if (dd->ipath_layer.l_arg && layer_intr)
68 ret = layer_intr(dd->ipath_layer.l_arg, arg);
69
70 return ret;
71}
72
/*
 * Locked wrapper around __ipath_layer_intr(): serializes the callback
 * dispatch against layer (un)registration via ipath_layer_mutex.
 */
int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}
85
86int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
87 struct sk_buff *skb)
88{
89 int ret = -ENODEV;
90
91 if (dd->ipath_layer.l_arg && layer_rcv)
92 ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
93
94 return ret;
95}
96
97int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
98{
99 int ret = -ENODEV;
100
101 if (dd->ipath_layer.l_arg && layer_rcv_lid)
102 ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
103
104 return ret;
105}
106
107int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
108{
109 int ret = -ENODEV;
110
111 if (dd->verbs_layer.l_arg && verbs_piobufavail)
112 ret = verbs_piobufavail(dd->verbs_layer.l_arg);
113
114 return ret;
115}
116
117int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
118 u32 tlen)
119{
120 int ret = -ENODEV;
121
122 if (dd->verbs_layer.l_arg && verbs_rcv) {
123 verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
124 ret = 0;
125 }
126
127 return ret;
128}
129
/*
 * ipath_layer_set_linkstate - request an IB link state change
 * @dd: the infinipath device
 * @newstate: one of the IPATH_IB_LINK* request codes
 *
 * The three LINKDOWN variants just issue the IBC command and return 0
 * without waiting.  For INIT/ARM/ACTIVE we issue the command and then
 * wait up to 2000 ms for the link to reach the requested state.
 * Returns 0 on success (or no-op), -EINVAL for an invalid request or
 * transition, or the result of ipath_wait_linkstate().
 */
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		/* already there; nothing to do */
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		/* can only arm from INIT or ACTIVE */
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		/* can only go active from ARMED */
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
213
214/**
215 * ipath_layer_set_mtu - set the MTU
216 * @dd: the infinipath device
217 * @arg: the new MTU
218 *
219 * we can handle "any" incoming size, the issue here is whether we
220 * need to restrict our outgoing size. For now, we don't do any
221 * sanity checking on this, and we don't deal with what happens to
222 * programs that are already running when the size changes.
223 * NOTE: changing the MTU will usually cause the IBC to go back to
224 * link initialize (IPATH_IBSTATE_INIT) state...
225 */
226int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
227{
228 u32 piosize;
229 int changed = 0;
230 int ret;
231
232 /*
233 * mtu is IB data payload max. It's the largest power of 2 less
234 * than piosize (or even larger, since it only really controls the
235 * largest we can receive; we can send the max of the mtu and
236 * piosize). We check that it's one of the valid IB sizes.
237 */
238 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
239 arg != 4096) {
240 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
241 ret = -EINVAL;
242 goto bail;
243 }
244 if (dd->ipath_ibmtu == arg) {
245 ret = 0; /* same as current */
246 goto bail;
247 }
248
249 piosize = dd->ipath_ibmaxlen;
250 dd->ipath_ibmtu = arg;
251
252 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
253 /* Only if it's not the initial value (or reset to it) */
254 if (piosize != dd->ipath_init_ibmaxlen) {
255 dd->ipath_ibmaxlen = piosize;
256 changed = 1;
257 }
258 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
259 piosize = arg + IPATH_PIO_MAXIBHDR;
260 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
261 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
262 arg);
263 dd->ipath_ibmaxlen = piosize;
264 changed = 1;
265 }
266
267 if (changed) {
268 /*
269 * set the IBC maxpktlength to the size of our pio
270 * buffers in words
271 */
272 u64 ibc = dd->ipath_ibcctrl;
273 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
274 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
275
276 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
277 dd->ipath_ibmaxlen = piosize;
278 piosize /= sizeof(u32); /* in words */
279 /*
280 * for ICRC, which we only send in diag test pkt mode, and
281 * we don't need to worry about that for mtu
282 */
283 piosize += 1;
284
285 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
286 dd->ipath_ibcctrl = ibc;
287 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
288 dd->ipath_ibcctrl);
289 dd->ipath_f_tidtemplate(dd);
290 }
291
292 ret = 0;
293
294bail:
295 return ret;
296}
297
298EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
299
/*
 * ipath_set_sps_lid - record the device's LID and LMC
 * @dd: the infinipath device
 * @arg: the new LID
 * @lmc: the new LMC
 *
 * Mirrors the LID into the shared stats area, then notifies the layered
 * driver (if attached) that the LID changed.  Always returns 0.
 */
int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	ipath_stats.sps_lid[dd->ipath_unit] = arg;
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
317
/* Set the device GUID (network byte order).  Always returns 0. */
int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
326
/* Return the device GUID (network byte order). */
__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
333
/* Return the number of GUIDs assigned to this device. */
u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
340
/*
 * Report vendor ID, board revision and chip major/minor revision via
 * the caller-supplied out-parameters.  Always returns 0.
 */
int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
			     u32 * boardrev, u32 * majrev, u32 * minrev)
{
	*vendor = dd->ipath_vendorid;
	*boardrev = dd->ipath_boardrev;
	*majrev = dd->ipath_majrev;
	*minrev = dd->ipath_minrev;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_query_device);
353
/* Return the device's IPATH_* status flag word. */
u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
360
/* Return the generic struct device embedded in the PCI device. */
struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);
367
/* Return the PCI device ID of this chip. */
u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
374
/* Return the last IBC status value captured for this device. */
u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
381
/* Return the currently configured IB MTU. */
u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
388
/*
 * Attach a device to the layered driver and the verbs layer, if their
 * respective add callbacks have been registered.  Called when a device
 * comes up; the returned cookies are stored for later callback dispatch.
 */
void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}
403
/*
 * Detach a device from the layered driver and the verbs layer, invoking
 * their remove callbacks and clearing the stored cookies so no further
 * callbacks are dispatched for this device.
 */
void ipath_layer_del(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg && verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}
420
/*
 * ipath_layer_register - register the layered (ethernet) driver
 * @l_add: per-device attach callback; its return value is the cookie
 *	passed to all other callbacks for that device
 * @l_remove: per-device detach callback
 * @l_intr: event notification callback
 * @l_rcv: packet receive callback
 * @l_rcv_opcode: opcode to route to @l_rcv
 * @l_rcv_lid: LID-based receive callback
 *
 * Records the callbacks, then attaches every already-initialized device
 * that doesn't have a layer cookie yet.  Always returns 0.
 */
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		/*
		 * Drop the spinlock around the callback — presumably
		 * because l_add may sleep; list may change meanwhile,
		 * hence the _safe iteration.  TODO confirm.
		 */
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);
464
/*
 * Unregister the layered driver: detach every device that has a layer
 * cookie, then clear all the layer callbacks.
 */
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			/* drop spinlock around the (possibly sleeping) callback */
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);
494
/*
 * Periodic (1-jiffy) timer for the verbs layer: polls the receive queue
 * on chips whose per-packet interrupts are unreliable, and drives the
 * verbs layer's own timeout processing.  Re-arms itself each run.
 */
static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}
512
513/**
514 * ipath_verbs_register - verbs layer registration
515 * @l_piobufavail: callback for when PIO buffers become available
516 * @l_rcv: callback for receiving a packet
517 * @l_timer_cb: timer callback
518 * @ipath_devdata: device data structure is put here
519 */
520int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
521 void (*l_remove)(void *arg),
522 int (*l_piobufavail) (void *arg),
523 void (*l_rcv) (void *arg, void *rhdr,
524 void *data, u32 tlen),
525 void (*l_timer_cb) (void *arg))
526{
527 struct ipath_devdata *dd, *tmp;
528 unsigned long flags;
529
530 mutex_lock(&ipath_layer_mutex);
531
532 verbs_add_one = l_add;
533 verbs_remove_one = l_remove;
534 verbs_piobufavail = l_piobufavail;
535 verbs_rcv = l_rcv;
536 verbs_timer_cb = l_timer_cb;
537
538 spin_lock_irqsave(&ipath_devs_lock, flags);
539
540 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
541 if (!(dd->ipath_flags & IPATH_INITTED))
542 continue;
543
544 if (dd->verbs_layer.l_arg)
545 continue;
546
547 spin_unlock_irqrestore(&ipath_devs_lock, flags);
548 dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
549 spin_lock_irqsave(&ipath_devs_lock, flags);
550 }
551
552 spin_unlock_irqrestore(&ipath_devs_lock, flags);
553 mutex_unlock(&ipath_layer_mutex);
554
555 ipath_verbs_registered = 1;
556
557 return 0;
558}
559
560EXPORT_SYMBOL_GPL(ipath_verbs_register);
561
562void ipath_verbs_unregister(void)
563{
564 struct ipath_devdata *dd, *tmp;
565 unsigned long flags;
566
567 mutex_lock(&ipath_layer_mutex);
568 spin_lock_irqsave(&ipath_devs_lock, flags);
569
570 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
571 *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
572
573 if (dd->verbs_layer.l_arg && verbs_remove_one) {
574 spin_unlock_irqrestore(&ipath_devs_lock, flags);
575 verbs_remove_one(dd->verbs_layer.l_arg);
576 spin_lock_irqsave(&ipath_devs_lock, flags);
577 dd->verbs_layer.l_arg = NULL;
578 }
579 }
580
581 spin_unlock_irqrestore(&ipath_devs_lock, flags);
582
583 verbs_add_one = NULL;
584 verbs_remove_one = NULL;
585 verbs_piobufavail = NULL;
586 verbs_rcv = NULL;
587 verbs_timer_cb = NULL;
588
589 mutex_unlock(&ipath_layer_mutex);
590}
591
592EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
593
594int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
595{
596 int ret;
597 u32 intval = 0;
598
599 mutex_lock(&ipath_layer_mutex);
600
601 if (!dd->ipath_layer.l_arg) {
602 ret = -EINVAL;
603 goto bail;
604 }
605
606 ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
607
608 if (ret < 0)
609 goto bail;
610
611 *pktmax = dd->ipath_ibmaxlen;
612
613 if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
614 intval |= IPATH_LAYER_INT_IF_UP;
615 if (ipath_stats.sps_lid[dd->ipath_unit])
616 intval |= IPATH_LAYER_INT_LID;
617 if (ipath_stats.sps_mlid[dd->ipath_unit])
618 intval |= IPATH_LAYER_INT_BCAST;
619 /*
620 * do this on open, in case low level is already up and
621 * just layered driver was reloaded, etc.
622 */
623 if (intval)
624 layer_intr(dd->ipath_layer.l_arg, intval);
625
626 ret = 0;
627bail:
628 mutex_unlock(&ipath_layer_mutex);
629
630 return ret;
631}
632
633EXPORT_SYMBOL_GPL(ipath_layer_open);
634
/* Return the device's IB LID. */
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
641
642/**
643 * ipath_layer_get_mac - get the MAC address
644 * @dd: the infinipath device
645 * @mac: the MAC is put here
646 *
647 * This is the EUID-64 OUI octets (top 3), then
648 * skip the next 2 (which should both be zero or 0xff).
649 * The returned MAC is in network order
650 * mac points to at least 6 bytes of buffer
651 * We assume that by the time the LID is set, that the GUID is as valid
652 * as it's ever going to be, rather than adding yet another status bit.
653 */
654
655int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
656{
657 u8 *guid;
658
659 guid = (u8 *) &dd->ipath_guid;
660
661 mac[0] = guid[0];
662 mac[1] = guid[1];
663 mac[2] = guid[2];
664 mac[3] = guid[5];
665 mac[4] = guid[6];
666 mac[5] = guid[7];
667 if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
668 ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
669 "%x %x\n", guid[3], guid[4]);
670 return 0;
671}
672
673EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
674
/* Return the multicast/broadcast LID (MLID). */
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
681
/* Read and return the bad-PKEY error counter register. */
u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
688
/*
 * Advance the SGE state by 'length' bytes of consumed data, stepping to
 * the next scatter/gather entry when the current one is exhausted, or to
 * the next mapped segment of a memory region when the current segment's
 * bytes run out.
 */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		/* this SGE is done; load the next one, if any */
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		/* segment exhausted; advance to the next MR segment */
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
709
/*
 * Byte-shuffling helpers for copy_io(): treat a u32 as a little stream
 * of bytes independent of host endianness.  get_upper_bits() extracts
 * the bytes above 'shift'; set_upper_bits() places bytes into the upper
 * positions; clear_upper_bytes() keeps 'n' bytes starting at byte
 * offset 'off' and zeroes the rest.
 */
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
/* big-endian mirror images of the helpers above */
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
745
/*
 * Copy 'length' bytes described by the SGE state into a PIO buffer,
 * handling source addresses that are not u32-aligned by carrying
 * partial words in 'extra'/'data' across SGE boundaries.  The final
 * word is held back in 'last' and written after a write-combining
 * flush, because the last word is the hardware's send trigger.
 * Callers must not pass length == 0 (BUG_ON below).
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;		/* bytes of carry held in 'data' */
	u32 data = 0;		/* partial-word accumulator */
	u32 last;		/* trigger word, written after the flush */

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			/* whole words: merge carry with each source word */
			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			/* aligned, no carry, not the final chunk */
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
	update_sge(ss, length);
}
878
879/**
880 * ipath_verbs_send - send a packet from the verbs layer
881 * @dd: the infinipath device
882 * @hdrwords: the number of works in the header
883 * @hdr: the packet header
884 * @len: the length of the packet in bytes
885 * @ss: the SGE to send
886 *
887 * This is like ipath_sma_send_pkt() in that we need to be able to send
888 * packets after the chip is initialized (MADs) but also like
889 * ipath_layer_send_hdr() since its used by the verbs layer.
890 */
891int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
892 u32 *hdr, u32 len, struct ipath_sge_state *ss)
893{
894 u32 __iomem *piobuf;
895 u32 plen;
896 int ret;
897
898 /* +1 is for the qword padding of pbc */
899 plen = hdrwords + ((len + 3) >> 2) + 1;
900 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
901 ipath_dbg("packet len 0x%x too long, failing\n", plen);
902 ret = -EINVAL;
903 goto bail;
904 }
905
906 /* Get a PIO buffer to use. */
907 piobuf = ipath_getpiobuf(dd, NULL);
908 if (unlikely(piobuf == NULL)) {
909 ret = -EBUSY;
910 goto bail;
911 }
912
913 /*
914 * Write len to control qword, no flags.
915 * We have to flush after the PBC for correctness on some cpus
916 * or WC buffer can be written out of order.
917 */
918 writeq(plen, piobuf);
919 ipath_flush_wc();
920 piobuf += 2;
921 if (len == 0) {
922 /*
923 * If there is just the header portion, must flush before
924 * writing last word of header for correctness, and after
925 * the last header word (trigger word).
926 */
927 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
928 ipath_flush_wc();
929 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
930 ipath_flush_wc();
931 ret = 0;
932 goto bail;
933 }
934
935 __iowrite32_copy(piobuf, hdr, hdrwords);
936 piobuf += hdrwords;
937
938 /* The common case is aligned and contained in one segment. */
939 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
940 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
941 u32 w;
942
943 /* Need to round up for the last dword in the packet. */
944 w = (len + 3) >> 2;
945 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
946 /* must flush early everything before trigger word */
947 ipath_flush_wc();
948 __raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
949 piobuf + w - 1);
950 /* be sure trigger word is written */
951 ipath_flush_wc();
952 update_sge(ss, len);
953 ret = 0;
954 goto bail;
955 }
956 copy_io(piobuf, ss, len);
957 ret = 0;
958
959bail:
960 return ret;
961}
962
963EXPORT_SYMBOL_GPL(ipath_verbs_send);
964
/*
 * Snapshot the basic traffic counters (send/receive words and packets,
 * and transmit stall count) into the caller-supplied out-parameters.
 * Returns -EINVAL if the device is not initialized/usable.
 */
int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
990
991/**
992 * ipath_layer_get_counters - get various chip counters
993 * @dd: the infinipath device
994 * @cntrs: counters are placed here
995 *
996 * Return the counters needed by recv_pma_get_portcounters().
997 */
998int ipath_layer_get_counters(struct ipath_devdata *dd,
999 struct ipath_layer_counters *cntrs)
1000{
1001 int ret;
1002
1003 if (!(dd->ipath_flags & IPATH_INITTED)) {
1004 /* no hardware, freeze, etc. */
1005 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
1006 ret = -EINVAL;
1007 goto bail;
1008 }
1009 cntrs->symbol_error_counter =
1010 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
1011 cntrs->link_error_recovery_counter =
1012 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
1013 cntrs->link_downed_counter =
1014 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
1015 cntrs->port_rcv_errors =
1016 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
1017 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
1018 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
1019 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
1020 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
1021 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
1022 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
1023 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
1024 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
1025 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
1026 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
1027 cntrs->port_rcv_remphys_errors =
1028 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
1029 cntrs->port_xmit_discards =
1030 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
1031 cntrs->port_xmit_data =
1032 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1033 cntrs->port_rcv_data =
1034 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1035 cntrs->port_xmit_packets =
1036 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1037 cntrs->port_rcv_packets =
1038 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1039
1040 ret = 0;
1041
1042bail:
1043 return ret;
1044}
1045
1046EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
1047
/*
 * Ask the chip to interrupt when a PIO send buffer becomes available.
 * Always returns 0.
 */
int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
1058
/*
 * ipath_layer_send_hdr - send a header-only (ethernet emulation) packet
 * @dd: the infinipath device
 * @hdr: the fully-formed ether_header to transmit
 *
 * Validates that the device is open and the link is up, sanity-checks
 * lrh[0], then copies the header into a PIO buffer with the usual
 * flush-before-trigger-word sequence.  Returns 0 on success, -EINVAL
 * for a bad state or header, -ENETDOWN if the link isn't ready, or
 * -EBUSY when no PIO buffer is available.
 */
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else
		if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		    dd->ipath_lid == 0) {
			/*
			 * lid check is for when sma hasn't yet configured
			 */
			ret = -ENETDOWN;
			ipath_cdbg(VERBOSE, "send while not ready, "
				   "mylid=%u, flags=0x%x\n",
				   dd->ipath_lid, dd->ipath_flags);
		}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2);	/* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf);	/* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen-1;		/* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	/* flush everything before the trigger word, then write it */
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
1119
/*
 * Enable the PIO-buffer-available interrupt.  Always returns 0.
 * (Same operation as ipath_layer_want_buffer().)
 */
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
1130
/*
 * Start the 1-jiffy verbs-layer timer (see __ipath_verbs_timer()), and
 * on boards with GPIO receive interrupts, enable those as well.
 * Always returns 0.
 */
int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
1161
/**
 * ipath_layer_disable_timer - stop the verbs-layer timer
 * @dd: the infinipath device
 *
 * Masks the GPIO bit 2 interrupt (if it was enabled by
 * ipath_layer_enable_timer()) and synchronously deletes the verbs-layer
 * timer.  Always returns 0.
 */
int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}
1172
1173EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
1174
1175/**
1176 * ipath_layer_set_verbs_flags - set the verbs layer flags
1177 * @dd: the infinipath device
1178 * @flags: the flags to set
1179 */
1180int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1181{
1182 struct ipath_devdata *ss;
1183 unsigned long lflags;
1184
1185 spin_lock_irqsave(&ipath_devs_lock, lflags);
1186
1187 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1188 if (!(ss->ipath_flags & IPATH_INITTED))
1189 continue;
1190 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1191 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1192 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1193 else
1194 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1195 }
1196
1197 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1198
1199 return 0;
1200}
1201
1202EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
1203
1204/**
1205 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1206 * @dd: the infinipath device
1207 */
1208unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1209{
1210 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1211}
1212
1213EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
1214
1215/**
1216 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1217 * @dd: the infinipath device
1218 * @index: the PKEY index
1219 */
1220unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1221{
1222 unsigned ret;
1223
1224 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1225 ret = 0;
1226 else
1227 ret = dd->ipath_pd[0]->port_pkeys[index];
1228
1229 return ret;
1230}
1231
1232EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
1233
1234/**
1235 * ipath_layer_get_pkeys - return the PKEY table for port 0
1236 * @dd: the infinipath device
1237 * @pkeys: the pkey table is placed here
1238 */
1239int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1240{
1241 struct ipath_portdata *pd = dd->ipath_pd[0];
1242
1243 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1244
1245 return 0;
1246}
1247
1248EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
1249
1250/**
1251 * rm_pkey - decrecment the reference count for the given PKEY
1252 * @dd: the infinipath device
1253 * @key: the PKEY index
1254 *
1255 * Return true if this was the last reference and the hardware table entry
1256 * needs to be changed.
1257 */
1258static int rm_pkey(struct ipath_devdata *dd, u16 key)
1259{
1260 int i;
1261 int ret;
1262
1263 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1264 if (dd->ipath_pkeys[i] != key)
1265 continue;
1266 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1267 dd->ipath_pkeys[i] = 0;
1268 ret = 1;
1269 goto bail;
1270 }
1271 break;
1272 }
1273
1274 ret = 0;
1275
1276bail:
1277 return ret;
1278}
1279
1280/**
1281 * add_pkey - add the given PKEY to the hardware table
1282 * @dd: the infinipath device
1283 * @key: the PKEY
1284 *
1285 * Return an error code if unable to add the entry, zero if no change,
1286 * or 1 if the hardware PKEY register needs to be updated.
1287 */
1288static int add_pkey(struct ipath_devdata *dd, u16 key)
1289{
1290 int i;
1291 u16 lkey = key & 0x7FFF;
1292 int any = 0;
1293 int ret;
1294
1295 if (lkey == 0x7FFF) {
1296 ret = 0;
1297 goto bail;
1298 }
1299
1300 /* Look for an empty slot or a matching PKEY. */
1301 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1302 if (!dd->ipath_pkeys[i]) {
1303 any++;
1304 continue;
1305 }
1306 /* If it matches exactly, try to increment the ref count */
1307 if (dd->ipath_pkeys[i] == key) {
1308 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1309 ret = 0;
1310 goto bail;
1311 }
1312 /* Lost the race. Look for an empty slot below. */
1313 atomic_dec(&dd->ipath_pkeyrefs[i]);
1314 any++;
1315 }
1316 /*
1317 * It makes no sense to have both the limited and unlimited
1318 * PKEY set at the same time since the unlimited one will
1319 * disable the limited one.
1320 */
1321 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1322 ret = -EEXIST;
1323 goto bail;
1324 }
1325 }
1326 if (!any) {
1327 ret = -EBUSY;
1328 goto bail;
1329 }
1330 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1331 if (!dd->ipath_pkeys[i] &&
1332 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1333 /* for ipathstats, etc. */
1334 ipath_stats.sps_pkeys[i] = lkey;
1335 dd->ipath_pkeys[i] = key;
1336 ret = 1;
1337 goto bail;
1338 }
1339 }
1340 ret = -EBUSY;
1341
1342bail:
1343 return ret;
1344}
1345
1346/**
1347 * ipath_layer_set_pkeys - set the PKEY table for port 0
1348 * @dd: the infinipath device
1349 * @pkeys: the PKEY table
1350 */
1351int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1352{
1353 struct ipath_portdata *pd;
1354 int i;
1355 int changed = 0;
1356
1357 pd = dd->ipath_pd[0];
1358
1359 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1360 u16 key = pkeys[i];
1361 u16 okey = pd->port_pkeys[i];
1362
1363 if (key == okey)
1364 continue;
1365 /*
1366 * The value of this PKEY table entry is changing.
1367 * Remove the old entry in the hardware's array of PKEYs.
1368 */
1369 if (okey & 0x7FFF)
1370 changed |= rm_pkey(dd, okey);
1371 if (key & 0x7FFF) {
1372 int ret = add_pkey(dd, key);
1373
1374 if (ret < 0)
1375 key = 0;
1376 else
1377 changed |= ret;
1378 }
1379 pd->port_pkeys[i] = key;
1380 }
1381 if (changed) {
1382 u64 pkey;
1383
1384 pkey = (u64) dd->ipath_pkeys[0] |
1385 ((u64) dd->ipath_pkeys[1] << 16) |
1386 ((u64) dd->ipath_pkeys[2] << 32) |
1387 ((u64) dd->ipath_pkeys[3] << 48);
1388 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1389 (unsigned long long) pkey);
1390 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1391 pkey);
1392 }
1393 return 0;
1394}
1395
1396EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
1397
1398/**
1399 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1400 * @dd: the infinipath device
1401 *
1402 * Returns zero if the default is POLL, 1 if the default is SLEEP.
1403 */
1404int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1405{
1406 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1407}
1408
1409EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
1410
1411/**
1412 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1413 * @dd: the infinipath device
1414 * @sleep: the new state
1415 *
1416 * Note that this will only take effect when the link state changes.
1417 */
1418int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1419 int sleep)
1420{
1421 if (sleep)
1422 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1423 else
1424 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1425 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1426 dd->ipath_ibcctrl);
1427 return 0;
1428}
1429
1430EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
1431
1432int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1433{
1434 return (dd->ipath_ibcctrl >>
1435 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1436 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1437}
1438
1439EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
1440
1441/**
1442 * ipath_layer_set_phyerrthreshold - set the physical error threshold
1443 * @dd: the infinipath device
1444 * @n: the new threshold
1445 *
1446 * Note that this will only take effect when the link state changes.
1447 */
1448int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1449{
1450 unsigned v;
1451
1452 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1453 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1454 if (v != n) {
1455 dd->ipath_ibcctrl &=
1456 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1457 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1458 dd->ipath_ibcctrl |=
1459 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1460 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1461 dd->ipath_ibcctrl);
1462 }
1463 return 0;
1464}
1465
1466EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
1467
1468int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1469{
1470 return (dd->ipath_ibcctrl >>
1471 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1472 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1473}
1474
1475EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
1476
1477/**
1478 * ipath_layer_set_overrunthreshold - set the overrun threshold
1479 * @dd: the infinipath device
1480 * @n: the new threshold
1481 *
1482 * Note that this will only take effect when the link state changes.
1483 */
1484int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1485{
1486 unsigned v;
1487
1488 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1489 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1490 if (v != n) {
1491 dd->ipath_ibcctrl &=
1492 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1493 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1494 dd->ipath_ibcctrl |=
1495 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1496 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1497 dd->ipath_ibcctrl);
1498 }
1499 return 0;
1500}
1501
1502EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
1503
/**
 * ipath_layer_get_boardname - get the board name into a caller buffer
 * @dd: the infinipath device
 * @name: destination buffer for the name string
 * @namelen: size of @name in bytes
 *
 * Delegates to the chip-specific get_boardname hook; returns whatever
 * that hook returns.
 */
int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}
EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
1510
/**
 * ipath_layer_get_rcvhdrentsize - return the receive header entry size
 * @dd: the infinipath device
 */
u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}
EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);