author    Bryan O'Sullivan <bos@pathscale.com>  2006-08-25 14:24:32 -0400
committer Roland Dreier <rolandd@cisco.com>  2006-09-22 18:22:31 -0400
commit    34b2aafea38efdf02cd8107a6e1057e2a297c447 (patch)
tree      fc800510f947696156df70cf6608f8283bab868c  /drivers/infiniband/hw/ipath/ipath_layer.c
parent    b1c1b6a30eac88665a35a207cc5e6233090b9d65 (diff)
IB/ipath: simplify layering code
A lot of ipath layer code was only called in one place.  Now that the
ipath_core and ib_ipath drivers are merged, it's more sensible to simply
inline the simple stuff that the layer code was doing.

Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
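To make the pattern concrete, here is a minimal illustrative sketch (not part of the commit): the wrapper body is taken from the ipath_layer_get_ibmtu() accessor deleted below, while the call sites are hypothetical examples of what the inlining looks like once the drivers share one module.

	/* trivial layer wrapper removed by this patch (see the deleted code below) */
	u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
	{
		return dd->ipath_ibmtu;
	}

	/* before: a caller in the verbs code goes through the layer (hypothetical call site) */
	u32 mtu = ipath_layer_get_ibmtu(dd);

	/* after: with ipath_core and ib_ipath merged, the field can be read directly */
	u32 mtu = dd->ipath_ibmtu;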
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_layer.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c  978
1 file changed, 1 insertion(+), 977 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index acc32200cc0e..10f578e2aed6 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -101,242 +101,14 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
 	return ret;
 }
 
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+void ipath_layer_lid_changed(struct ipath_devdata *dd)
 {
106 u32 lstate;
107 int ret;
108
109 switch (newstate) {
110 case IPATH_IB_LINKDOWN:
111 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
112 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
113 /* don't wait */
114 ret = 0;
115 goto bail;
116
117 case IPATH_IB_LINKDOWN_SLEEP:
118 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
119 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
120 /* don't wait */
121 ret = 0;
122 goto bail;
123
124 case IPATH_IB_LINKDOWN_DISABLE:
125 ipath_set_ib_lstate(dd,
126 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
127 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
128 /* don't wait */
129 ret = 0;
130 goto bail;
131
132 case IPATH_IB_LINKINIT:
133 if (dd->ipath_flags & IPATH_LINKINIT) {
134 ret = 0;
135 goto bail;
136 }
137 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
138 INFINIPATH_IBCC_LINKCMD_SHIFT);
139 lstate = IPATH_LINKINIT;
140 break;
141
142 case IPATH_IB_LINKARM:
143 if (dd->ipath_flags & IPATH_LINKARMED) {
144 ret = 0;
145 goto bail;
146 }
147 if (!(dd->ipath_flags &
148 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
149 ret = -EINVAL;
150 goto bail;
151 }
152 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
153 INFINIPATH_IBCC_LINKCMD_SHIFT);
154 /*
155 * Since the port can transition to ACTIVE by receiving
156 * a non VL 15 packet, wait for either state.
157 */
158 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
159 break;
160
161 case IPATH_IB_LINKACTIVE:
162 if (dd->ipath_flags & IPATH_LINKACTIVE) {
163 ret = 0;
164 goto bail;
165 }
166 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
167 ret = -EINVAL;
168 goto bail;
169 }
170 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
171 INFINIPATH_IBCC_LINKCMD_SHIFT);
172 lstate = IPATH_LINKACTIVE;
173 break;
174
175 default:
176 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
177 ret = -EINVAL;
178 goto bail;
179 }
180 ret = ipath_wait_linkstate(dd, lstate, 2000);
181
182bail:
183 return ret;
184}
185
186/**
187 * ipath_layer_set_mtu - set the MTU
188 * @dd: the infinipath device
189 * @arg: the new MTU
190 *
191 * we can handle "any" incoming size, the issue here is whether we
192 * need to restrict our outgoing size. For now, we don't do any
193 * sanity checking on this, and we don't deal with what happens to
194 * programs that are already running when the size changes.
195 * NOTE: changing the MTU will usually cause the IBC to go back to
196 * link initialize (IPATH_IBSTATE_INIT) state...
197 */
198int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
199{
200 u32 piosize;
201 int changed = 0;
202 int ret;
203
204 /*
205 * mtu is IB data payload max. It's the largest power of 2 less
206 * than piosize (or even larger, since it only really controls the
207 * largest we can receive; we can send the max of the mtu and
208 * piosize). We check that it's one of the valid IB sizes.
209 */
210 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
211 arg != 4096) {
212 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
213 ret = -EINVAL;
214 goto bail;
215 }
216 if (dd->ipath_ibmtu == arg) {
217 ret = 0; /* same as current */
218 goto bail;
219 }
220
221 piosize = dd->ipath_ibmaxlen;
222 dd->ipath_ibmtu = arg;
223
224 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
225 /* Only if it's not the initial value (or reset to it) */
226 if (piosize != dd->ipath_init_ibmaxlen) {
227 dd->ipath_ibmaxlen = piosize;
228 changed = 1;
229 }
230 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
231 piosize = arg + IPATH_PIO_MAXIBHDR;
232 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
233 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
234 arg);
235 dd->ipath_ibmaxlen = piosize;
236 changed = 1;
237 }
238
239 if (changed) {
240 /*
241 * set the IBC maxpktlength to the size of our pio
242 * buffers in words
243 */
244 u64 ibc = dd->ipath_ibcctrl;
245 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
246 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
247
248 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
249 dd->ipath_ibmaxlen = piosize;
250 piosize /= sizeof(u32); /* in words */
251 /*
252 * for ICRC, which we only send in diag test pkt mode, and
253 * we don't need to worry about that for mtu
254 */
255 piosize += 1;
256
257 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
258 dd->ipath_ibcctrl = ibc;
259 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
260 dd->ipath_ibcctrl);
261 dd->ipath_f_tidtemplate(dd);
262 }
263
264 ret = 0;
265
266bail:
267 return ret;
268}
269
270int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
271{
272 dd->ipath_lid = arg;
273 dd->ipath_lmc = lmc;
274
 	mutex_lock(&ipath_layer_mutex);
 
 	if (dd->ipath_layer.l_arg && layer_intr)
 		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
 
 	mutex_unlock(&ipath_layer_mutex);
281
282 return 0;
283}
284
285int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
286{
287 /* XXX - need to inform anyone who cares this just happened. */
288 dd->ipath_guid = guid;
289 return 0;
290}
291
292__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
293{
294 return dd->ipath_guid;
295}
296
297u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
298{
299 return dd->ipath_majrev;
300}
301
302u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
303{
304 return dd->ipath_minrev;
305}
306
307u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
308{
309 return dd->ipath_pcirev;
310}
311
312u32 ipath_layer_get_flags(struct ipath_devdata *dd)
313{
314 return dd->ipath_flags;
315}
316
317struct device *ipath_layer_get_device(struct ipath_devdata *dd)
318{
319 return &dd->pcidev->dev;
320}
321
322u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
323{
324 return dd->ipath_deviceid;
325}
326
327u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
328{
329 return dd->ipath_vendorid;
330}
331
332u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
333{
334 return dd->ipath_lastibcstat;
335}
336
337u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
338{
339 return dd->ipath_ibmtu;
 }
 
 void ipath_layer_add(struct ipath_devdata *dd)
@@ -436,22 +208,6 @@ void ipath_layer_unregister(void)
 
 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
 
439static void __ipath_verbs_timer(unsigned long arg)
440{
441 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
442
443 /*
444 * If port 0 receive packet interrupts are not available, or
445 * can be missed, poll the receive queue
446 */
447 if (dd->ipath_flags & IPATH_POLL_RX_INTR)
448 ipath_kreceive(dd);
449
450 /* Handle verbs layer timeouts. */
451 ipath_ib_timer(dd->verbs_dev);
452 mod_timer(&dd->verbs_timer, jiffies + 1);
453}
454
 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
 {
 	int ret;
@@ -540,380 +296,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
 
543u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
544{
545 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
546}
547
548static void update_sge(struct ipath_sge_state *ss, u32 length)
549{
550 struct ipath_sge *sge = &ss->sge;
551
552 sge->vaddr += length;
553 sge->length -= length;
554 sge->sge_length -= length;
555 if (sge->sge_length == 0) {
556 if (--ss->num_sge)
557 *sge = *ss->sg_list++;
558 } else if (sge->length == 0 && sge->mr != NULL) {
559 if (++sge->n >= IPATH_SEGSZ) {
560 if (++sge->m >= sge->mr->mapsz)
561 return;
562 sge->n = 0;
563 }
564 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
565 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
566 }
567}
568
569#ifdef __LITTLE_ENDIAN
570static inline u32 get_upper_bits(u32 data, u32 shift)
571{
572 return data >> shift;
573}
574
575static inline u32 set_upper_bits(u32 data, u32 shift)
576{
577 return data << shift;
578}
579
580static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
581{
582 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
583 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
584 return data;
585}
586#else
587static inline u32 get_upper_bits(u32 data, u32 shift)
588{
589 return data << shift;
590}
591
592static inline u32 set_upper_bits(u32 data, u32 shift)
593{
594 return data >> shift;
595}
596
597static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
598{
599 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
600 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
601 return data;
602}
603#endif
604
605static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
606 u32 length)
607{
608 u32 extra = 0;
609 u32 data = 0;
610 u32 last;
611
612 while (1) {
613 u32 len = ss->sge.length;
614 u32 off;
615
616 BUG_ON(len == 0);
617 if (len > length)
618 len = length;
619 if (len > ss->sge.sge_length)
620 len = ss->sge.sge_length;
621 /* If the source address is not aligned, try to align it. */
622 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
623 if (off) {
624 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
625 ~(sizeof(u32) - 1));
626 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
627 u32 y;
628
629 y = sizeof(u32) - off;
630 if (len > y)
631 len = y;
632 if (len + extra >= sizeof(u32)) {
633 data |= set_upper_bits(v, extra *
634 BITS_PER_BYTE);
635 len = sizeof(u32) - extra;
636 if (len == length) {
637 last = data;
638 break;
639 }
640 __raw_writel(data, piobuf);
641 piobuf++;
642 extra = 0;
643 data = 0;
644 } else {
645 /* Clear unused upper bytes */
646 data |= clear_upper_bytes(v, len, extra);
647 if (len == length) {
648 last = data;
649 break;
650 }
651 extra += len;
652 }
653 } else if (extra) {
654 /* Source address is aligned. */
655 u32 *addr = (u32 *) ss->sge.vaddr;
656 int shift = extra * BITS_PER_BYTE;
657 int ushift = 32 - shift;
658 u32 l = len;
659
660 while (l >= sizeof(u32)) {
661 u32 v = *addr;
662
663 data |= set_upper_bits(v, shift);
664 __raw_writel(data, piobuf);
665 data = get_upper_bits(v, ushift);
666 piobuf++;
667 addr++;
668 l -= sizeof(u32);
669 }
670 /*
671 * We still have 'extra' number of bytes leftover.
672 */
673 if (l) {
674 u32 v = *addr;
675
676 if (l + extra >= sizeof(u32)) {
677 data |= set_upper_bits(v, shift);
678 len -= l + extra - sizeof(u32);
679 if (len == length) {
680 last = data;
681 break;
682 }
683 __raw_writel(data, piobuf);
684 piobuf++;
685 extra = 0;
686 data = 0;
687 } else {
688 /* Clear unused upper bytes */
689 data |= clear_upper_bytes(v, l,
690 extra);
691 if (len == length) {
692 last = data;
693 break;
694 }
695 extra += l;
696 }
697 } else if (len == length) {
698 last = data;
699 break;
700 }
701 } else if (len == length) {
702 u32 w;
703
704 /*
705 * Need to round up for the last dword in the
706 * packet.
707 */
708 w = (len + 3) >> 2;
709 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
710 piobuf += w - 1;
711 last = ((u32 *) ss->sge.vaddr)[w - 1];
712 break;
713 } else {
714 u32 w = len >> 2;
715
716 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
717 piobuf += w;
718
719 extra = len & (sizeof(u32) - 1);
720 if (extra) {
721 u32 v = ((u32 *) ss->sge.vaddr)[w];
722
723 /* Clear unused upper bytes */
724 data = clear_upper_bytes(v, extra, 0);
725 }
726 }
727 update_sge(ss, len);
728 length -= len;
729 }
730 /* Update address before sending packet. */
731 update_sge(ss, length);
732 /* must flush early everything before trigger word */
733 ipath_flush_wc();
734 __raw_writel(last, piobuf);
735 /* be sure trigger word is written */
736 ipath_flush_wc();
737}
738
739/**
740 * ipath_verbs_send - send a packet from the verbs layer
741 * @dd: the infinipath device
742 * @hdrwords: the number of words in the header
743 * @hdr: the packet header
744 * @len: the length of the packet in bytes
745 * @ss: the SGE to send
746 *
747 * This is like ipath_sma_send_pkt() in that we need to be able to send
748 * packets after the chip is initialized (MADs) but also like
749 * ipath_layer_send_hdr() since its used by the verbs layer.
750 */
751int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
752 u32 *hdr, u32 len, struct ipath_sge_state *ss)
753{
754 u32 __iomem *piobuf;
755 u32 plen;
756 int ret;
757
758 /* +1 is for the qword padding of pbc */
759 plen = hdrwords + ((len + 3) >> 2) + 1;
760 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
761 ipath_dbg("packet len 0x%x too long, failing\n", plen);
762 ret = -EINVAL;
763 goto bail;
764 }
765
766 /* Get a PIO buffer to use. */
767 piobuf = ipath_getpiobuf(dd, NULL);
768 if (unlikely(piobuf == NULL)) {
769 ret = -EBUSY;
770 goto bail;
771 }
772
773 /*
774 * Write len to control qword, no flags.
775 * We have to flush after the PBC for correctness on some cpus
776 * or WC buffer can be written out of order.
777 */
778 writeq(plen, piobuf);
779 ipath_flush_wc();
780 piobuf += 2;
781 if (len == 0) {
782 /*
783 * If there is just the header portion, must flush before
784 * writing last word of header for correctness, and after
785 * the last header word (trigger word).
786 */
787 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
788 ipath_flush_wc();
789 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
790 ipath_flush_wc();
791 ret = 0;
792 goto bail;
793 }
794
795 __iowrite32_copy(piobuf, hdr, hdrwords);
796 piobuf += hdrwords;
797
798 /* The common case is aligned and contained in one segment. */
799 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
800 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
801 u32 w;
802 u32 *addr = (u32 *) ss->sge.vaddr;
803
804 /* Update address before sending packet. */
805 update_sge(ss, len);
806 /* Need to round up for the last dword in the packet. */
807 w = (len + 3) >> 2;
808 __iowrite32_copy(piobuf, addr, w - 1);
809 /* must flush early everything before trigger word */
810 ipath_flush_wc();
811 __raw_writel(addr[w - 1], piobuf + w - 1);
812 /* be sure trigger word is written */
813 ipath_flush_wc();
814 ret = 0;
815 goto bail;
816 }
817 copy_io(piobuf, ss, len);
818 ret = 0;
819
820bail:
821 return ret;
822}
823
824int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
825 u64 *rwords, u64 *spkts, u64 *rpkts,
826 u64 *xmit_wait)
827{
828 int ret;
829
830 if (!(dd->ipath_flags & IPATH_INITTED)) {
831 /* no hardware, freeze, etc. */
832 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
833 ret = -EINVAL;
834 goto bail;
835 }
836 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
837 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
838 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
839 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
840 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
841
842 ret = 0;
843
844bail:
845 return ret;
846}
847
848/**
849 * ipath_layer_get_counters - get various chip counters
850 * @dd: the infinipath device
851 * @cntrs: counters are placed here
852 *
853 * Return the counters needed by recv_pma_get_portcounters().
854 */
855int ipath_layer_get_counters(struct ipath_devdata *dd,
856 struct ipath_layer_counters *cntrs)
857{
858 int ret;
859
860 if (!(dd->ipath_flags & IPATH_INITTED)) {
861 /* no hardware, freeze, etc. */
862 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
863 ret = -EINVAL;
864 goto bail;
865 }
866 cntrs->symbol_error_counter =
867 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
868 cntrs->link_error_recovery_counter =
869 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
870 /*
871 * The link downed counter counts when the other side downs the
872 * connection. We add in the number of times we downed the link
873 * due to local link integrity errors to compensate.
874 */
875 cntrs->link_downed_counter =
876 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
877 cntrs->port_rcv_errors =
878 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
879 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
880 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
881 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
882 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
883 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
884 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
885 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
886 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
887 cntrs->port_rcv_remphys_errors =
888 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
889 cntrs->port_xmit_discards =
890 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
891 cntrs->port_xmit_data =
892 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
893 cntrs->port_rcv_data =
894 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
895 cntrs->port_xmit_packets =
896 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
897 cntrs->port_rcv_packets =
898 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
899 cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
900 cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
901
902 ret = 0;
903
904bail:
905 return ret;
906}
907
908int ipath_layer_want_buffer(struct ipath_devdata *dd)
909{
910 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
911 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
912 dd->ipath_sendctrl);
913
914 return 0;
915}
916
 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
 {
 	int ret = 0;
@@ -985,361 +367,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
 }
 
 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
988
989int ipath_layer_enable_timer(struct ipath_devdata *dd)
990{
991 /*
992 * HT-400 has a design flaw where the chip and kernel idea
993 * of the tail register don't always agree, and therefore we won't
994 * get an interrupt on the next packet received.
995 * If the board supports per packet receive interrupts, use it.
996 * Otherwise, the timer function periodically checks for packets
997 * to cover this case.
998 * Either way, the timer is needed for verbs layer related
999 * processing.
1000 */
1001 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1002 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1003 0x2074076542310ULL);
1004 /* Enable GPIO bit 2 interrupt */
1005 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1006 (u64) (1 << 2));
1007 }
1008
1009 init_timer(&dd->verbs_timer);
1010 dd->verbs_timer.function = __ipath_verbs_timer;
1011 dd->verbs_timer.data = (unsigned long)dd;
1012 dd->verbs_timer.expires = jiffies + 1;
1013 add_timer(&dd->verbs_timer);
1014
1015 return 0;
1016}
1017
1018int ipath_layer_disable_timer(struct ipath_devdata *dd)
1019{
1020 /* Disable GPIO bit 2 interrupt */
1021 if (dd->ipath_flags & IPATH_GPIO_INTR)
1022 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1023
1024 del_timer_sync(&dd->verbs_timer);
1025
1026 return 0;
1027}
1028
1029/**
1030 * ipath_layer_set_verbs_flags - set the verbs layer flags
1031 * @dd: the infinipath device
1032 * @flags: the flags to set
1033 */
1034int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1035{
1036 struct ipath_devdata *ss;
1037 unsigned long lflags;
1038
1039 spin_lock_irqsave(&ipath_devs_lock, lflags);
1040
1041 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1042 if (!(ss->ipath_flags & IPATH_INITTED))
1043 continue;
1044 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1045 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1046 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1047 else
1048 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1049 }
1050
1051 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1052
1053 return 0;
1054}
1055
1056/**
1057 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1058 * @dd: the infinipath device
1059 */
1060unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1061{
1062 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1063}
1064
1065/**
1066 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1067 * @dd: the infinipath device
1068 * @index: the PKEY index
1069 */
1070unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1071{
1072 unsigned ret;
1073
1074 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1075 ret = 0;
1076 else
1077 ret = dd->ipath_pd[0]->port_pkeys[index];
1078
1079 return ret;
1080}
1081
1082/**
1083 * ipath_layer_get_pkeys - return the PKEY table for port 0
1084 * @dd: the infinipath device
1085 * @pkeys: the pkey table is placed here
1086 */
1087int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1088{
1089 struct ipath_portdata *pd = dd->ipath_pd[0];
1090
1091 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1092
1093 return 0;
1094}
1095
1096/**
1097 * rm_pkey - decrecment the reference count for the given PKEY
1098 * @dd: the infinipath device
1099 * @key: the PKEY index
1100 *
1101 * Return true if this was the last reference and the hardware table entry
1102 * needs to be changed.
1103 */
1104static int rm_pkey(struct ipath_devdata *dd, u16 key)
1105{
1106 int i;
1107 int ret;
1108
1109 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1110 if (dd->ipath_pkeys[i] != key)
1111 continue;
1112 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1113 dd->ipath_pkeys[i] = 0;
1114 ret = 1;
1115 goto bail;
1116 }
1117 break;
1118 }
1119
1120 ret = 0;
1121
1122bail:
1123 return ret;
1124}
1125
1126/**
1127 * add_pkey - add the given PKEY to the hardware table
1128 * @dd: the infinipath device
1129 * @key: the PKEY
1130 *
1131 * Return an error code if unable to add the entry, zero if no change,
1132 * or 1 if the hardware PKEY register needs to be updated.
1133 */
1134static int add_pkey(struct ipath_devdata *dd, u16 key)
1135{
1136 int i;
1137 u16 lkey = key & 0x7FFF;
1138 int any = 0;
1139 int ret;
1140
1141 if (lkey == 0x7FFF) {
1142 ret = 0;
1143 goto bail;
1144 }
1145
1146 /* Look for an empty slot or a matching PKEY. */
1147 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1148 if (!dd->ipath_pkeys[i]) {
1149 any++;
1150 continue;
1151 }
1152 /* If it matches exactly, try to increment the ref count */
1153 if (dd->ipath_pkeys[i] == key) {
1154 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1155 ret = 0;
1156 goto bail;
1157 }
1158 /* Lost the race. Look for an empty slot below. */
1159 atomic_dec(&dd->ipath_pkeyrefs[i]);
1160 any++;
1161 }
1162 /*
1163 * It makes no sense to have both the limited and unlimited
1164 * PKEY set at the same time since the unlimited one will
1165 * disable the limited one.
1166 */
1167 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1168 ret = -EEXIST;
1169 goto bail;
1170 }
1171 }
1172 if (!any) {
1173 ret = -EBUSY;
1174 goto bail;
1175 }
1176 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1177 if (!dd->ipath_pkeys[i] &&
1178 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1179 /* for ipathstats, etc. */
1180 ipath_stats.sps_pkeys[i] = lkey;
1181 dd->ipath_pkeys[i] = key;
1182 ret = 1;
1183 goto bail;
1184 }
1185 }
1186 ret = -EBUSY;
1187
1188bail:
1189 return ret;
1190}
1191
1192/**
1193 * ipath_layer_set_pkeys - set the PKEY table for port 0
1194 * @dd: the infinipath device
1195 * @pkeys: the PKEY table
1196 */
1197int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1198{
1199 struct ipath_portdata *pd;
1200 int i;
1201 int changed = 0;
1202
1203 pd = dd->ipath_pd[0];
1204
1205 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1206 u16 key = pkeys[i];
1207 u16 okey = pd->port_pkeys[i];
1208
1209 if (key == okey)
1210 continue;
1211 /*
1212 * The value of this PKEY table entry is changing.
1213 * Remove the old entry in the hardware's array of PKEYs.
1214 */
1215 if (okey & 0x7FFF)
1216 changed |= rm_pkey(dd, okey);
1217 if (key & 0x7FFF) {
1218 int ret = add_pkey(dd, key);
1219
1220 if (ret < 0)
1221 key = 0;
1222 else
1223 changed |= ret;
1224 }
1225 pd->port_pkeys[i] = key;
1226 }
1227 if (changed) {
1228 u64 pkey;
1229
1230 pkey = (u64) dd->ipath_pkeys[0] |
1231 ((u64) dd->ipath_pkeys[1] << 16) |
1232 ((u64) dd->ipath_pkeys[2] << 32) |
1233 ((u64) dd->ipath_pkeys[3] << 48);
1234 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1235 (unsigned long long) pkey);
1236 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1237 pkey);
1238 }
1239 return 0;
1240}
1241
1242/**
1243 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1244 * @dd: the infinipath device
1245 *
1246 * Returns zero if the default is POLL, 1 if the default is SLEEP.
1247 */
1248int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1249{
1250 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1251}
1252
1253/**
1254 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1255 * @dd: the infinipath device
1256 * @sleep: the new state
1257 *
1258 * Note that this will only take effect when the link state changes.
1259 */
1260int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1261 int sleep)
1262{
1263 if (sleep)
1264 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1265 else
1266 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1267 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1268 dd->ipath_ibcctrl);
1269 return 0;
1270}
1271
1272int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1273{
1274 return (dd->ipath_ibcctrl >>
1275 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1276 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1277}
1278
1279/**
1280 * ipath_layer_set_phyerrthreshold - set the physical error threshold
1281 * @dd: the infinipath device
1282 * @n: the new threshold
1283 *
1284 * Note that this will only take effect when the link state changes.
1285 */
1286int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1287{
1288 unsigned v;
1289
1290 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1291 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1292 if (v != n) {
1293 dd->ipath_ibcctrl &=
1294 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1295 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1296 dd->ipath_ibcctrl |=
1297 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1298 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1299 dd->ipath_ibcctrl);
1300 }
1301 return 0;
1302}
1303
1304int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1305{
1306 return (dd->ipath_ibcctrl >>
1307 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1308 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1309}
1310
1311/**
1312 * ipath_layer_set_overrunthreshold - set the overrun threshold
1313 * @dd: the infinipath device
1314 * @n: the new threshold
1315 *
1316 * Note that this will only take effect when the link state changes.
1317 */
1318int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1319{
1320 unsigned v;
1321
1322 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1323 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1324 if (v != n) {
1325 dd->ipath_ibcctrl &=
1326 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1327 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1328 dd->ipath_ibcctrl |=
1329 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1330 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1331 dd->ipath_ibcctrl);
1332 }
1333 return 0;
1334}
1335
1336int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1337 size_t namelen)
1338{
1339 return dd->ipath_f_get_boardname(dd, name, namelen);
1340}
1341
1342u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1343{
1344 return dd->ipath_rcvhdrentsize;
1345}