Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_layer.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c | 1179
1 file changed, 2 insertions, 1177 deletions

diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index b28c6f81c731..e46aa4ed2a7e 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -42,26 +42,20 @@
 
 #include "ipath_kernel.h"
 #include "ipath_layer.h"
+#include "ipath_verbs.h"
 #include "ipath_common.h"
 
 /* Acquire before ipath_devs_lock. */
 static DEFINE_MUTEX(ipath_layer_mutex);
 
-static int ipath_verbs_registered;
-
 u16 ipath_layer_rcv_opcode;
 
 static int (*layer_intr)(void *, u32);
 static int (*layer_rcv)(void *, void *, struct sk_buff *);
 static int (*layer_rcv_lid)(void *, void *);
-static int (*verbs_piobufavail)(void *);
-static void (*verbs_rcv)(void *, void *, void *, u32);
 
 static void *(*layer_add_one)(int, struct ipath_devdata *);
 static void (*layer_remove_one)(void *);
-static void *(*verbs_add_one)(int, struct ipath_devdata *);
-static void (*verbs_remove_one)(void *);
-static void (*verbs_timer_cb)(void *);
 
 int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
 {
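
The hunk above strips the verbs_* callback pointers out of this file while keeping the generic layer_* ones, so the only coupling left is registration through stored function pointers. As a quick illustration of that pattern, here is a minimal user-space C sketch in the same shape as ipath_layer_register() and __ipath_layer_intr(); every demo_* name is hypothetical, and the -1 return merely stands in for -ENODEV.

/*
 * Registration-by-function-pointer sketch.  The lower layer stores a
 * callback supplied by an upper layer and invokes it through a wrapper
 * that tolerates no upper layer being registered at all.
 * All demo_* names are invented for this illustration.
 */
#include <stdio.h>

static int (*demo_intr)(void *arg, unsigned int cause);
static void *demo_arg;

/* Mirrors ipath_layer_register(): stash the caller's hook. */
static void demo_register(int (*cb)(void *, unsigned int), void *arg)
{
	demo_intr = cb;
	demo_arg = arg;
}

/* Mirrors __ipath_layer_intr(): call through the pointer if set. */
static int demo_fire(unsigned int cause)
{
	return demo_intr ? demo_intr(demo_arg, cause) : -1; /* cf. -ENODEV */
}

static int my_handler(void *arg, unsigned int cause)
{
	printf("%s: interrupt cause 0x%x\n", (const char *)arg, cause);
	return 0;
}

int main(void)
{
	demo_register(my_handler, "upper-layer");
	return demo_fire(0x4) ? 1 : 0;
}

The NULL test before the indirect call is what lets the core driver keep running when no upper layer is loaded; the real wrappers return -ENODEV in that case.
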
@@ -107,302 +101,16 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
 	return ret;
 }
 
-int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
-{
-	int ret = -ENODEV;
-
-	if (dd->verbs_layer.l_arg && verbs_piobufavail)
-		ret = verbs_piobufavail(dd->verbs_layer.l_arg);
-
-	return ret;
-}
-
-int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
-		      u32 tlen)
-{
-	int ret = -ENODEV;
-
-	if (dd->verbs_layer.l_arg && verbs_rcv) {
-		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
-		ret = 0;
-	}
-
-	return ret;
-}
-
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+void ipath_layer_lid_changed(struct ipath_devdata *dd)
 {
-	u32 lstate;
-	int ret;
-
-	switch (newstate) {
-	case IPATH_IB_LINKDOWN:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_SLEEP:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_DISABLE:
-		ipath_set_ib_lstate(dd,
-				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKINIT:
-		if (dd->ipath_flags & IPATH_LINKINIT) {
-			ret = 0;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKINIT;
-		break;
-
-	case IPATH_IB_LINKARM:
-		if (dd->ipath_flags & IPATH_LINKARMED) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags &
-		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		/*
-		 * Since the port can transition to ACTIVE by receiving
-		 * a non VL 15 packet, wait for either state.
-		 */
-		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
-		break;
-
-	case IPATH_IB_LINKACTIVE:
-		if (dd->ipath_flags & IPATH_LINKACTIVE) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKACTIVE;
-		break;
-
-	default:
-		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
-		ret = -EINVAL;
-		goto bail;
-	}
-	ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
-
-/**
- * ipath_layer_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * we can handle "any" incoming size, the issue here is whether we
- * need to restrict our outgoing size.  For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link initialize (IPATH_IBSTATE_INIT) state...
- */
-int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
-	u32 piosize;
-	int changed = 0;
-	int ret;
-
-	/*
-	 * mtu is IB data payload max.  It's the largest power of 2 less
-	 * than piosize (or even larger, since it only really controls the
-	 * largest we can receive; we can send the max of the mtu and
-	 * piosize).  We check that it's one of the valid IB sizes.
-	 */
-	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-	    arg != 4096) {
-		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
-		ret = -EINVAL;
-		goto bail;
-	}
-	if (dd->ipath_ibmtu == arg) {
-		ret = 0;	/* same as current */
-		goto bail;
-	}
-
-	piosize = dd->ipath_ibmaxlen;
-	dd->ipath_ibmtu = arg;
-
-	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
-		/* Only if it's not the initial value (or reset to it) */
-		if (piosize != dd->ipath_init_ibmaxlen) {
-			dd->ipath_ibmaxlen = piosize;
-			changed = 1;
-		}
-	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
-		piosize = arg + IPATH_PIO_MAXIBHDR;
-		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
-			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
-			   arg);
-		dd->ipath_ibmaxlen = piosize;
-		changed = 1;
-	}
-
-	if (changed) {
-		/*
-		 * set the IBC maxpktlength to the size of our pio
-		 * buffers in words
-		 */
-		u64 ibc = dd->ipath_ibcctrl;
-		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
-
-		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
-		dd->ipath_ibmaxlen = piosize;
-		piosize /= sizeof(u32);	/* in words */
-		/*
-		 * for ICRC, which we only send in diag test pkt mode, and
-		 * we don't need to worry about that for mtu
-		 */
-		piosize += 1;
-
-		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-		dd->ipath_ibcctrl = ibc;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-		dd->ipath_f_tidtemplate(dd);
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
-{
-	dd->ipath_lid = arg;
-	dd->ipath_lmc = lmc;
-
 	mutex_lock(&ipath_layer_mutex);
 
 	if (dd->ipath_layer.l_arg && layer_intr)
 		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
 
 	mutex_unlock(&ipath_layer_mutex);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_set_lid);
-
-int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
-{
-	/* XXX - need to inform anyone who cares this just happened. */
-	dd->ipath_guid = guid;
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
-
-__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
-{
-	return dd->ipath_guid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
-
-u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
-{
-	return dd->ipath_nguid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
-
-u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
-{
-	return dd->ipath_majrev;
 }
 
-EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
-
-u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
-{
-	return dd->ipath_minrev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
-
-u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
-{
-	return dd->ipath_pcirev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
-
-u32 ipath_layer_get_flags(struct ipath_devdata *dd)
-{
-	return dd->ipath_flags;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
-
-struct device *ipath_layer_get_device(struct ipath_devdata *dd)
-{
-	return &dd->pcidev->dev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_device);
-
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
-{
-	return dd->ipath_deviceid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
-
-u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
-{
-	return dd->ipath_vendorid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
-
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
-{
-	return dd->ipath_lastibcstat;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
-
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
-{
-	return dd->ipath_ibmtu;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
-
 void ipath_layer_add(struct ipath_devdata *dd)
 {
 	mutex_lock(&ipath_layer_mutex);
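
Among the code removed above, ipath_layer_set_mtu() converts a validated IB MTU into the IBC max-packet-length register value: the PIO buffer must hold the MTU plus the largest IB header, and the register wants that length in 32-bit words, minus the 2-word PBC, plus one word for the ICRC. Here is a standalone, hedged sketch of just that arithmetic; DEMO_PIO_MAXIBHDR is a made-up stand-in, the real constant is IPATH_PIO_MAXIBHDR from the driver headers.

/*
 * Sketch of the maxpktlen computation in the removed
 * ipath_layer_set_mtu().  DEMO_PIO_MAXIBHDR is an assumed value for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PIO_MAXIBHDR 128	/* stand-in for IPATH_PIO_MAXIBHDR */

static uint32_t demo_maxpktlen_words(uint16_t mtu)
{
	uint32_t piosize = mtu + DEMO_PIO_MAXIBHDR;	/* bytes incl. PBC */

	piosize -= 2 * sizeof(uint32_t);	/* ignore the PBC qword */
	piosize /= sizeof(uint32_t);		/* convert to words */
	return piosize + 1;			/* room for the ICRC */
}

int main(void)
{
	uint16_t mtus[] = { 256, 512, 1024, 2048, 4096 };

	for (int i = 0; i < 5; i++)
		printf("mtu %4u -> maxpktlen %u words\n",
		       (unsigned) mtus[i],
		       (unsigned) demo_maxpktlen_words(mtus[i]));
	return 0;
}

The +1 word matches the comment in the removed code: space is reserved for the ICRC even though it is only sent in diag test-packet mode.
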
@@ -411,10 +119,6 @@ void ipath_layer_add(struct ipath_devdata *dd)
 		dd->ipath_layer.l_arg =
 			layer_add_one(dd->ipath_unit, dd);
 
-	if (verbs_add_one)
-		dd->verbs_layer.l_arg =
-			verbs_add_one(dd->ipath_unit, dd);
-
 	mutex_unlock(&ipath_layer_mutex);
 }
 
@@ -427,11 +131,6 @@ void ipath_layer_remove(struct ipath_devdata *dd)
 		dd->ipath_layer.l_arg = NULL;
 	}
 
-	if (dd->verbs_layer.l_arg && verbs_remove_one) {
-		verbs_remove_one(dd->verbs_layer.l_arg);
-		dd->verbs_layer.l_arg = NULL;
-	}
-
 	mutex_unlock(&ipath_layer_mutex);
 }
 
@@ -463,9 +162,6 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
 		if (dd->ipath_layer.l_arg)
 			continue;
 
-		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
-			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-
 		spin_unlock_irqrestore(&ipath_devs_lock, flags);
 		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
 		spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -509,107 +205,6 @@ void ipath_layer_unregister(void)
 
 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
 
-static void __ipath_verbs_timer(unsigned long arg)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
-	/*
-	 * If port 0 receive packet interrupts are not available, or
-	 * can be missed, poll the receive queue
-	 */
-	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
-		ipath_kreceive(dd);
-
-	/* Handle verbs layer timeouts. */
-	if (dd->verbs_layer.l_arg && verbs_timer_cb)
-		verbs_timer_cb(dd->verbs_layer.l_arg);
-
-	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
-}
-
-/**
- * ipath_verbs_register - verbs layer registration
- * @l_piobufavail: callback for when PIO buffers become available
- * @l_rcv: callback for receiving a packet
- * @l_timer_cb: timer callback
- * @ipath_devdata: device data structure is put here
- */
-int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
-			 void (*l_remove)(void *arg),
-			 int (*l_piobufavail) (void *arg),
-			 void (*l_rcv) (void *arg, void *rhdr,
-					void *data, u32 tlen),
-			 void (*l_timer_cb) (void *arg))
-{
-	struct ipath_devdata *dd, *tmp;
-	unsigned long flags;
-
-	mutex_lock(&ipath_layer_mutex);
-
-	verbs_add_one = l_add;
-	verbs_remove_one = l_remove;
-	verbs_piobufavail = l_piobufavail;
-	verbs_rcv = l_rcv;
-	verbs_timer_cb = l_timer_cb;
-
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-		if (!(dd->ipath_flags & IPATH_INITTED))
-			continue;
-
-		if (dd->verbs_layer.l_arg)
-			continue;
-
-		spin_unlock_irqrestore(&ipath_devs_lock, flags);
-		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
-		spin_lock_irqsave(&ipath_devs_lock, flags);
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-	mutex_unlock(&ipath_layer_mutex);
-
-	ipath_verbs_registered = 1;
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_register);
-
-void ipath_verbs_unregister(void)
-{
-	struct ipath_devdata *dd, *tmp;
-	unsigned long flags;
-
-	mutex_lock(&ipath_layer_mutex);
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-
-		if (dd->verbs_layer.l_arg && verbs_remove_one) {
-			spin_unlock_irqrestore(&ipath_devs_lock, flags);
-			verbs_remove_one(dd->verbs_layer.l_arg);
-			spin_lock_irqsave(&ipath_devs_lock, flags);
-			dd->verbs_layer.l_arg = NULL;
-		}
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-	verbs_add_one = NULL;
-	verbs_remove_one = NULL;
-	verbs_piobufavail = NULL;
-	verbs_rcv = NULL;
-	verbs_timer_cb = NULL;
-
-	ipath_verbs_registered = 0;
-
-	mutex_unlock(&ipath_layer_mutex);
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
-
 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
 {
 	int ret;
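
The removed __ipath_verbs_timer() re-arms itself one jiffy out so that receive processing still happens on hardware whose port-0 interrupt can be missed. Below is a hedged user-space analogue of that self-rearming poll loop, with usleep() standing in for mod_timer(..., jiffies + 1) and a bounded count so the demo terminates; the real timer re-arms indefinitely until ipath_layer_disable_timer() deletes it.

/*
 * Self-rearming poll loop sketch.  A plain sleep stands in for the
 * kernel timer; demo_poll_rx() stands in for ipath_kreceive() plus the
 * verbs timeout callback.  All demo_* names are invented.
 */
#include <stdio.h>
#include <unistd.h>

static void demo_poll_rx(void)
{
	printf("polling receive queue (cf. ipath_kreceive())\n");
}

int main(void)
{
	int polls_left = 3;	/* demo bound; the driver re-arms forever */

	while (polls_left--) {
		demo_poll_rx();
		usleep(1000);	/* roughly one jiffy at HZ=1000 */
	}
	return 0;
}
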
@@ -698,390 +293,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
 
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
-{
-	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	sge->vaddr += length;
-	sge->length -= length;
-	sge->sge_length -= length;
-	if (sge->sge_length == 0) {
-		if (--ss->num_sge)
-			*sge = *ss->sg_list++;
-	} else if (sge->length == 0 && sge->mr != NULL) {
-		if (++sge->n >= IPATH_SEGSZ) {
-			if (++sge->m >= sge->mr->mapsz)
-				return;
-			sge->n = 0;
-		}
-		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-	}
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#endif
-
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
-		    u32 length)
-{
-	u32 extra = 0;
-	u32 data = 0;
-	u32 last;
-
-	while (1) {
-		u32 len = ss->sge.length;
-		u32 off;
-
-		BUG_ON(len == 0);
-		if (len > length)
-			len = length;
-		if (len > ss->sge.sge_length)
-			len = ss->sge.sge_length;
-		/* If the source address is not aligned, try to align it. */
-		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
-		if (off) {
-			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
-					    ~(sizeof(u32) - 1));
-			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
-			u32 y;
-
-			y = sizeof(u32) - off;
-			if (len > y)
-				len = y;
-			if (len + extra >= sizeof(u32)) {
-				data |= set_upper_bits(v, extra *
-						       BITS_PER_BYTE);
-				len = sizeof(u32) - extra;
-				if (len == length) {
-					last = data;
-					break;
-				}
-				__raw_writel(data, piobuf);
-				piobuf++;
-				extra = 0;
-				data = 0;
-			} else {
-				/* Clear unused upper bytes */
-				data |= clear_upper_bytes(v, len, extra);
-				if (len == length) {
-					last = data;
-					break;
-				}
-				extra += len;
-			}
-		} else if (extra) {
-			/* Source address is aligned. */
-			u32 *addr = (u32 *) ss->sge.vaddr;
-			int shift = extra * BITS_PER_BYTE;
-			int ushift = 32 - shift;
-			u32 l = len;
-
-			while (l >= sizeof(u32)) {
-				u32 v = *addr;
-
-				data |= set_upper_bits(v, shift);
-				__raw_writel(data, piobuf);
-				data = get_upper_bits(v, ushift);
-				piobuf++;
-				addr++;
-				l -= sizeof(u32);
-			}
-			/*
-			 * We still have 'extra' number of bytes leftover.
-			 */
-			if (l) {
-				u32 v = *addr;
-
-				if (l + extra >= sizeof(u32)) {
-					data |= set_upper_bits(v, shift);
-					len -= l + extra - sizeof(u32);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					__raw_writel(data, piobuf);
-					piobuf++;
-					extra = 0;
-					data = 0;
-				} else {
-					/* Clear unused upper bytes */
-					data |= clear_upper_bytes(v, l,
-								  extra);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					extra += l;
-				}
-			} else if (len == length) {
-				last = data;
-				break;
-			}
-		} else if (len == length) {
-			u32 w;
-
-			/*
-			 * Need to round up for the last dword in the
-			 * packet.
-			 */
-			w = (len + 3) >> 2;
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
-			piobuf += w - 1;
-			last = ((u32 *) ss->sge.vaddr)[w - 1];
-			break;
-		} else {
-			u32 w = len >> 2;
-
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
-			piobuf += w;
-
-			extra = len & (sizeof(u32) - 1);
-			if (extra) {
-				u32 v = ((u32 *) ss->sge.vaddr)[w];
-
-				/* Clear unused upper bytes */
-				data = clear_upper_bytes(v, extra, 0);
-			}
-		}
-		update_sge(ss, len);
-		length -= len;
-	}
-	/* Update address before sending packet. */
-	update_sge(ss, length);
-	/* must flush early everything before trigger word */
-	ipath_flush_wc();
-	__raw_writel(last, piobuf);
-	/* be sure trigger word is written */
-	ipath_flush_wc();
-}
-
-/**
- * ipath_verbs_send - send a packet from the verbs layer
- * @dd: the infinipath device
- * @hdrwords: the number of words in the header
- * @hdr: the packet header
- * @len: the length of the packet in bytes
- * @ss: the SGE to send
- *
- * This is like ipath_sma_send_pkt() in that we need to be able to send
- * packets after the chip is initialized (MADs) but also like
- * ipath_layer_send_hdr() since it's used by the verbs layer.
- */
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
-{
-	u32 __iomem *piobuf;
-	u32 plen;
-	int ret;
-
-	/* +1 is for the qword padding of pbc */
-	plen = hdrwords + ((len + 3) >> 2) + 1;
-	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
-		ipath_dbg("packet len 0x%x too long, failing\n", plen);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* Get a PIO buffer to use. */
-	piobuf = ipath_getpiobuf(dd, NULL);
-	if (unlikely(piobuf == NULL)) {
-		ret = -EBUSY;
-		goto bail;
-	}
-
-	/*
-	 * Write len to control qword, no flags.
-	 * We have to flush after the PBC for correctness on some cpus
-	 * or WC buffer can be written out of order.
-	 */
-	writeq(plen, piobuf);
-	ipath_flush_wc();
-	piobuf += 2;
-	if (len == 0) {
-		/*
-		 * If there is just the header portion, must flush before
-		 * writing last word of header for correctness, and after
-		 * the last header word (trigger word).
-		 */
-		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
-		ipath_flush_wc();
-		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
-		ipath_flush_wc();
-		ret = 0;
-		goto bail;
-	}
-
-	__iowrite32_copy(piobuf, hdr, hdrwords);
-	piobuf += hdrwords;
-
-	/* The common case is aligned and contained in one segment. */
-	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
-		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
-		u32 w;
-		u32 *addr = (u32 *) ss->sge.vaddr;
-
-		/* Update address before sending packet. */
-		update_sge(ss, len);
-		/* Need to round up for the last dword in the packet. */
-		w = (len + 3) >> 2;
-		__iowrite32_copy(piobuf, addr, w - 1);
-		/* must flush early everything before trigger word */
-		ipath_flush_wc();
-		__raw_writel(addr[w - 1], piobuf + w - 1);
-		/* be sure trigger word is written */
-		ipath_flush_wc();
-		ret = 0;
-		goto bail;
-	}
-	copy_io(piobuf, ss, len);
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_send);
-
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-				  u64 *rwords, u64 *spkts, u64 *rpkts,
-				  u64 *xmit_wait)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
-
-/**
- * ipath_layer_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_layer_get_counters(struct ipath_devdata *dd,
-			     struct ipath_layer_counters *cntrs)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	cntrs->symbol_error_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
-	cntrs->link_error_recovery_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-	/*
-	 * The link downed counter counts when the other side downs the
-	 * connection.  We add in the number of times we downed the link
-	 * due to local link integrity errors to compensate.
-	 */
-	cntrs->link_downed_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
-	cntrs->port_rcv_errors =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
-	cntrs->port_rcv_remphys_errors =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
-	cntrs->port_xmit_discards =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
-	cntrs->port_xmit_data =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	cntrs->port_rcv_data =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	cntrs->port_xmit_packets =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	cntrs->port_rcv_packets =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
-	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
-
-int ipath_layer_want_buffer(struct ipath_devdata *dd)
-{
-	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
-
 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
 {
 	int ret = 0;
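
The copy_io() path removed above exists because the PIO buffer only accepts whole 32-bit stores, so byte-unaligned source data has to be shifted and spliced into full dwords before being written out. The little-endian helpers it relies on are small enough to verify directly; this self-contained check adapts them from the removed code (uint32_t in place of the kernel's u32) so the packing arithmetic can be exercised in user space.

/*
 * Check of the little-endian byte-packing helpers from the removed
 * copy_io() machinery, adapted for user space.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

static uint32_t get_upper_bits(uint32_t data, uint32_t shift)
{
	return data >> shift;
}

static uint32_t set_upper_bits(uint32_t data, uint32_t shift)
{
	return data << shift;
}

static uint32_t clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
	data <<= (sizeof(uint32_t) - n) * BITS_PER_BYTE;
	data >>= (sizeof(uint32_t) - n - off) * BITS_PER_BYTE;
	return data;
}

int main(void)
{
	uint32_t v = 0x44332211;

	/* Discard the low byte, as copy_io() does for an off-by-1 source. */
	uint32_t hi = get_upper_bits(v, 1 * BITS_PER_BYTE);
	assert(hi == 0x00443322);

	/* Splice those bytes above one byte already gathered (0xaa). */
	assert((0x000000aa | set_upper_bits(hi, 1 * BITS_PER_BYTE))
	       == 0x443322aa);

	/* Keep 2 bytes of v, repositioned at byte offset 1. */
	assert(clear_upper_bytes(v, 2, 1) == 0x00221100);

	printf("packing helpers behave as expected\n");
	return 0;
}
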
@@ -1153,389 +364,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
 }
 
 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
-
-int ipath_layer_enable_timer(struct ipath_devdata *dd)
-{
-	/*
-	 * HT-400 has a design flaw where the chip and kernel idea
-	 * of the tail register don't always agree, and therefore we won't
-	 * get an interrupt on the next packet received.
-	 * If the board supports per packet receive interrupts, use it.
-	 * Otherwise, the timer function periodically checks for packets
-	 * to cover this case.
-	 * Either way, the timer is needed for verbs layer related
-	 * processing.
-	 */
-	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
-				 0x2074076542310ULL);
-		/* Enable GPIO bit 2 interrupt */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 (u64) (1 << 2));
-	}
-
-	init_timer(&dd->verbs_layer.l_timer);
-	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
-	dd->verbs_layer.l_timer.data = (unsigned long)dd;
-	dd->verbs_layer.l_timer.expires = jiffies + 1;
-	add_timer(&dd->verbs_layer.l_timer);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
-
-int ipath_layer_disable_timer(struct ipath_devdata *dd)
-{
-	/* Disable GPIO bit 2 interrupt */
-	if (dd->ipath_flags & IPATH_GPIO_INTR)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
-
-	del_timer_sync(&dd->verbs_layer.l_timer);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
-
-/**
- * ipath_layer_set_verbs_flags - set the verbs layer flags
- * @dd: the infinipath device
- * @flags: the flags to set
- */
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
-{
-	struct ipath_devdata *ss;
-	unsigned long lflags;
-
-	spin_lock_irqsave(&ipath_devs_lock, lflags);
-
-	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
-		if (!(ss->ipath_flags & IPATH_INITTED))
-			continue;
-		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
-		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
-			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-		else
-			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, lflags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
-
-/**
- * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
-{
-	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
-
-/**
- * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
-	unsigned ret;
-
-	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
-		ret = 0;
-	else
-		ret = dd->ipath_pd[0]->port_pkeys[index];
-
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
-
-/**
- * ipath_layer_get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	struct ipath_portdata *pd = dd->ipath_pd[0];
-
-	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	int ret;
-
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (dd->ipath_pkeys[i] != key)
-			continue;
-		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
-			dd->ipath_pkeys[i] = 0;
-			ret = 1;
-			goto bail;
-		}
-		break;
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	u16 lkey = key & 0x7FFF;
-	int any = 0;
-	int ret;
-
-	if (lkey == 0x7FFF) {
-		ret = 0;
-		goto bail;
-	}
-
-	/* Look for an empty slot or a matching PKEY. */
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i]) {
-			any++;
-			continue;
-		}
-		/* If it matches exactly, try to increment the ref count */
-		if (dd->ipath_pkeys[i] == key) {
-			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
-				ret = 0;
-				goto bail;
-			}
-			/* Lost the race. Look for an empty slot below. */
-			atomic_dec(&dd->ipath_pkeyrefs[i]);
-			any++;
-		}
-		/*
-		 * It makes no sense to have both the limited and unlimited
-		 * PKEY set at the same time since the unlimited one will
-		 * disable the limited one.
-		 */
-		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-			ret = -EEXIST;
-			goto bail;
-		}
-	}
-	if (!any) {
-		ret = -EBUSY;
-		goto bail;
-	}
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i] &&
-		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-			/* for ipathstats, etc. */
-			ipath_stats.sps_pkeys[i] = lkey;
-			dd->ipath_pkeys[i] = key;
-			ret = 1;
-			goto bail;
-		}
-	}
-	ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_layer_set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- */
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	struct ipath_portdata *pd;
-	int i;
-	int changed = 0;
-
-	pd = dd->ipath_pd[0];
-
-	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-		u16 key = pkeys[i];
-		u16 okey = pd->port_pkeys[i];
-
-		if (key == okey)
-			continue;
-		/*
-		 * The value of this PKEY table entry is changing.
-		 * Remove the old entry in the hardware's array of PKEYs.
-		 */
-		if (okey & 0x7FFF)
-			changed |= rm_pkey(dd, okey);
-		if (key & 0x7FFF) {
-			int ret = add_pkey(dd, key);
-
-			if (ret < 0)
-				key = 0;
-			else
-				changed |= ret;
-		}
-		pd->port_pkeys[i] = key;
-	}
-	if (changed) {
-		u64 pkey;
-
-		pkey = (u64) dd->ipath_pkeys[0] |
-			((u64) dd->ipath_pkeys[1] << 16) |
-			((u64) dd->ipath_pkeys[2] << 32) |
-			((u64) dd->ipath_pkeys[3] << 48);
-		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
-			   (unsigned long long) pkey);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-				 pkey);
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
-
-/**
- * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
-	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
-
-/**
- * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
-					 int sleep)
-{
-	if (sleep)
-		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	else
-		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-			 dd->ipath_ibcctrl);
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
-
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
-
-/**
- * ipath_layer_set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
-
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
-
-/**
- * ipath_layer_set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
-
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	return dd->ipath_f_get_boardname(dd, name, namelen);
-}
-EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
-
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
-{
-	return dd->ipath_rcvhdrentsize;
-}
-EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
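
The PKEY code removed at the end of this hunk refcounts the four hardware partition-key slots so that several consumers can share an entry and only the last release clears it, which is why rm_pkey() and add_pkey() report whether the hardware register needs rewriting. A condensed single-threaded model of that bookkeeping follows; plain ints replace atomic_t, the table size 4 matches dd->ipath_pkeys, and all demo_* names are invented.

/*
 * Condensed model of the removed rm_pkey()/add_pkey() reference
 * counting.  Single threaded, so plain ints stand in for atomic_t.
 */
#include <stdio.h>

#define NPKEYS 4	/* matches the 4-slot hardware table */

static unsigned short pkeys[NPKEYS];
static int pkeyrefs[NPKEYS];

/* Returns 1 if the hardware register would need updating. */
static int demo_add_pkey(unsigned short key)
{
	for (int i = 0; i < NPKEYS; i++)
		if (pkeys[i] == key) {
			pkeyrefs[i]++;
			return 0;	/* already present, no change */
		}
	for (int i = 0; i < NPKEYS; i++)
		if (!pkeys[i]) {
			pkeys[i] = key;
			pkeyrefs[i] = 1;
			return 1;	/* new entry, update hardware */
		}
	return -1;			/* table full (cf. -EBUSY) */
}

static int demo_rm_pkey(unsigned short key)
{
	for (int i = 0; i < NPKEYS; i++)
		if (pkeys[i] == key && --pkeyrefs[i] == 0) {
			pkeys[i] = 0;
			return 1;	/* last reference, update hardware */
		}
	return 0;
}

int main(void)
{
	printf("add 0x8001 -> %d\n", demo_add_pkey(0x8001)); /* 1 */
	printf("add 0x8001 -> %d\n", demo_add_pkey(0x8001)); /* 0 */
	printf("rm  0x8001 -> %d\n", demo_rm_pkey(0x8001));  /* 0 */
	printf("rm  0x8001 -> %d\n", demo_rm_pkey(0x8001));  /* 1 */
	return 0;
}

The model deliberately omits the limited/unlimited (bit 15) collision check and the lost-race handling that the real add_pkey() needs under concurrency; it only shows the share-and-release shape of the table.
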