Diffstat (limited to 'drivers/infiniband/hw/qib/qib_tx.c')
-rw-r--r--   drivers/infiniband/hw/qib/qib_tx.c   557
1 files changed, 557 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 000000000000..f7eb1ddff5f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "qib.h"

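/*
 * Note on the bookkeeping bitmaps used below (summary of how this file
 * uses them; see the individual functions for details):
 *
 *  - pioavailshadow: two bits per send buffer, a generation (check) bit
 *    at position 2*n and a busy bit at position 2*n+1, mirroring the
 *    chip's DMA'd pioavail registers.
 *  - pioavailkernel: set (at the generation-bit position) for buffers
 *    currently available for kernel use rather than user contexts.
 *  - pio_writing: buffer is currently being written by the kernel, so
 *    a disarm must be deferred until qib_sendbuf_done().
 *  - pio_need_disarm: buffer needs to be disarmed once it is safe.
 */
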
static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
                 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");

/**
 * qib_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers. Used at user process close,
 * in case it died while writing to a PIO buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
        unsigned long flags;
        unsigned i;
        unsigned last;

        last = first + cnt;
        spin_lock_irqsave(&dd->pioavail_lock, flags);
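        /*
         * These are user PIO buffers being reclaimed at context close:
         * clear any deferred-disarm request and disarm each buffer in
         * the range unconditionally.
         */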
        for (i = first; i < last; i++) {
                __clear_bit(i, dd->pio_need_disarm);
                dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * This is called by a user process when it sees the DISARM_BUFS event
 * bit is set.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned i;
        unsigned last;
        unsigned n = 0;

        last = rcd->pio_base + rcd->piocnt;
        /*
         * Don't need uctxt_lock here, since user has called in to us.
         * Clear at start in case more interrupts set bits while we
         * are disarming
         */
        if (rcd->user_event_mask) {
                /*
                 * subctxt_cnt is 0 if not shared, so do base
                 * separately, first, then remaining subctxt, if any
                 */
                clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
                for (i = 1; i < rcd->subctxt_cnt; i++)
                        clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                  &rcd->user_event_mask[i]);
        }
        spin_lock_irq(&dd->pioavail_lock);
        for (i = rcd->pio_base; i < last; i++) {
                if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
                        n++;
                        dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
                }
        }
        spin_unlock_irq(&dd->pioavail_lock);
        return 0;
}

static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
        struct qib_pportdata *ppd;
        unsigned pidx;

        for (pidx = 0; pidx < dd->num_pports; pidx++) {
                ppd = dd->pport + pidx;
                if (i >= ppd->sdma_state.first_sendbuf &&
                    i < ppd->sdma_state.last_sendbuf)
                        return ppd;
        }
        return NULL;
}

/*
 * Return true if send buffer is being used by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
        struct qib_ctxtdata *rcd;
        unsigned ctxt;
        int ret = 0;

        spin_lock(&dd->uctxt_lock);
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
                rcd = dd->rcd[ctxt];
                if (!rcd || bufn < rcd->pio_base ||
                    bufn >= rcd->pio_base + rcd->piocnt)
                        continue;
                if (rcd->user_event_mask) {
                        int i;
                        /*
                         * subctxt_cnt is 0 if not shared, so do base
                         * separately, first, then remaining subctxt, if any
                         */
                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                &rcd->user_event_mask[0]);
                        for (i = 1; i < rcd->subctxt_cnt; i++)
                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                        &rcd->user_event_mask[i]);
                }
                ret = 1;
                break;
        }
        spin_unlock(&dd->uctxt_lock);

        return ret;
}

/*
 * Disarm a set of send buffers.  If the buffer might be actively being
 * written to, mark the buffer to be disarmed later when it is not being
 * written to.
 *
 * This should only be called from the IRQ error handler.
 */
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
                            unsigned cnt)
{
        struct qib_pportdata *ppd, *pppd[dd->num_pports];
        unsigned i;
        unsigned long flags;

        for (i = 0; i < dd->num_pports; i++)
                pppd[i] = NULL;

        for (i = 0; i < cnt; i++) {
                int which;
                if (!test_bit(i, mask))
                        continue;
                /*
                 * If the buffer is owned by the DMA hardware,
                 * reset the DMA engine.
                 */
                ppd = is_sdma_buf(dd, i);
                if (ppd) {
                        pppd[ppd->port] = ppd;
                        continue;
                }
                /*
                 * If the kernel is writing the buffer or the buffer is
                 * owned by a user process, we can't clear it yet.
                 */
                spin_lock_irqsave(&dd->pioavail_lock, flags);
                if (test_bit(i, dd->pio_writing) ||
                    (!test_bit(i << 1, dd->pioavailkernel) &&
                     find_ctxt(dd, i))) {
                        __set_bit(i, dd->pio_need_disarm);
                        which = 0;
                } else {
                        which = 1;
                        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
                }
                spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }

        /* do cancel_sends once per port that had sdma piobufs in error */
        for (i = 0; i < dd->num_pports; i++)
                if (pppd[i])
                        qib_cancel_sends(pppd[i]);
}

/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * called whenever our local copy indicates we have run out of send buffers
 */
static void update_send_bufs(struct qib_devdata *dd)
{
        unsigned long flags;
        unsigned i;
        const unsigned piobregs = dd->pioavregs;

        /*
         * If the generation (check) bits have changed, then we update the
         * busy bit for the corresponding PIO buffer.  This algorithm will
         * modify positions to the value they already have in some cases
         * (i.e., no change), but it's faster than changing only the bits
         * that have changed.
         *
         * We would like to do this atomically, to avoid spinlocks in the
         * critical send path, but that's not really possible, given the
         * type of changes, and that this routine could be called on
         * multiple CPUs simultaneously, so we lock in this routine only,
         * to avoid conflicting updates; all we change is the shadow, and
         * it's a single 64 bit memory location, so by definition the update
         * is atomic in terms of what other CPUs can see in testing the
         * bits.  The spin_lock overhead isn't too bad, since it only
         * happens when all buffers are in use, so only CPU overhead, not
         * latency or bandwidth, is affected.
         */
        if (!dd->pioavailregs_dma)
                return;
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        for (i = 0; i < piobregs; i++) {
                u64 pchbusy, pchg, piov, pnew;

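                /*
                 * piov is the chip's current avail word (from the DMA'd
                 * copy); pchg selects the kernel-owned generation-bit
                 * positions where the shadow agrees with that copy, and
                 * pchbusy is the matching set of busy-bit positions.
                 */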
                piov = le64_to_cpu(dd->pioavailregs_dma[i]);
                pchg = dd->pioavailkernel[i] &
                        ~(dd->pioavailshadow[i] ^ piov);
                pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
                if (pchg && (pchbusy & dd->pioavailshadow[i])) {
                        pnew = dd->pioavailshadow[i] & ~pchbusy;
                        pnew |= piov & pchbusy;
                        dd->pioavailshadow[i] = pnew;
                }
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Debugging code and stats updates if no pio buffers available.
 */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
        dd->upd_pio_shadow = 1;

        /* not atomic, but if we lose a stat count in a while, that's OK */
        qib_stats.sps_nopiobufs++;
}

/*
 * Common code for normal driver send buffer allocation, and reserved
 * allocation.
 *
 * Do appropriate marking as busy, etc.
 * Returns buffer pointer if one is found, otherwise NULL.
 */
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
                                  u32 first, u32 last)
{
        unsigned i, j, updated = 0;
        unsigned nbufs;
        unsigned long flags;
        unsigned long *shadow = dd->pioavailshadow;
        u32 __iomem *buf;

        if (!(dd->flags & QIB_PRESENT))
                return NULL;

        nbufs = last - first + 1; /* number in range to check */
        if (dd->upd_pio_shadow) {
                /*
                 * Minor optimization.  If we had no buffers on last call,
                 * start out by doing the update; continue and do the scan
                 * even if no buffers were updated, to be paranoid.
                 */
                update_send_bufs(dd);
                updated++;
        }
        i = first;
rescan:
        /*
         * While test_and_set_bit() is atomic, we do that and then the
         * change_bit(), and the pair is not.  See if this is the cause
         * of the remaining armlaunch errors.
         */
        spin_lock_irqsave(&dd->pioavail_lock, flags);
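        /*
         * Scan for a buffer whose busy bit (2 * i + 1 in the shadow) is
         * clear: claim it by setting busy, flip its generation bit
         * (2 * i), and note it in pio_writing so any error-path disarm
         * is deferred until qib_sendbuf_done().
         */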
        for (j = 0; j < nbufs; j++, i++) {
                if (i > last)
                        i = first;
                if (__test_and_set_bit((2 * i) + 1, shadow))
                        continue;
                /* flip generation bit */
                __change_bit(2 * i, shadow);
                /* remember that the buffer can be written to now */
                __set_bit(i, dd->pio_writing);
                break;
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        if (j == nbufs) {
                if (!updated) {
                        /*
                         * First time through; shadow exhausted, but there
                         * may be buffers available; try an update and then
                         * rescan.
                         */
                        update_send_bufs(dd);
                        updated++;
                        i = first;
                        goto rescan;
                }
                no_send_bufs(dd);
                buf = NULL;
        } else {
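                /*
                 * Map the buffer number to its chip address: the first
                 * piobcnt2k buffers are 2K buffers starting at pio2kbase,
                 * spaced dd->palign bytes apart; the remainder are 4K
                 * buffers starting at pio4kbase, spaced dd->align4k apart.
                 */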
                if (i < dd->piobcnt2k)
                        buf = (u32 __iomem *)(dd->pio2kbase +
                                i * dd->palign);
                else
                        buf = (u32 __iomem *)(dd->pio4kbase +
                                (i - dd->piobcnt2k) * dd->align4k);
                if (pbufnum)
                        *pbufnum = i;
                dd->upd_pio_shadow = 0;
        }

        return buf;
}

/*
 * Record that the caller is finished writing to the buffer so we don't
 * disarm it while it is being written and disarm it now if needed.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
        unsigned long flags;

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        __clear_bit(n, dd->pio_writing);
        if (__test_and_clear_bit(n, dd->pio_need_disarm))
                dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 * @rcd: the context pointer
 */
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
                            unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
        unsigned long flags;
        unsigned end;
        unsigned ostart = start;

        /* There are two bits per send buffer (busy and generation) */
        start *= 2;
        end = start + len * 2;
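        /* e.g. buffer N maps to shadow bits 2*N (generation) and 2*N+1 (busy) */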

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        /* Set or clear the busy bit in the shadow. */
        while (start < end) {
                if (avail) {
                        unsigned long dma;
                        int i;

                        /*
                         * The BUSY bit will never be set, because we disarm
                         * the user buffers before we hand them back to the
                         * kernel.  We do have to make sure the generation
                         * bit is set correctly in shadow, since it could
                         * have changed many times while allocated to user.
                         * We can't use the bitmap functions on the full
                         * dma array because it is always little-endian, so
                         * we have to flip to host-order first.
                         * BITS_PER_LONG is slightly wrong, since it's
                         * always 64 bits per register in chip...
                         * We only work on 64 bit kernels, so that's OK.
                         */
                        i = start / BITS_PER_LONG;
                        __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
                                    dd->pioavailshadow);
                        dma = (unsigned long)
                                le64_to_cpu(dd->pioavailregs_dma[i]);
                        if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
                                      start) % BITS_PER_LONG, &dma))
                                __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
                                          start, dd->pioavailshadow);
                        else
                                __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
                                            + start, dd->pioavailshadow);
                        __set_bit(start, dd->pioavailkernel);
                } else {
                        __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
                                  dd->pioavailshadow);
                        __clear_bit(start, dd->pioavailkernel);
                }
                start += 2;
        }

        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        dd->f_txchk_change(dd, ostart, len, avail, rcd);
}

/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo.  The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */
void qib_cancel_sends(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        struct qib_ctxtdata *rcd;
        unsigned long flags;
        unsigned ctxt;
        unsigned i;
        unsigned last;

        /*
         * Tell PSM to disarm buffers again before trying to reuse them.
         * We need to be sure the rcd doesn't change out from under us
         * while we do so.  We hold the two locks sequentially.  We might
         * needlessly set some need_disarm bits as a result, if the
         * context is closed after we release the uctxt_lock, but that's
         * fairly benign, and safer than nesting the locks.
         */
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
                spin_lock_irqsave(&dd->uctxt_lock, flags);
                rcd = dd->rcd[ctxt];
                if (rcd && rcd->ppd == ppd) {
                        last = rcd->pio_base + rcd->piocnt;
                        if (rcd->user_event_mask) {
                                /*
                                 * subctxt_cnt is 0 if not shared, so do base
                                 * separately, first, then remaining subctxt,
                                 * if any
                                 */
                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                        &rcd->user_event_mask[0]);
                                for (i = 1; i < rcd->subctxt_cnt; i++)
                                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                                &rcd->user_event_mask[i]);
                        }
                        i = rcd->pio_base;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        spin_lock_irqsave(&dd->pioavail_lock, flags);
                        for (; i < last; i++)
                                __set_bit(i, dd->pio_need_disarm);
                        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
                } else
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
        }

        if (!(dd->flags & QIB_HAS_SEND_DMA))
                dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
                                    QIB_SENDCTRL_FLUSH);
}

/*
 * Force an update of in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.
 * If already off, this routine is a nop, on the assumption that the
 * caller (or set of callers) will "do the right thing".
 * This is a per-device operation, so just the first port.
 */
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

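/*
 * Head-of-line (HoL) blocking handling: sends are cancelled when the
 * link goes down, and again every qib_hol_timeout_ms via hol_timer
 * while the link is not yet up, so that stuck packets cannot block
 * SMP replies.
 */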
void qib_hol_down(struct qib_pportdata *ppd)
{
        /*
         * Cancel sends when the link goes DOWN so that we aren't doing it
         * at INIT when we might be trying to send SMI packets.
         */
        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
                qib_cancel_sends(ppd);
}

/*
 * Link is at INIT.
 * We start the HoL timer so we can detect stuck packets blocking SMP replies.
 * Timer may already be running, so use mod_timer, not add_timer.
 */
void qib_hol_init(struct qib_pportdata *ppd)
{
        if (ppd->hol_state != QIB_HOL_INIT) {
                ppd->hol_state = QIB_HOL_INIT;
                mod_timer(&ppd->hol_timer,
                          jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
        }
}

/*
 * Link is up, continue any user processes, and ensure timer
 * is a nop, if running.  Let timer keep running, if set; it
 * will nop when it sees the link is up.
 */
void qib_hol_up(struct qib_pportdata *ppd)
{
        ppd->hol_state = QIB_HOL_UP;
}

/*
 * This is only called via the timer.
 */
void qib_hol_event(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

        /* If hardware error, etc, skip. */
        if (!(ppd->dd->flags & QIB_INITTED))
                return;

        if (ppd->hol_state != QIB_HOL_UP) {
                /*
                 * Try to flush sends in case a stuck packet is blocking
                 * SMP replies.
                 */
                qib_hol_down(ppd);
                mod_timer(&ppd->hol_timer,
                          jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
        }
}