author	Bryan O'Sullivan <bos@pathscale.com>	2006-03-29 18:23:24 -0500
committer	Roland Dreier <rolandd@cisco.com>	2006-03-31 16:14:18 -0500
commit	7bb206e3b20477c8bcbbdf20834d456b0b6d82c4 (patch)
tree	020464ec844664ebdcee40e05630751a20075afb /drivers/infiniband/hw/ipath
parent	064c94f9da8845f12446ab37142aa10f3c6f66ac (diff)
IB/ipath: core device driver
The ipath driver is a low-level driver for PathScale InfiniPath host channel adapters (HCAs) based on the HT-400 and PE-800 chips, including the InfiniPath HT-460, the small form factor InfiniPath HT-460, the InfiniPath HT-470 and the Linux Networx LS/X. The ipath_driver.c file contains much of the low-level device handling code.

Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ipath')
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_driver.c | 1983
1 file changed, 1983 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
new file mode 100644
index 00000000000..58a94efb007
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -0,0 +1,1983 @@
1/*
2 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/idr.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/vmalloc.h>
39
40#include "ipath_kernel.h"
41#include "ips_common.h"
42#include "ipath_layer.h"
43
44static void ipath_update_pio_bufs(struct ipath_devdata *);
45
46const char *ipath_get_unit_name(int unit)
47{
48 static char iname[16];
49 snprintf(iname, sizeof iname, "infinipath%u", unit);
50 return iname;
51}
52
53EXPORT_SYMBOL_GPL(ipath_get_unit_name);
54
55#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: "
56#define PFX IPATH_DRV_NAME ": "
57
58/*
 59 * The buffer must be longer than this string, so we can append
60 * board/chip information to it in the init code.
61 */
62const char ipath_core_version[] = IPATH_IDSTR "\n";
63
64static struct idr unit_table;
65DEFINE_SPINLOCK(ipath_devs_lock);
66LIST_HEAD(ipath_dev_list);
67
68wait_queue_head_t ipath_sma_state_wait;
69
70unsigned ipath_debug = __IPATH_INFO;
71
72module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
73MODULE_PARM_DESC(debug, "mask for debug prints");
74EXPORT_SYMBOL_GPL(ipath_debug);
75
76MODULE_LICENSE("GPL");
77MODULE_AUTHOR("PathScale <support@pathscale.com>");
78MODULE_DESCRIPTION("Pathscale InfiniPath driver");
79
80const char *ipath_ibcstatus_str[] = {
81 "Disabled",
82 "LinkUp",
83 "PollActive",
84 "PollQuiet",
85 "SleepDelay",
86 "SleepQuiet",
87 "LState6", /* unused */
88 "LState7", /* unused */
89 "CfgDebounce",
90 "CfgRcvfCfg",
91 "CfgWaitRmt",
92 "CfgIdle",
93 "RecovRetrain",
94 "LState0xD", /* unused */
95 "RecovWaitRmt",
96 "RecovIdle",
97};
98
99/*
100 * These variables are initialized in the chip-specific files
101 * but are defined here.
102 */
103u16 ipath_gpio_sda_num, ipath_gpio_scl_num;
104u64 ipath_gpio_sda, ipath_gpio_scl;
105u64 infinipath_i_bitsextant;
106ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
107u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
108
109static void __devexit ipath_remove_one(struct pci_dev *);
110static int __devinit ipath_init_one(struct pci_dev *,
111 const struct pci_device_id *);
112
113/* Only needed for registration, nothing else needs this info */
114#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
115#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
116#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
117
118static const struct pci_device_id ipath_pci_tbl[] = {
119 {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
120 PCI_DEVICE_ID_INFINIPATH_HT)},
121 {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
122 PCI_DEVICE_ID_INFINIPATH_PE800)},
123};
124
125MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
126
127static struct pci_driver ipath_driver = {
128 .name = IPATH_DRV_NAME,
129 .probe = ipath_init_one,
130 .remove = __devexit_p(ipath_remove_one),
131 .id_table = ipath_pci_tbl,
132};
133
134/*
135 * This is where port 0's rcvhdrtail register is written back; we also
136 * want nothing else sharing the cache line, so make it a cache line
137 * in size. Used for all units.
138 */
139volatile __le64 *ipath_port0_rcvhdrtail;
140dma_addr_t ipath_port0_rcvhdrtail_dma;
141static int port0_rcvhdrtail_refs;
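/*
 * IPATH_PORT0_RCVHDRTAIL_SIZE is defined in ipath_kernel.h; per the
 * comment above it is presumably a full cache line (e.g. something
 * like ALIGN(sizeof(__le64), SMP_CACHE_BYTES)), so the chip's DMA
 * write-back of the tail never shares a line with unrelated data.
 */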
142
143static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
144 u32 *bar0, u32 *bar1)
145{
146 int ret;
147
148 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
149 if (ret)
150 ipath_dev_err(dd, "failed to read bar0 before enable: "
151 "error %d\n", -ret);
152
153 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
154 if (ret)
155 ipath_dev_err(dd, "failed to read bar1 before enable: "
156 "error %d\n", -ret);
157
158 ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
159}
160
161static void ipath_free_devdata(struct pci_dev *pdev,
162 struct ipath_devdata *dd)
163{
164 unsigned long flags;
165
166 pci_set_drvdata(pdev, NULL);
167
168 if (dd->ipath_unit != -1) {
169 spin_lock_irqsave(&ipath_devs_lock, flags);
170 idr_remove(&unit_table, dd->ipath_unit);
171 list_del(&dd->ipath_list);
172 spin_unlock_irqrestore(&ipath_devs_lock, flags);
173 }
174 dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr);
175}
176
177static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
178{
179 unsigned long flags;
180 struct ipath_devdata *dd;
181 dma_addr_t dma_addr;
182 int ret;
183
184 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
185 dd = ERR_PTR(-ENOMEM);
186 goto bail;
187 }
188
189 dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr,
190 GFP_KERNEL);
191
192 if (!dd) {
193 dd = ERR_PTR(-ENOMEM);
194 goto bail;
195 }
196
197 dd->ipath_dma_addr = dma_addr;
198 dd->ipath_unit = -1;
199
200 spin_lock_irqsave(&ipath_devs_lock, flags);
201
202 ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
203 if (ret < 0) {
204 printk(KERN_ERR IPATH_DRV_NAME
205 ": Could not allocate unit ID: error %d\n", -ret);
206 ipath_free_devdata(pdev, dd);
207 dd = ERR_PTR(ret);
208 goto bail_unlock;
209 }
210
211 dd->pcidev = pdev;
212 pci_set_drvdata(pdev, dd);
213
214 list_add(&dd->ipath_list, &ipath_dev_list);
215
216bail_unlock:
217 spin_unlock_irqrestore(&ipath_devs_lock, flags);
218
219bail:
220 return dd;
221}
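/*
 * Note the two-step idr protocol above: idr_pre_get() preallocates
 * node memory with GFP_KERNEL while sleeping is still allowed, so
 * that idr_get_new() can then run safely under the ipath_devs_lock
 * spinlock without having to allocate.
 */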
222
223static inline struct ipath_devdata *__ipath_lookup(int unit)
224{
225 return idr_find(&unit_table, unit);
226}
227
228struct ipath_devdata *ipath_lookup(int unit)
229{
230 struct ipath_devdata *dd;
231 unsigned long flags;
232
233 spin_lock_irqsave(&ipath_devs_lock, flags);
234 dd = __ipath_lookup(unit);
235 spin_unlock_irqrestore(&ipath_devs_lock, flags);
236
237 return dd;
238}
239
240int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
241{
242 int nunits, npresent, nup;
243 struct ipath_devdata *dd;
244 unsigned long flags;
245 u32 maxports;
246
247 nunits = npresent = nup = maxports = 0;
248
249 spin_lock_irqsave(&ipath_devs_lock, flags);
250
251 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
252 nunits++;
253 if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
254 npresent++;
255 if (dd->ipath_lid &&
256 !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
257 | IPATH_LINKUNK)))
258 nup++;
259 if (dd->ipath_cfgports > maxports)
260 maxports = dd->ipath_cfgports;
261 }
262
263 spin_unlock_irqrestore(&ipath_devs_lock, flags);
264
265 if (npresentp)
266 *npresentp = npresent;
267 if (nupp)
268 *nupp = nup;
269 if (maxportsp)
270 *maxportsp = maxports;
271
272 return nunits;
273}
274
275static int init_port0_rcvhdrtail(struct pci_dev *pdev)
276{
277 int ret;
278
279 mutex_lock(&ipath_mutex);
280
281 if (!ipath_port0_rcvhdrtail) {
282 ipath_port0_rcvhdrtail =
283 dma_alloc_coherent(&pdev->dev,
284 IPATH_PORT0_RCVHDRTAIL_SIZE,
285 &ipath_port0_rcvhdrtail_dma,
286 GFP_KERNEL);
287
288 if (!ipath_port0_rcvhdrtail) {
289 ret = -ENOMEM;
290 goto bail;
291 }
292 }
293 port0_rcvhdrtail_refs++;
294 ret = 0;
295
296bail:
297 mutex_unlock(&ipath_mutex);
298
299 return ret;
300}
301
302static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
303{
304 mutex_lock(&ipath_mutex);
305
306 if (!--port0_rcvhdrtail_refs) {
307 dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
308 (void *) ipath_port0_rcvhdrtail,
309 ipath_port0_rcvhdrtail_dma);
310 ipath_port0_rcvhdrtail = NULL;
311 }
312
313 mutex_unlock(&ipath_mutex);
314}
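/*
 * init_port0_rcvhdrtail()/cleanup_port0_rcvhdrtail() above implement
 * a refcounted singleton: the first probe allocates the shared page
 * under ipath_mutex, later probes only bump port0_rcvhdrtail_refs,
 * and the memory is freed when the last device drops its reference.
 */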
315
316/*
317 * These next two routines are placeholders in case we don't have per-arch
318 * code for controlling write combining. If explicit control of write
319 * combining is not available, performance will probably be awful.
320 */
321
322int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
323{
324 return -EOPNOTSUPP;
325}
326
327void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
328{
329}
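/*
 * An arch that can control write combining overrides these weak stubs
 * simply by defining strong symbols with the same signatures, e.g.
 * (sketch only; this MTRR-based body is a hypothetical illustration,
 * not the driver's actual arch code):
 *
 *	int ipath_enable_wc(struct ipath_devdata *dd)
 *	{
 *		// hypothetical: map the register/PIO region
 *		// write-combined; real code would restrict the range
 *		int ret = mtrr_add(dd->ipath_physaddr, dd->ipath_kregsize,
 *				   MTRR_TYPE_WRCOMB, 1);
 *		return ret < 0 ? ret : 0;
 *	}
 */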
330
331static int __devinit ipath_init_one(struct pci_dev *pdev,
332 const struct pci_device_id *ent)
333{
334 int ret, len, j;
335 struct ipath_devdata *dd;
336 unsigned long long addr;
337 u32 bar0 = 0, bar1 = 0;
338 u8 rev;
339
340 ret = init_port0_rcvhdrtail(pdev);
341 if (ret < 0) {
342 printk(KERN_ERR IPATH_DRV_NAME
343 ": Could not allocate port0_rcvhdrtail: error %d\n",
344 -ret);
345 goto bail;
346 }
347
348 dd = ipath_alloc_devdata(pdev);
349 if (IS_ERR(dd)) {
350 ret = PTR_ERR(dd);
351 printk(KERN_ERR IPATH_DRV_NAME
352 ": Could not allocate devdata: error %d\n", -ret);
353 goto bail_rcvhdrtail;
354 }
355
356 ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
357
358 read_bars(dd, pdev, &bar0, &bar1);
359
360 ret = pci_enable_device(pdev);
361 if (ret) {
362 /* This can happen iff:
363 *
364 * We did a chip reset, and then failed to reprogram the
365 * BAR, or the chip reset due to an internal error. We then
366 * unloaded the driver and reloaded it.
367 *
368 * Both reset cases set the BAR back to initial state. For
369 * the latter case, the AER sticky error bit at offset 0x718
370 * should be set, but the Linux kernel doesn't yet know
371 * about that, it appears. If the original BAR was retained
372 * in the kernel data structures, this may be OK.
373 */
374 ipath_dev_err(dd, "enable unit %d failed: error %d\n",
375 dd->ipath_unit, -ret);
376 goto bail_devdata;
377 }
378 addr = pci_resource_start(pdev, 0);
379 len = pci_resource_len(pdev, 0);
380 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x "
381 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
382 ent->device, ent->driver_data);
383
384 read_bars(dd, pdev, &bar0, &bar1);
385
386 if (!bar1 && !(bar0 & ~0xf)) {
387 if (addr) {
388 dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
389 "rewriting as %llx\n", addr);
390 ret = pci_write_config_dword(
391 pdev, PCI_BASE_ADDRESS_0, addr);
392 if (ret) {
393 ipath_dev_err(dd, "rewrite of BAR0 "
394 "failed: err %d\n", -ret);
395 goto bail_disable;
396 }
397 ret = pci_write_config_dword(
398 pdev, PCI_BASE_ADDRESS_1, addr >> 32);
399 if (ret) {
400 ipath_dev_err(dd, "rewrite of BAR1 "
401 "failed: err %d\n", -ret);
402 goto bail_disable;
403 }
404 } else {
405 ipath_dev_err(dd, "BAR is 0 (probable RESET), "
406 "not usable until reboot\n");
407 ret = -ENODEV;
408 goto bail_disable;
409 }
410 }
411
412 ret = pci_request_regions(pdev, IPATH_DRV_NAME);
413 if (ret) {
414 dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
415 "err %d\n", dd->ipath_unit, -ret);
416 goto bail_disable;
417 }
418
419 ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
420 if (ret) {
421 dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
422 "fails: %d\n", dd->ipath_unit, ret);
423 goto bail_regions;
424 }
425
426 pci_set_master(pdev);
427
428 /*
429 * Save BARs to rewrite after device reset. Save all 64 bits of
430 * BAR, just in case.
431 */
432 dd->ipath_pcibar0 = addr;
433 dd->ipath_pcibar1 = addr >> 32;
434 dd->ipath_deviceid = ent->device; /* save for later use */
435 dd->ipath_vendorid = ent->vendor;
436
437 /* setup the chip-specific functions, as early as possible. */
438 switch (ent->device) {
439 case PCI_DEVICE_ID_INFINIPATH_HT:
440 ipath_init_ht400_funcs(dd);
441 break;
442 case PCI_DEVICE_ID_INFINIPATH_PE800:
443 ipath_init_pe800_funcs(dd);
444 break;
445 default:
446 ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, "
447 "failing\n", ent->device);
448 return -ENODEV;
449 }
450
451 for (j = 0; j < 6; j++) {
452 if (!pdev->resource[j].start)
453 continue;
454 ipath_cdbg(VERBOSE, "BAR %d start %lx, end %lx, len %lx\n",
455 j, pdev->resource[j].start,
456 pdev->resource[j].end,
457 pci_resource_len(pdev, j));
458 }
459
460 if (!addr) {
461 ipath_dev_err(dd, "No valid address in BAR 0!\n");
462 ret = -ENODEV;
463 goto bail_regions;
464 }
465
469 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
470 if (ret) {
471 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
472 "%u: err %d\n", dd->ipath_unit, -ret);
473 goto bail_regions; /* shouldn't ever happen */
474 }
475 dd->ipath_pcirev = rev;
476
477 dd->ipath_kregbase = ioremap_nocache(addr, len);
478
479 if (!dd->ipath_kregbase) {
480 ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
481 addr);
482 ret = -ENOMEM;
483 goto bail_iounmap;
484 }
485 dd->ipath_kregend = (u64 __iomem *)
486 ((void __iomem *)dd->ipath_kregbase + len);
487 dd->ipath_physaddr = addr; /* used for io_remap, etc. */
488 /* for user mmap */
489 dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr);
490 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p "
491 "kregvirt %p\n", addr, dd->ipath_kregbase,
492 dd->ipath_kregvirt);
493
494 /*
495 * clear ipath_flags here instead of in ipath_init_chip as it is set
496 * by ipath_setup_htconfig.
497 */
498 dd->ipath_flags = 0;
499
500 if (dd->ipath_f_bus(dd, pdev))
501 ipath_dev_err(dd, "Failed to setup config space; "
502 "continuing anyway\n");
503
504 /*
 505 * Set up our interrupt handler; SA_SHIRQ is probably not needed,
 506 * since MSI interrupts shouldn't be shared, but it won't hurt for
 507 * now. Check for a 0 irq after we return from chip-specific bus
 508 * setup, since that setup can affect it.
509 */
510 if (!pdev->irq)
511 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
512 "work\n");
513 else {
514 ret = request_irq(pdev->irq, ipath_intr, SA_SHIRQ,
515 IPATH_DRV_NAME, dd);
516 if (ret) {
517 ipath_dev_err(dd, "Couldn't setup irq handler, "
518 "irq=%u: %d\n", pdev->irq, ret);
519 goto bail_iounmap;
520 }
521 }
522
523 ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
524 if (ret)
525 goto bail_iounmap;
526
527 ret = ipath_enable_wc(dd);
528
529 if (ret) {
530 ipath_dev_err(dd, "Write combining not enabled "
531 "(err %d): performance may be poor\n",
532 -ret);
533 ret = 0;
534 }
535
536 ipath_device_create_group(&pdev->dev, dd);
537 ipathfs_add_device(dd);
538 ipath_user_add(dd);
539 ipath_layer_add(dd);
540
541 goto bail;
542
543bail_iounmap:
544 iounmap((volatile void __iomem *) dd->ipath_kregbase);
545
546bail_regions:
547 pci_release_regions(pdev);
548
549bail_disable:
550 pci_disable_device(pdev);
551
552bail_devdata:
553 ipath_free_devdata(pdev, dd);
554
555bail_rcvhdrtail:
556 cleanup_port0_rcvhdrtail(pdev);
557
558bail:
559 return ret;
560}
561
562static void __devexit ipath_remove_one(struct pci_dev *pdev)
563{
564 struct ipath_devdata *dd;
565
566 ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev);
567 if (!pdev)
568 return;
569
570 dd = pci_get_drvdata(pdev);
571 ipath_layer_del(dd);
572 ipath_user_del(dd);
573 ipathfs_remove_device(dd);
574 ipath_device_remove_group(&pdev->dev, dd);
575 ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
576 "unit %u\n", dd, (u32) dd->ipath_unit);
577 if (dd->ipath_kregbase) {
578 ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n",
579 dd->ipath_kregbase);
580 iounmap((volatile void __iomem *) dd->ipath_kregbase);
581 dd->ipath_kregbase = NULL;
582 }
583 pci_release_regions(pdev);
584 ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
585 pci_disable_device(pdev);
586
587 ipath_free_devdata(pdev, dd);
588 cleanup_port0_rcvhdrtail(pdev);
589}
590
591/* general driver use */
592DEFINE_MUTEX(ipath_mutex);
593
594static DEFINE_SPINLOCK(ipath_pioavail_lock);
595
596/**
597 * ipath_disarm_piobufs - cancel a range of PIO buffers
598 * @dd: the infinipath device
599 * @first: the first PIO buffer to cancel
600 * @cnt: the number of PIO buffers to cancel
601 *
 602 * Cancel a range of PIO buffers; used when they might be armed, but
 603 * not triggered. Used at init to ensure buffer state, and at user
 604 * process close, in case the process died while writing to a PIO
 605 * buffer. Also used after errors.
606 */
607void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
608 unsigned cnt)
609{
610 unsigned i, last = first + cnt;
611 u64 sendctrl, sendorig;
612
613 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
614 sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
615 for (i = first; i < last; i++) {
616 sendctrl = sendorig |
617 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
618 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
619 sendctrl);
620 }
621
622 /*
623 * Write it again with current value, in case ipath_sendctrl changed
624 * while we were looping; no critical bits that would require
625 * locking.
626 *
627 * Write a 0, and then the original value, reading scratch in
628 * between. This seems to avoid a chip timing race that causes
629 * pioavail updates to memory to stop.
630 */
631 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
632 0);
633 sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
634 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
635 dd->ipath_sendctrl);
636}
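/*
 * E.g. disarming buffer 5 in the loop above writes kr_sendctrl with
 * ipath_sendctrl | INFINIPATH_S_DISARM |
 * (5 << INFINIPATH_S_DISARMPIOBUF_SHIFT).
 */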
637
638/**
639 * ipath_wait_linkstate - wait for an IB link state change to occur
640 * @dd: the infinipath device
641 * @state: the state to wait for
642 * @msecs: the number of milliseconds to wait
643 *
 644 * Wait up to msecs milliseconds for an IB link state change to
 645 * occur. For now, take the easy polling route. Currently used only
 646 * by ipath_layer_set_linkstate. Returns 0 if the state is reached,
 647 * otherwise -ETIMEDOUT. The state can have multiple bits set, for
 648 * any of several transitions.
649 */
650int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
651{
652 dd->ipath_sma_state_wanted = state;
653 wait_event_interruptible_timeout(ipath_sma_state_wait,
654 (dd->ipath_flags & state),
655 msecs_to_jiffies(msecs));
656 dd->ipath_sma_state_wanted = 0;
657
658 if (!(dd->ipath_flags & state)) {
659 u64 val;
660 ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
661 /* test INIT ahead of DOWN, both can be set */
662 (state & IPATH_LINKINIT) ? "INIT" :
663 ((state & IPATH_LINKDOWN) ? "DOWN" :
664 ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
665 msecs);
666 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
667 ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
668 (unsigned long long) ipath_read_kreg64(
669 dd, dd->ipath_kregs->kr_ibcctrl),
670 (unsigned long long) val,
671 ipath_ibcstatus_str[val & 0xf]);
672 }
673 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
674}
675
676void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
677{
678 *buf = '\0';
679 if (err & INFINIPATH_E_RHDRLEN)
680 strlcat(buf, "rhdrlen ", blen);
681 if (err & INFINIPATH_E_RBADTID)
682 strlcat(buf, "rbadtid ", blen);
683 if (err & INFINIPATH_E_RBADVERSION)
684 strlcat(buf, "rbadversion ", blen);
685 if (err & INFINIPATH_E_RHDR)
686 strlcat(buf, "rhdr ", blen);
687 if (err & INFINIPATH_E_RLONGPKTLEN)
688 strlcat(buf, "rlongpktlen ", blen);
689 if (err & INFINIPATH_E_RSHORTPKTLEN)
690 strlcat(buf, "rshortpktlen ", blen);
691 if (err & INFINIPATH_E_RMAXPKTLEN)
692 strlcat(buf, "rmaxpktlen ", blen);
693 if (err & INFINIPATH_E_RMINPKTLEN)
694 strlcat(buf, "rminpktlen ", blen);
695 if (err & INFINIPATH_E_RFORMATERR)
696 strlcat(buf, "rformaterr ", blen);
697 if (err & INFINIPATH_E_RUNSUPVL)
698 strlcat(buf, "runsupvl ", blen);
699 if (err & INFINIPATH_E_RUNEXPCHAR)
700 strlcat(buf, "runexpchar ", blen);
701 if (err & INFINIPATH_E_RIBFLOW)
702 strlcat(buf, "ribflow ", blen);
703 if (err & INFINIPATH_E_REBP)
704 strlcat(buf, "EBP ", blen);
705 if (err & INFINIPATH_E_SUNDERRUN)
706 strlcat(buf, "sunderrun ", blen);
707 if (err & INFINIPATH_E_SPIOARMLAUNCH)
708 strlcat(buf, "spioarmlaunch ", blen);
709 if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
710 strlcat(buf, "sunexperrpktnum ", blen);
711 if (err & INFINIPATH_E_SDROPPEDDATAPKT)
712 strlcat(buf, "sdroppeddatapkt ", blen);
713 if (err & INFINIPATH_E_SDROPPEDSMPPKT)
714 strlcat(buf, "sdroppedsmppkt ", blen);
715 if (err & INFINIPATH_E_SMAXPKTLEN)
716 strlcat(buf, "smaxpktlen ", blen);
717 if (err & INFINIPATH_E_SMINPKTLEN)
718 strlcat(buf, "sminpktlen ", blen);
719 if (err & INFINIPATH_E_SUNSUPVL)
720 strlcat(buf, "sunsupVL ", blen);
721 if (err & INFINIPATH_E_SPKTLEN)
722 strlcat(buf, "spktlen ", blen);
723 if (err & INFINIPATH_E_INVALIDADDR)
724 strlcat(buf, "invalidaddr ", blen);
725 if (err & INFINIPATH_E_RICRC)
726 strlcat(buf, "CRC ", blen);
727 if (err & INFINIPATH_E_RVCRC)
728 strlcat(buf, "VCRC ", blen);
729 if (err & INFINIPATH_E_RRCVEGRFULL)
730 strlcat(buf, "rcvegrfull ", blen);
731 if (err & INFINIPATH_E_RRCVHDRFULL)
732 strlcat(buf, "rcvhdrfull ", blen);
733 if (err & INFINIPATH_E_IBSTATUSCHANGED)
734 strlcat(buf, "ibcstatuschg ", blen);
735 if (err & INFINIPATH_E_RIBLOSTLINK)
736 strlcat(buf, "riblostlink ", blen);
737 if (err & INFINIPATH_E_HARDWARE)
738 strlcat(buf, "hardware ", blen);
739 if (err & INFINIPATH_E_RESET)
740 strlcat(buf, "reset ", blen);
741}
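/*
 * Example: for err = INFINIPATH_E_RHDRLEN | INFINIPATH_E_REBP, the
 * routine above leaves "rhdrlen EBP " in buf -- one space-terminated
 * token per error bit, in the order the bits are tested.
 */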
742
743/**
744 * get_rhf_errstring - decode RHF errors
745 * @err: the err number
746 * @msg: the output buffer
747 * @len: the length of the output buffer
748 *
749 * only used one place now, may want more later
750 */
751static void get_rhf_errstring(u32 err, char *msg, size_t len)
752{
 753 /* start with an empty string; then we needn't check which error comes first */
754 *msg = '\0';
755
756 if (err & INFINIPATH_RHF_H_ICRCERR)
757 strlcat(msg, "icrcerr ", len);
758 if (err & INFINIPATH_RHF_H_VCRCERR)
759 strlcat(msg, "vcrcerr ", len);
760 if (err & INFINIPATH_RHF_H_PARITYERR)
761 strlcat(msg, "parityerr ", len);
762 if (err & INFINIPATH_RHF_H_LENERR)
763 strlcat(msg, "lenerr ", len);
764 if (err & INFINIPATH_RHF_H_MTUERR)
765 strlcat(msg, "mtuerr ", len);
766 if (err & INFINIPATH_RHF_H_IHDRERR)
767 /* infinipath hdr checksum error */
768 strlcat(msg, "ipathhdrerr ", len);
769 if (err & INFINIPATH_RHF_H_TIDERR)
770 strlcat(msg, "tiderr ", len);
771 if (err & INFINIPATH_RHF_H_MKERR)
772 /* bad port, offset, etc. */
773 strlcat(msg, "invalid ipathhdr ", len);
774 if (err & INFINIPATH_RHF_H_IBERR)
775 strlcat(msg, "iberr ", len);
776 if (err & INFINIPATH_RHF_L_SWA)
777 strlcat(msg, "swA ", len);
778 if (err & INFINIPATH_RHF_L_SWB)
779 strlcat(msg, "swB ", len);
780}
781
782/**
783 * ipath_get_egrbuf - get an eager buffer
784 * @dd: the infinipath device
785 * @bufnum: the eager buffer to get
786 * @err: unused
787 *
788 * must only be called if ipath_pd[port] is known to be allocated
789 */
790static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
791 int err)
792{
793 return dd->ipath_port0_skbs ?
794 (void *)dd->ipath_port0_skbs[bufnum]->data : NULL;
795}
796
797/**
798 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
799 * @dd: the infinipath device
 800 * @gfp_mask: the sk_buff allocation GFP mask
801 */
802struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
803 gfp_t gfp_mask)
804{
805 struct sk_buff *skb;
806 u32 len;
807
808 /*
 809 * The only fully supported way to handle this is to allocate lots of
 810 * extra space, align as needed, and then do skb_reserve(). That wastes
811 * a lot of memory... I'll have to hack this into infinipath_copy
812 * also.
813 */
814
815 /*
816 * We need 4 extra bytes for unaligned transfer copying
817 */
818 if (dd->ipath_flags & IPATH_4BYTE_TID) {
 819 /* we need a 2KB multiple alignment (the (1 << 11) below), and
 820 * there is no way to do it except to allocate extra and then
 821 * skb_reserve enough to bring it up to the right alignment.
 822 */
823 len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1;
824 }
825 else
826 len = dd->ipath_ibmaxlen + 4;
827 skb = __dev_alloc_skb(len, gfp_mask);
828 if (!skb) {
829 ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
830 len);
831 goto bail;
832 }
833 if (dd->ipath_flags & IPATH_4BYTE_TID) {
834 u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4);
835 if (una)
836 skb_reserve(skb, 4 + (1 << 11) - una);
837 else
838 skb_reserve(skb, 4);
839 } else
840 skb_reserve(skb, 4);
841
842bail:
843 return skb;
844}
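/*
 * Worked example of the IPATH_4BYTE_TID alignment math above: if
 * skb->data ends in 0x234, then una = (0x234 + 4) & 0x7ff = 0x238 and
 * we reserve 4 + 0x800 - 0x238 = 0x5cc bytes, leaving the new
 * skb->data on the next 2KB boundary (ending in 0x800). When una is
 * 0, data + 4 is already aligned and reserving 4 bytes is enough.
 */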
845
846/**
847 * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
848 * @dd: the infinipath device
849 * @etail: the sk_buff number
850 * @tlen: the total packet length
851 * @hdr: the ethernet header
852 *
853 * Separate routine for better overall optimization
854 */
855static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
856 u32 tlen, struct ether_header *hdr)
857{
858 u32 elen;
859 u8 pad, *bthbytes;
860 struct sk_buff *skb, *nskb;
861
862 if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
863 /*
864 * Allocate a new sk_buff to replace the one we give
865 * to the network stack.
866 */
867 nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
868 if (!nskb) {
869 /* count OK packets that we drop */
870 ipath_stats.sps_krdrops++;
871 return;
872 }
873
874 bthbytes = (u8 *) hdr->bth;
875 pad = (bthbytes[1] >> 4) & 3;
876 /* +CRC32 */
877 elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
878
879 skb = dd->ipath_port0_skbs[etail];
880 dd->ipath_port0_skbs[etail] = nskb;
881 skb_put(skb, elen);
882
883 dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
884 ((char __iomem *) dd->ipath_kregbase
885 + dd->ipath_rcvegrbase), 0,
886 virt_to_phys(nskb->data));
887
888 __ipath_layer_rcv(dd, hdr, skb);
889
890 /* another ether packet received */
891 ipath_stats.sps_ether_rpkts++;
892 }
893 else if (hdr->sub_opcode == OPCODE_LID_ARP)
894 __ipath_layer_rcv_lid(dd, hdr);
895}
896
 897/**
898 * ipath_kreceive - receive a packet
899 * @dd: the infinipath device
900 *
901 * called from interrupt handler for errors or receive interrupt
902 */
903void ipath_kreceive(struct ipath_devdata *dd)
904{
905 u64 *rc;
906 void *ebuf;
907 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
908 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
909 u32 etail = -1, l, hdrqtail;
910 struct ips_message_header *hdr;
911 u32 eflags, i, etype, tlen, pkttot = 0;
912 static u64 totcalls; /* stats, may eventually remove */
913 char emsg[128];
914
915 if (!dd->ipath_hdrqtailptr) {
916 ipath_dev_err(dd,
917 "hdrqtailptr not set, can't do receives\n");
918 goto bail;
919 }
920
921 /* There is already a thread processing this queue. */
922 if (test_and_set_bit(0, &dd->ipath_rcv_pending))
923 goto bail;
924
925 if (dd->ipath_port0head ==
926 (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
927 goto done;
928
929gotmore:
930 /*
931 * read only once at start. If in flood situation, this helps
932 * performance slightly. If more arrive while we are processing,
933 * we'll come back here and do them
934 */
935 hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
936
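	/*
	 * Note on the indexing below: l and hdrqtail count 32-bit
	 * words, so (l << 2) is the byte offset of the current entry;
	 * each entry is rsize words, the leading u64 (rc[0]) carries
	 * the receive header flags (RHF), and the packet header itself
	 * starts at rc[1].
	 */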
937 for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
938 u32 qp;
939 u8 *bthbytes;
940
941 rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
942 hdr = (struct ips_message_header *)&rc[1];
943 /*
944 * could make a network order version of IPATH_KD_QP, and
945 * do the obvious shift before masking to speed this up.
946 */
947 qp = ntohl(hdr->bth[1]) & 0xffffff;
948 bthbytes = (u8 *) hdr->bth;
949
950 eflags = ips_get_hdr_err_flags((__le32 *) rc);
951 etype = ips_get_rcv_type((__le32 *) rc);
952 /* total length */
953 tlen = ips_get_length_in_bytes((__le32 *) rc);
954 ebuf = NULL;
955 if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
956 /*
 957 * it turns out that the chip uses an eager buffer
958 * for all non-expected packets, whether it "needs"
959 * one or not. So always get the index, but don't
960 * set ebuf (so we try to copy data) unless the
961 * length requires it.
962 */
963 etail = ips_get_index((__le32 *) rc);
964 if (tlen > sizeof(*hdr) ||
965 etype == RCVHQ_RCV_TYPE_NON_KD)
966 ebuf = ipath_get_egrbuf(dd, etail, 0);
967 }
968
969 /*
970 * both tiderr and ipathhdrerr are set for all plain IB
971 * packets; only ipathhdrerr should be set.
972 */
973
974 if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
975 RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
976 hdr->iph.ver_port_tid_offset) !=
977 IPS_PROTO_VERSION) {
978 ipath_cdbg(PKT, "Bad InfiniPath protocol version "
979 "%x\n", etype);
980 }
981
982 if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
983 INFINIPATH_RHF_H_IHDRERR)) {
984 get_rhf_errstring(eflags, emsg, sizeof emsg);
985 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
986 "tlen=%x opcode=%x egridx=%x: %s\n",
987 eflags, l, etype, tlen, bthbytes[0],
988 ips_get_index((__le32 *) rc), emsg);
989 } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
990 int ret = __ipath_verbs_rcv(dd, rc + 1,
991 ebuf, tlen);
992 if (ret == -ENODEV)
993 ipath_cdbg(VERBOSE,
994 "received IB packet, "
995 "not SMA (QP=%x)\n", qp);
996 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
997 if (qp == IPATH_KD_QP &&
998 bthbytes[0] == ipath_layer_rcv_opcode &&
999 ebuf)
1000 ipath_rcv_layer(dd, etail, tlen,
1001 (struct ether_header *)hdr);
1002 else
1003 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1004 "qp=%x), len %x; ignored\n",
1005 etype, bthbytes[0], qp, tlen);
1006 }
1007 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
1008 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
1009 be32_to_cpu(hdr->bth[0]) & 0xff);
1010 else if (eflags & (INFINIPATH_RHF_H_TIDERR |
1011 INFINIPATH_RHF_H_IHDRERR)) {
1012 /*
1013 * This is a type 3 packet, only the LRH is in the
1014 * rcvhdrq, the rest of the header is in the eager
1015 * buffer.
1016 */
1017 u8 opcode;
1018 if (ebuf) {
1019 bthbytes = (u8 *) ebuf;
1020 opcode = *bthbytes;
1021 }
1022 else
1023 opcode = 0;
1024 get_rhf_errstring(eflags, emsg, sizeof emsg);
1025 ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
1026 "len %x\n", eflags, emsg, opcode, etail,
1027 tlen);
1028 } else {
1029 /*
1030 * error packet, type of error unknown.
1031 * Probably type 3, but we don't know, so don't
1032 * even try to print the opcode, etc.
1033 */
1034 ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
1035 "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
1036 "hdr %llx %llx %llx %llx %llx\n",
1037 etail, tlen, (unsigned long) rc, l,
1038 (unsigned long long) rc[0],
1039 (unsigned long long) rc[1],
1040 (unsigned long long) rc[2],
1041 (unsigned long long) rc[3],
1042 (unsigned long long) rc[4],
1043 (unsigned long long) rc[5]);
1044 }
1045 l += rsize;
1046 if (l >= maxcnt)
1047 l = 0;
1048 /*
1049 * update for each packet, to help prevent overflows if we
1050 * have lots of packets.
1051 */
1052 (void)ipath_write_ureg(dd, ur_rcvhdrhead,
1053 dd->ipath_rhdrhead_intr_off | l, 0);
1054 if (etype != RCVHQ_RCV_TYPE_EXPECTED)
1055 (void)ipath_write_ureg(dd, ur_rcvegrindexhead,
1056 etail, 0);
1057 }
1058
1059 pkttot += i;
1060
1061 dd->ipath_port0head = l;
1062
1063 if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
1064 /* more arrived while we handled first batch */
1065 goto gotmore;
1066
1067 if (pkttot > ipath_stats.sps_maxpkts_call)
1068 ipath_stats.sps_maxpkts_call = pkttot;
1069 ipath_stats.sps_port0pkts += pkttot;
1070 ipath_stats.sps_avgpkts_call =
1071 ipath_stats.sps_port0pkts / ++totcalls;
1072
1073done:
1074 clear_bit(0, &dd->ipath_rcv_pending);
1075 smp_mb__after_clear_bit();
1076
1077bail:;
1078}
1079
1080/**
1081 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
1082 * @dd: the infinipath device
1083 *
 1084 * Called whenever our local copy indicates we have run out of send
 1085 * buffers. NOTE: This can be called from interrupt context by some
 1086 * code, and from non-interrupt context by ipath_getpiobuf().
1087 */
1088
1089static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1090{
1091 unsigned long flags;
1092 int i;
1093 const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
1094
1095 /* If the generation (check) bits have changed, then we update the
1096 * busy bit for the corresponding PIO buffer. This algorithm will
1097 * modify positions to the value they already have in some cases
1098 * (i.e., no change), but it's faster than changing only the bits
1099 * that have changed.
1100 *
 1101 * We would like to do this atomically, to avoid spinlocks in the
 1102 * critical send path, but that's not really possible, given the
 1103 * type of changes, and that this routine could be called on
 1104 * multiple CPUs simultaneously, so we lock in this routine only,
 1105 * to avoid conflicting updates; all we change is the shadow, and
 1106 * it's a single 64 bit memory location, so by definition the update
 1107 * is atomic in terms of what other CPUs can see in testing the
1108 * bits. The spin_lock overhead isn't too bad, since it only
1109 * happens when all buffers are in use, so only cpu overhead, not
1110 * latency or bandwidth is affected.
1111 */
1112#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
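	/*
	 * Bit-layout sketch: each PIO buffer owns two adjacent bits in
	 * these qwords -- the even bit (selected by
	 * _IPATH_ALL_CHECKBITS) is the generation/"check" bit, and the
	 * odd bit (reached via INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
	 * presumably 1) is the busy bit; e.g. buffer 3 uses bits 6
	 * (check) and 7 (busy) of qword 0. The loop below copies a
	 * busy bit from the DMAed registers into the shadow only for
	 * buffers whose check bits agree again (what pchg selects).
	 */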
1113 if (!dd->ipath_pioavailregs_dma) {
1114 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
1115 return;
1116 }
1117 if (ipath_debug & __IPATH_VERBDBG) {
1118 /* only if packet debug and verbose */
1119 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1120 unsigned long *shadow = dd->ipath_pioavailshadow;
1121
1122 ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
1123 "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
1124 "s3=%lx\n",
1125 (unsigned long long) le64_to_cpu(dma[0]),
1126 shadow[0],
1127 (unsigned long long) le64_to_cpu(dma[1]),
1128 shadow[1],
1129 (unsigned long long) le64_to_cpu(dma[2]),
1130 shadow[2],
1131 (unsigned long long) le64_to_cpu(dma[3]),
1132 shadow[3]);
1133 if (piobregs > 4)
1134 ipath_cdbg(
1135 PKT, "2nd group, dma4=%llx shad4=%lx, "
1136 "d5=%llx s5=%lx, d6=%llx s6=%lx, "
1137 "d7=%llx s7=%lx\n",
1138 (unsigned long long) le64_to_cpu(dma[4]),
1139 shadow[4],
1140 (unsigned long long) le64_to_cpu(dma[5]),
1141 shadow[5],
1142 (unsigned long long) le64_to_cpu(dma[6]),
1143 shadow[6],
1144 (unsigned long long) le64_to_cpu(dma[7]),
1145 shadow[7]);
1146 }
1147 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1148 for (i = 0; i < piobregs; i++) {
1149 u64 pchbusy, pchg, piov, pnew;
1150 /*
1151 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
1152 */
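		/*
		 * e.g. shadow index 4 is read from dma[5] and index 5
		 * from dma[4]; indices 0 through 3 are unaffected.
		 */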
1153 if (i > 3) {
1154 if (i & 1)
1155 piov = le64_to_cpu(
1156 dd->ipath_pioavailregs_dma[i - 1]);
1157 else
1158 piov = le64_to_cpu(
1159 dd->ipath_pioavailregs_dma[i + 1]);
1160 } else
1161 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1162 pchg = _IPATH_ALL_CHECKBITS &
1163 ~(dd->ipath_pioavailshadow[i] ^ piov);
1164 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
1165 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
1166 pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
1167 pnew |= piov & pchbusy;
1168 dd->ipath_pioavailshadow[i] = pnew;
1169 }
1170 }
1171 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1172}
1173
1174/**
1175 * ipath_setrcvhdrsize - set the receive header size
1176 * @dd: the infinipath device
1177 * @rhdrsize: the receive header size
1178 *
1179 * called from user init code, and also layered driver init
1180 */
1181int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1182{
1183 int ret = 0;
1184
1185 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1186 if (dd->ipath_rcvhdrsize != rhdrsize) {
1187 dev_info(&dd->pcidev->dev,
1188 "Error: can't set protocol header "
1189 "size %u, already %u\n",
1190 rhdrsize, dd->ipath_rcvhdrsize);
1191 ret = -EAGAIN;
1192 } else
1193 ipath_cdbg(VERBOSE, "Reuse same protocol header "
1194 "size %u\n", dd->ipath_rcvhdrsize);
1195 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1196 (sizeof(u64) / sizeof(u32)))) {
1197 ipath_dbg("Error: can't set protocol header size %u "
1198 "(> max %u)\n", rhdrsize,
1199 dd->ipath_rcvhdrentsize -
1200 (u32) (sizeof(u64) / sizeof(u32)));
1201 ret = -EOVERFLOW;
1202 } else {
1203 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1204 dd->ipath_rcvhdrsize = rhdrsize;
1205 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1206 dd->ipath_rcvhdrsize);
1207 ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
1208 dd->ipath_rcvhdrsize);
1209 }
1210 return ret;
1211}
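/*
 * Worked example for the limit above: with ipath_rcvhdrentsize == 16
 * (32-bit words), the largest accepted rhdrsize is 16 - 2 == 14
 * words, since one u64 of each entry is reserved (for the receive
 * header flags) ahead of the protocol header.
 */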
1212
1213/**
1214 * ipath_getpiobuf - find an available pio buffer
1215 * @dd: the infinipath device
1216 * @pbufnum: the buffer number is placed here
1217 *
1218 * do appropriate marking as busy, etc.
1219 * returns buffer number if one found (>=0), negative number is error.
1220 * Used by ipath_sma_send_pkt and ipath_layer_send
1221 */
1222u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1223{
1224 int i, j, starti, updated = 0;
1225 unsigned piobcnt, iter;
1226 unsigned long flags;
1227 unsigned long *shadow = dd->ipath_pioavailshadow;
1228 u32 __iomem *buf;
1229
1230 piobcnt = (unsigned)(dd->ipath_piobcnt2k
1231 + dd->ipath_piobcnt4k);
1232 starti = dd->ipath_lastport_piobuf;
1233 iter = piobcnt - starti;
1234 if (dd->ipath_upd_pio_shadow) {
1235 /*
1236 * Minor optimization. If we had no buffers on last call,
1237 * start out by doing the update; continue and do scan even
1238 * if no buffers were updated, to be paranoid
1239 */
1240 ipath_update_pio_bufs(dd);
1241 /* we scanned here, don't do it at end of scan */
1242 updated = 1;
1243 i = starti;
1244 } else
1245 i = dd->ipath_lastpioindex;
1246
1247rescan:
1248 /*
1249 * while test_and_set_bit() is atomic, we do that and then the
1250 * change_bit(), and the pair is not. See if this is the cause
1251 * of the remaining armlaunch errors.
1252 */
1253 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1254 for (j = 0; j < iter; j++, i++) {
1255 if (i >= piobcnt)
1256 i = starti;
1257 /*
1258 * To avoid bus lock overhead, we first find a candidate
1259 * buffer, then do the test and set, and continue if that
1260 * fails.
1261 */
1262 if (test_bit((2 * i) + 1, shadow) ||
1263 test_and_set_bit((2 * i) + 1, shadow))
1264 continue;
1265 /* flip generation bit */
1266 change_bit(2 * i, shadow);
1267 break;
1268 }
1269 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1270
1271 if (j == iter) {
1272 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1273
1274 /*
1275 * first time through; shadow exhausted, but may be real
1276 * buffers available, so go see; if any updated, rescan
1277 * (once)
1278 */
1279 if (!updated) {
1280 ipath_update_pio_bufs(dd);
1281 updated = 1;
1282 i = starti;
1283 goto rescan;
1284 }
1285 dd->ipath_upd_pio_shadow = 1;
1286 /*
1287 * not atomic, but if we lose one once in a while, that's OK
1288 */
1289 ipath_stats.sps_nopiobufs++;
1290 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1291 ipath_dbg(
1292 "%u pio sends with no bufavail; dmacopy: "
1293 "%llx %llx %llx %llx; shadow: "
1294 "%lx %lx %lx %lx\n",
1295 dd->ipath_consec_nopiobuf,
1296 (unsigned long long) le64_to_cpu(dma[0]),
1297 (unsigned long long) le64_to_cpu(dma[1]),
1298 (unsigned long long) le64_to_cpu(dma[2]),
1299 (unsigned long long) le64_to_cpu(dma[3]),
1300 shadow[0], shadow[1], shadow[2],
1301 shadow[3]);
1302 /*
1303 * 4 buffers per byte, 4 registers above, cover rest
1304 * below
1305 */
1306 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1307 (sizeof(shadow[0]) * 4 * 4))
1308 ipath_dbg("2nd group: dmacopy: %llx %llx "
1309 "%llx %llx; shadow: %lx %lx "
1310 "%lx %lx\n",
1311 (unsigned long long)
1312 le64_to_cpu(dma[4]),
1313 (unsigned long long)
1314 le64_to_cpu(dma[5]),
1315 (unsigned long long)
1316 le64_to_cpu(dma[6]),
1317 (unsigned long long)
1318 le64_to_cpu(dma[7]),
1319 shadow[4], shadow[5],
1320 shadow[6], shadow[7]);
1321 }
1322 buf = NULL;
1323 goto bail;
1324 }
1325
1326 if (updated)
1327 /*
1328 * ran out of bufs, now some (at least this one we just
1329 * got) are now available, so tell the layered driver.
1330 */
1331 __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
1332
1333 /*
1334 * set next starting place. Since it's just an optimization,
1335 * it doesn't matter who wins on this, so no locking
1336 */
1337 dd->ipath_lastpioindex = i + 1;
1338 if (dd->ipath_upd_pio_shadow)
1339 dd->ipath_upd_pio_shadow = 0;
1340 if (dd->ipath_consec_nopiobuf)
1341 dd->ipath_consec_nopiobuf = 0;
1342 if (i < dd->ipath_piobcnt2k)
1343 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1344 i * dd->ipath_palign);
1345 else
1346 buf = (u32 __iomem *)
1347 (dd->ipath_pio4kbase +
1348 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1349 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1350 i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1351 if (pbufnum)
1352 *pbufnum = i;
1353
1354bail:
1355 return buf;
1356}
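/*
 * Usage sketch for callers such as ipath_sma_send_pkt or
 * ipath_layer_send (schematic; the copy step is not the exact
 * upstream code):
 *
 *	u32 bufnum;
 *	u32 __iomem *piobuf = ipath_getpiobuf(dd, &bufnum);
 *
 *	if (!piobuf)
 *		return -EBUSY;	// every buffer busy; retry later
 *	// write the PBC and payload into the chip's PIO buffer here
 */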
1357
1358/**
1359 * ipath_create_rcvhdrq - create a receive header queue
1360 * @dd: the infinipath device
1361 * @pd: the port data
1362 *
1363 * this *must* be physically contiguous memory, and for now,
1364 * that limits it to what kmalloc can do.
1365 */
1366int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1367 struct ipath_portdata *pd)
1368{
1369 int ret = 0, amt;
1370
1371 amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1372 sizeof(u32), PAGE_SIZE);
1373 if (!pd->port_rcvhdrq) {
1374 /*
1375 * not using REPEAT isn't viable; at 128KB, we can easily
1376 * fail this. The problem with REPEAT is we can block here
1377 * "forever". There isn't an inbetween, unfortunately. We
1378 * could reduce the risk by never freeing the rcvhdrq except
1379 * at unload, but even then, the first time a port is used,
1380 * we could delay for some time...
1381 */
1382 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
1383
1384 pd->port_rcvhdrq = dma_alloc_coherent(
1385 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1386 gfp_flags);
1387
1388 if (!pd->port_rcvhdrq) {
1389 ipath_dev_err(dd, "attempt to allocate %d bytes "
1390 "for port %u rcvhdrq failed\n",
1391 amt, pd->port_port);
1392 ret = -ENOMEM;
1393 goto bail;
1394 }
1395
1396 pd->port_rcvhdrq_size = amt;
1397
1398 ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
1399 "for port %u rcvhdr Q\n",
1400 amt >> PAGE_SHIFT, pd->port_rcvhdrq,
1401 (unsigned long) pd->port_rcvhdrq_phys,
1402 (unsigned long) pd->port_rcvhdrq_size,
1403 pd->port_port);
1404 } else {
1405 /*
1406 * clear for security, sanity, and/or debugging, each
1407 * time we reuse
1408 */
1409 memset(pd->port_rcvhdrq, 0, amt);
1410 }
1411
1412 /*
1413 * tell chip each time we init it, even if we are re-using previous
1414 * memory (we zero it at process close)
1415 */
1416 ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
1417 pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
1418 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1419 pd->port_port, pd->port_rcvhdrq_phys);
1420
1421 ret = 0;
1422bail:
1423 return ret;
1424}
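/*
 * Sizing example for the allocation above (illustrative numbers):
 * rcvhdrcnt == 512 entries of rcvhdrentsize == 16 words is
 * 512 * 16 * 4 = 32768 bytes, which ALIGN() leaves at 32KB, i.e. 8
 * pages with 4KB pages.
 */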
1425
1426int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
1427 u64 bits_to_wait_for, u64 * valp)
1428{
1429 unsigned long timeout;
1430 u64 lastval, val;
1431 int ret;
1432
1433 lastval = ipath_read_kreg64(dd, reg_id);
1434 /* wait a ridiculously long time */
1435 timeout = jiffies + msecs_to_jiffies(5);
1436 do {
1437 val = ipath_read_kreg64(dd, reg_id);
1438 /* set so they have something, even on failures. */
1439 *valp = val;
1440 if ((val & bits_to_wait_for) == bits_to_wait_for) {
1441 ret = 0;
1442 break;
1443 }
1444 if (val != lastval)
1445 ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
1446 "waiting for %llx bits\n",
1447 (unsigned long long) lastval,
1448 (unsigned long long) val,
1449 (unsigned long long) bits_to_wait_for);
1450 cond_resched();
1451 if (time_after(jiffies, timeout)) {
1452 ipath_dbg("Didn't get bits %llx in register 0x%x, "
1453 "got %llx\n",
1454 (unsigned long long) bits_to_wait_for,
1455 reg_id, (unsigned long long) *valp);
1456 ret = -ENODEV;
1457 break;
1458 }
1459 } while (1);
1460
1461 return ret;
1462}
1463
1464/**
1465 * ipath_waitfor_mdio_cmdready - wait for last command to complete
1466 * @dd: the infinipath device
1467 *
1468 * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
1469 * away indicating the last command has completed. It doesn't return data
1470 */
1471int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1472{
1473 unsigned long timeout;
1474 u64 val;
1475 int ret;
1476
1477 /* wait a ridiculously long time */
1478 timeout = jiffies + msecs_to_jiffies(5);
1479 do {
1480 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
1481 if (!(val & IPATH_MDIO_CMDVALID)) {
1482 ret = 0;
1483 break;
1484 }
1485 cond_resched();
1486 if (time_after(jiffies, timeout)) {
1487 ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
1488 (unsigned long long) val);
1489 ret = -ENODEV;
1490 break;
1491 }
1492 } while (1);
1493
1494 return ret;
1495}
1496
1497void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1498{
1499 static const char *what[4] = {
1500 [0] = "DOWN",
1501 [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
1502 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1503 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1504 };
1505 ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
1506 "is %s\n", dd->ipath_unit,
1507 what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
1508 INFINIPATH_IBCC_LINKCMD_MASK],
1509 ipath_ibcstatus_str[
1510 (ipath_read_kreg64
1511 (dd, dd->ipath_kregs->kr_ibcstatus) >>
1512 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1513 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
1514
1515 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1516 dd->ipath_ibcctrl | which);
1517}
1518
1519/**
1520 * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
1521 * @dd: the infinipath device
1522 * @regno: the register number to read
1523 * @port: the port containing the register
1524 *
1525 * Registers that vary with the chip implementation constants (port)
1526 * use this routine.
1527 */
1528u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno,
1529 unsigned port)
1530{
1531 u16 where;
1532
1533 if (port < dd->ipath_portcnt &&
1534 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1535 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1536 where = regno + port;
1537 else
1538 where = -1;
1539
1540 return ipath_read_kreg64(dd, where);
1541}
1542
1543/**
1544 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
1545 * @dd: the infinipath device
1546 * @regno: the register number to write
1547 * @port: the port containing the register
1548 * @value: the value to write
1549 *
1550 * Registers that vary with the chip implementation constants (port)
1551 * use this routine.
1552 */
1553void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1554 unsigned port, u64 value)
1555{
1556 u16 where;
1557
1558 if (port < dd->ipath_portcnt &&
1559 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
1560 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
1561 where = regno + port;
1562 else
1563 where = -1;
1564
1565 ipath_write_kreg(dd, where, value);
1566}
1567
1568/**
1569 * ipath_shutdown_device - shut down a device
1570 * @dd: the infinipath device
1571 *
1572 * This is called to make the device quiet when we are about to
1573 * unload the driver, and also when the device is administratively
1574 * disabled. It does not free any data structures.
1575 * Everything it does has to be setup again by ipath_init_chip(dd,1)
1576 */
1577void ipath_shutdown_device(struct ipath_devdata *dd)
1578{
1579 u64 val;
1580
1581 ipath_dbg("Shutting down the device\n");
1582
1583 dd->ipath_flags |= IPATH_LINKUNK;
1584 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
1585 IPATH_LINKINIT | IPATH_LINKARMED |
1586 IPATH_LINKACTIVE);
1587 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
1588 IPATH_STATUS_IB_READY);
1589
1590 /* mask interrupts, but not errors */
1591 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
1592
1593 dd->ipath_rcvctrl = 0;
1594 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1595 dd->ipath_rcvctrl);
1596
1597 /*
1598 * gracefully stop all sends allowing any in progress to trickle out
1599 * first.
1600 */
1601 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
1602 /* flush it */
1603 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1604 /*
1605 * enough for anything that's going to trickle out to have actually
1606 * done so.
1607 */
1608 udelay(5);
1609
1610 /*
1611 * abort any armed or launched PIO buffers that didn't go. (self
1612 * clearing). Will cause any packet currently being transmitted to
1613 * go out with an EBP, and may also cause a short packet error on
1614 * the receiver.
1615 */
1616 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1617 INFINIPATH_S_ABORT);
1618
1619 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1620 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1621
1622 /*
1623 * we are shutting down, so tell the layered driver. We don't do
1624 * this on just a link state change, much like ethernet, a cable
1625 * unplug, etc. doesn't change driver state
1626 */
1627 ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
1628
1629 /* disable IBC */
1630 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
1631 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1632 dd->ipath_control);
1633
1634 /*
 1635 * clear SerdesEnable and turn the LEDs off; do this here because
 1636 * we are unloading, so don't count on interrupts to move things
 1637 * along. Turn the LEDs off explicitly for the same reason.
1638 */
1639 dd->ipath_f_quiet_serdes(dd);
1640 dd->ipath_f_setextled(dd, 0, 0);
1641
1642 if (dd->ipath_stats_timer_active) {
1643 del_timer_sync(&dd->ipath_stats_timer);
1644 dd->ipath_stats_timer_active = 0;
1645 }
1646
1647 /*
1648 * clear all interrupts and errors, so that the next time the driver
1649 * is loaded or device is enabled, we know that whatever is set
1650 * happened while we were unloaded
1651 */
1652 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
1653 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
1654 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
1655 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
1656}
1657
1658/**
1659 * ipath_free_pddata - free a port's allocated data
1660 * @dd: the infinipath device
1661 * @port: the port
1662 * @freehdrq: free the port data structure if true
1663 *
1664 * when closing, free up any allocated data for a port, if the
1665 * reference count goes to zero
1666 * Note: this also optionally frees the portdata itself!
1667 * Any changes here have to be matched up with the reinit case
1668 * of ipath_init_chip(), which calls this routine on reinit after reset.
1669 */
1670void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
1671{
1672 struct ipath_portdata *pd = dd->ipath_pd[port];
1673
1674 if (!pd)
1675 return;
1676 if (freehdrq)
1677 /*
1678 * only clear and free portdata if we are going to also
1679 * release the hdrq, otherwise we leak the hdrq on each
1680 * open/close cycle
1681 */
1682 dd->ipath_pd[port] = NULL;
1683 if (freehdrq && pd->port_rcvhdrq) {
1684 ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
1685 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
1686 (unsigned long) pd->port_rcvhdrq_size);
1687 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
1688 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
1689 pd->port_rcvhdrq = NULL;
1690 }
1691 if (port && pd->port_rcvegrbuf) {
1692 /* always free this */
1693 if (pd->port_rcvegrbuf) {
1694 unsigned e;
1695
1696 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
1697 void *base = pd->port_rcvegrbuf[e];
1698 size_t size = pd->port_rcvegrbuf_size;
1699
1700 ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
1701 "chunk %u/%u\n", base,
1702 (unsigned long) size,
1703 e, pd->port_rcvegrbuf_chunks);
1704 dma_free_coherent(
1705 &dd->pcidev->dev, size, base,
1706 pd->port_rcvegrbuf_phys[e]);
1707 }
1708 vfree(pd->port_rcvegrbuf);
1709 pd->port_rcvegrbuf = NULL;
1710 vfree(pd->port_rcvegrbuf_phys);
1711 pd->port_rcvegrbuf_phys = NULL;
1712 }
1713 pd->port_rcvegrbuf_chunks = 0;
1714 } else if (port == 0 && dd->ipath_port0_skbs) {
1715 unsigned e;
1716 struct sk_buff **skbs = dd->ipath_port0_skbs;
1717
1718 dd->ipath_port0_skbs = NULL;
1719 ipath_cdbg(VERBOSE, "free closed port %d ipath_port0_skbs "
1720 "@ %p\n", pd->port_port, skbs);
1721 for (e = 0; e < dd->ipath_rcvegrcnt; e++)
1722 if (skbs[e])
1723 dev_kfree_skb(skbs[e]);
1724 vfree(skbs);
1725 }
1726 if (freehdrq) {
1727 kfree(pd->port_tid_pg_list);
1728 kfree(pd);
1729 }
1730}
1731
1732int __init infinipath_init(void)
1733{
1734 int ret;
1735
1736 ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version);
1737
1738 /*
1739 * These must be called before the driver is registered with
1740 * the PCI subsystem.
1741 */
1742 idr_init(&unit_table);
1743 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
1744 ret = -ENOMEM;
1745 goto bail;
1746 }
1747
1748 ret = pci_register_driver(&ipath_driver);
1749 if (ret < 0) {
1750 printk(KERN_ERR IPATH_DRV_NAME
1751 ": Unable to register driver: error %d\n", -ret);
1752 goto bail_unit;
1753 }
1754
1755 ret = ipath_driver_create_group(&ipath_driver.driver);
1756 if (ret < 0) {
1757 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
1758 "sysfs entries: error %d\n", -ret);
1759 goto bail_pci;
1760 }
1761
1762 ret = ipath_init_ipathfs();
1763 if (ret < 0) {
1764 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
1765 "ipathfs: error %d\n", -ret);
1766 goto bail_group;
1767 }
1768
1769 goto bail;
1770
1771bail_group:
1772 ipath_driver_remove_group(&ipath_driver.driver);
1773
1774bail_pci:
1775 pci_unregister_driver(&ipath_driver);
1776
1777bail_unit:
1778 idr_destroy(&unit_table);
1779
1780bail:
1781 return ret;
1782}
1783
1784static void cleanup_device(struct ipath_devdata *dd)
1785{
1786 int port;
1787
1788 ipath_shutdown_device(dd);
1789
1790 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
1791 /* can't do anything more with chip; needs re-init */
1792 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
1793 if (dd->ipath_kregbase) {
1794 /*
 1795 * if we haven't already cleaned these up before, do so
 1796 * now, to ensure any register reads/writes "fail" until
 1797 * re-init
1798 */
1799 dd->ipath_kregbase = NULL;
1800 dd->ipath_kregvirt = NULL;
1801 dd->ipath_uregbase = 0;
1802 dd->ipath_sregbase = 0;
1803 dd->ipath_cregbase = 0;
1804 dd->ipath_kregsize = 0;
1805 }
1806 ipath_disable_wc(dd);
1807 }
1808
1809 if (dd->ipath_pioavailregs_dma) {
1810 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1811 (void *) dd->ipath_pioavailregs_dma,
1812 dd->ipath_pioavailregs_phys);
1813 dd->ipath_pioavailregs_dma = NULL;
1814 }
1815
1816 if (dd->ipath_pageshadow) {
1817 struct page **tmpp = dd->ipath_pageshadow;
1818 int i, cnt = 0;
1819
1820 ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
1821 "locked\n");
1822 for (port = 0; port < dd->ipath_cfgports; port++) {
1823 int port_tidbase = port * dd->ipath_rcvtidcnt;
1824 int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
1825 for (i = port_tidbase; i < maxtid; i++) {
1826 if (!tmpp[i])
1827 continue;
1828 ipath_release_user_pages(&tmpp[i], 1);
1829 tmpp[i] = NULL;
1830 cnt++;
1831 }
1832 }
1833 if (cnt) {
1834 ipath_stats.sps_pageunlocks += cnt;
1835 ipath_cdbg(VERBOSE, "There were still %u expTID "
1836 "entries locked\n", cnt);
1837 }
1838 if (ipath_stats.sps_pagelocks ||
1839 ipath_stats.sps_pageunlocks)
1840 ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
1841 "unlocked via ipath_m{un}lock\n",
1842 (unsigned long long)
1843 ipath_stats.sps_pagelocks,
1844 (unsigned long long)
1845 ipath_stats.sps_pageunlocks);
1846
1847 ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
1848 dd->ipath_pageshadow);
1849 vfree(dd->ipath_pageshadow);
1850 dd->ipath_pageshadow = NULL;
1851 }
1852
1853 /*
1854 * free any resources still in use (usually just kernel ports)
1855 * at unload
1856 */
1857 for (port = 0; port < dd->ipath_cfgports; port++)
1858 ipath_free_pddata(dd, port, 1);
1859 kfree(dd->ipath_pd);
1860 /*
1861 * debuggability, in case some cleanup path tries to use it
1862 * after this
1863 */
1864 dd->ipath_pd = NULL;
1865}
1866
1867static void __exit infinipath_cleanup(void)
1868{
1869 struct ipath_devdata *dd, *tmp;
1870 unsigned long flags;
1871
1872 ipath_exit_ipathfs();
1873
1874 ipath_driver_remove_group(&ipath_driver.driver);
1875
1876 spin_lock_irqsave(&ipath_devs_lock, flags);
1877
1878 /*
 1879 * turn off rcv, send, and interrupts for all ports; should all
 1880 * drivers also hard reset the chip here?
 1881 * free up port 0 (kernel) rcvhdr and egr bufs, and eventually TID
 1882 * bufs, for all versions of the driver, if they were allocated
1883 */
1884 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
1885 spin_unlock_irqrestore(&ipath_devs_lock, flags);
1886
1887 if (dd->ipath_kregbase)
1888 cleanup_device(dd);
1889
1890 if (dd->pcidev) {
1891 if (dd->pcidev->irq) {
1892 ipath_cdbg(VERBOSE,
1893 "unit %u free_irq of irq %x\n",
1894 dd->ipath_unit, dd->pcidev->irq);
1895 free_irq(dd->pcidev->irq, dd);
1896 } else
1897 ipath_dbg("irq is 0, not doing free_irq "
1898 "for unit %u\n", dd->ipath_unit);
1899 dd->pcidev = NULL;
1900 }
1901
1902 /*
1903 * we check for NULL here, because it's outside the kregbase
1904 * check, and we need to call it after the free_irq. Thus
1905 * it's possible that the function pointers were never
1906 * initialized.
1907 */
1908 if (dd->ipath_f_cleanup)
1909 /* clean up chip-specific stuff */
1910 dd->ipath_f_cleanup(dd);
1911
1912 spin_lock_irqsave(&ipath_devs_lock, flags);
1913 }
1914
1915 spin_unlock_irqrestore(&ipath_devs_lock, flags);
1916
1917 ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
1918 pci_unregister_driver(&ipath_driver);
1919
1920 idr_destroy(&unit_table);
1921}
1922
1923/**
1924 * ipath_reset_device - reset the chip if possible
1925 * @unit: the device to reset
1926 *
1927 * Whether or not reset is successful, we attempt to re-initialize the chip
1928 * (that is, much like a driver unload/reload). We clear the INITTED flag
1929 * so that the various entry points will fail until we reinitialize. For
 1930 * now, we only allow this if no user ports are open that use chip resources.
1931 */
1932int ipath_reset_device(int unit)
1933{
1934 int ret, i;
1935 struct ipath_devdata *dd = ipath_lookup(unit);
1936
1937 if (!dd) {
1938 ret = -ENODEV;
1939 goto bail;
1940 }
1941
1942 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
1943
1944 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
1945 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
1946 "not initialized or not present\n", unit);
1947 ret = -ENXIO;
1948 goto bail;
1949 }
1950
1951 if (dd->ipath_pd)
1952 for (i = 1; i < dd->ipath_portcnt; i++) {
1953 if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
1954 ipath_dbg("unit %u port %d is in use "
1955 "(PID %u cmd %s), can't reset\n",
1956 unit, i,
1957 dd->ipath_pd[i]->port_pid,
1958 dd->ipath_pd[i]->port_comm);
1959 ret = -EBUSY;
1960 goto bail;
1961 }
1962 }
1963
1964 dd->ipath_flags &= ~IPATH_INITTED;
1965 ret = dd->ipath_f_reset(dd);
1966 if (ret != 1)
1967 ipath_dbg("reset was not successful\n");
1968 ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
1969 unit);
1970 ret = ipath_init_chip(dd, 1);
1971 if (ret)
1972 ipath_dev_err(dd, "Reinitialize unit %u after "
1973 "reset failed with %d\n", unit, ret);
1974 else
1975 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
1976 "resetting\n", unit);
1977
1978bail:
1979 return ret;
1980}
1981
1982module_init(infinipath_init);
1983module_exit(infinipath_cleanup);