author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-12 19:45:40 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-12 19:45:40 -0400
commit    0cdf6990e992902ae59cbc625d28cb41390f378e (patch)
tree      0c01cf792be5f36ea34064036005f424ab95a571 /drivers/infiniband/hw/ipath
parent    de081fa517fed81b0369f2e90ca87c30182879c8 (diff)
parent    cec7c893d8654723028f09d33341e42673558057 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (76 commits)
  IB: Update MAINTAINERS with Hal's new email address
  IB/mlx4: Implement query SRQ
  IB/mlx4: Implement query QP
  IB/cm: Send no match if a SIDR REQ does not match a listen
  IB/cm: Fix handling of duplicate SIDR REQs
  IB/cm: cm_msgs.h should include ib_cm.h
  IB/cm: Include HCA ACK delay in local ACK timeout
  IB/cm: Use spin_lock_irq() instead of spin_lock_irqsave() when possible
  IB/sa: Make sure SA queries use default P_Key
  IPoIB: Recycle loopback skbs instead of freeing and reallocating
  IB/mthca: Replace memset(<addr>, 0, PAGE_SIZE) with clear_page(<addr>)
  IPoIB/cm: Fix warning if IPV6 is not enabled
  IB/core: Take sizeof the correct pointer when calling kmalloc()
  IB/ehca: Improve latency by unlocking after triggering the hardware
  IB/ehca: Notify consumers of LID/PKEY/SM changes after nondisruptive events
  IB/ehca: Return QP pointer in poll_cq()
  IB/ehca: Change idr spinlocks into rwlocks
  IB/ehca: Refactor sync between completions and destroy_cq using atomic_t
  IB/ehca: Lock renaming, static initializers
  IB/ehca: Report RDMA atomic attributes in query_qp()
  ...
Diffstat (limited to 'drivers/infiniband/hw/ipath')
-rw-r--r--  drivers/infiniband/hw/ipath/Kconfig             |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_common.h      |   33
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c          |    7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h       |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c        |   41
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c      |  187
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c      |  303
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c    |  205
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c          |    9
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c     |  101
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c     |   92
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c   |   26
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c        |  141
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h      |   85
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c        |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c       |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.h       |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c         |   11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mmap.c        |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c          |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c          |   19
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c          |  116
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h   |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c         |   36
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c         |    4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c       |   25
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c       |   43
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c          |    9
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c          |    6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c  |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c       |   29
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h       |    3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs_mcast.c |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_wc_ppc64.c    |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_wc_x86_64.c   |   29
35 files changed, 1237 insertions, 345 deletions
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 90c14543677d..044da5828a78 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,6 +1,6 @@
1config INFINIBAND_IPATH 1config INFINIBAND_IPATH
2 tristate "QLogic InfiniPath Driver" 2 tristate "QLogic InfiniPath Driver"
3 depends on (PCI_MSI || HT_IRQ) && 64BIT && INFINIBAND && NET 3 depends on (PCI_MSI || HT_IRQ) && 64BIT && NET
4 ---help--- 4 ---help---
5 This is a driver for QLogic InfiniPath host channel adapters, 5 This is a driver for QLogic InfiniPath host channel adapters,
6 including InfiniBand verbs support. This driver allows these 6 including InfiniBand verbs support. This driver allows these
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 10c008f22ba6..b4b786d0dfca 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -189,8 +189,7 @@ typedef enum _ipath_ureg {
189#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4 189#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
190#define IPATH_RUNTIME_RCVHDR_COPY 0x8 190#define IPATH_RUNTIME_RCVHDR_COPY 0x8
191#define IPATH_RUNTIME_MASTER 0x10 191#define IPATH_RUNTIME_MASTER 0x10
192#define IPATH_RUNTIME_PBC_REWRITE 0x20 192/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
193#define IPATH_RUNTIME_LOOSE_DMA_ALIGN 0x40
194 193
195/* 194/*
196 * This structure is returned by ipath_userinit() immediately after 195 * This structure is returned by ipath_userinit() immediately after
@@ -432,8 +431,15 @@ struct ipath_user_info {
432#define IPATH_CMD_UNUSED_1 25 431#define IPATH_CMD_UNUSED_1 25
433#define IPATH_CMD_UNUSED_2 26 432#define IPATH_CMD_UNUSED_2 26
434#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ 433#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
434#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
435 435
436#define IPATH_CMD_MAX 27 436#define IPATH_CMD_MAX 28
437
438/*
439 * Poll types
440 */
441#define IPATH_POLL_TYPE_URGENT 0x01
442#define IPATH_POLL_TYPE_OVERFLOW 0x02
437 443
438struct ipath_port_info { 444struct ipath_port_info {
439 __u32 num_active; /* number of active units */ 445 __u32 num_active; /* number of active units */
@@ -474,6 +480,8 @@ struct ipath_cmd {
474 __u16 part_key; 480 __u16 part_key;
475 /* user address of __u32 bitmask of active slaves */ 481 /* user address of __u32 bitmask of active slaves */
476 __u64 slave_mask_addr; 482 __u64 slave_mask_addr;
483 /* type of polling we want */
484 __u16 poll_type;
477 } cmd; 485 } cmd;
478}; 486};
479 487
@@ -502,13 +510,30 @@ struct __ipath_sendpkt {
502 struct ipath_iovec sps_iov[4]; 510 struct ipath_iovec sps_iov[4];
503}; 511};
504 512
505/* Passed into diag data special file's ->write method. */ 513/*
 514 * Diagnostics can send a packet by "writing" one of the following
 515 * two structs to the diag data special file.
 516 * The first is the legacy version, for backward compatibility.
517 */
506struct ipath_diag_pkt { 518struct ipath_diag_pkt {
507 __u32 unit; 519 __u32 unit;
508 __u64 data; 520 __u64 data;
509 __u32 len; 521 __u32 len;
510}; 522};
511 523
524/* The second diag_pkt struct is the expanded version that allows
525 * more control over the packet, specifically, by allowing a custom
526 * pbc (+ extra) qword, so that special modes and deliberate
527 * changes to CRCs can be used. The elements were also re-ordered
528 * for better alignment and to avoid padding issues.
529 */
530struct ipath_diag_xpkt {
531 __u64 data;
532 __u64 pbc_wd;
533 __u32 unit;
534 __u32 len;
535};
536
512/* 537/*
513 * Data layout in I2C flash (for GUID, etc.) 538 * Data layout in I2C flash (for GUID, etc.)
514 * All fields are little-endian binary unless otherwise stated 539 * All fields are little-endian binary unless otherwise stated
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 3e9241badba0..a6f04d27ec57 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -90,6 +90,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
90 wc->queue[head].sl = entry->sl; 90 wc->queue[head].sl = entry->sl;
91 wc->queue[head].dlid_path_bits = entry->dlid_path_bits; 91 wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
92 wc->queue[head].port_num = entry->port_num; 92 wc->queue[head].port_num = entry->port_num;
93 /* Make sure queue entry is written before the head index. */
94 smp_wmb();
93 wc->head = next; 95 wc->head = next;
94 96
95 if (cq->notify == IB_CQ_NEXT_COMP || 97 if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -139,7 +141,8 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
139 141
140 if (tail == wc->head) 142 if (tail == wc->head)
141 break; 143 break;
142 144 /* Make sure entry is read after head index is read. */
145 smp_rmb();
143 qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table, 146 qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
144 wc->queue[tail].qp_num); 147 wc->queue[tail].qp_num);
145 entry->qp = &qp->ibqp; 148 entry->qp = &qp->ibqp;
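The smp_wmb()/smp_rmb() pair added to ipath_cq.c above enforces the usual single-producer/single-consumer ring ordering: publish the completion entry before advancing the head index, and re-read the head index before consuming the entry. A minimal user-space sketch of the same pattern, with C11 release/acquire fences standing in for the kernel barriers (the ring size, struct fields, and function names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8          /* illustrative size, not the driver's */

struct wc_entry { unsigned qp_num; unsigned status; };

static struct wc_entry queue[RING_SIZE];
static atomic_uint head;     /* written only by the producer (completion side) */
static atomic_uint tail;     /* written only by the consumer (poll side)       */

/* Producer: fill the slot, then publish it by advancing head.
 * The release fence plays the role of smp_wmb() in ipath_cq_enter(). */
static void cq_enter(struct wc_entry e)
{
    unsigned h = atomic_load_explicit(&head, memory_order_relaxed);
    queue[h] = e;
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&head, (h + 1) % RING_SIZE, memory_order_relaxed);
}

/* Consumer: observe head, then read the slot.
 * The acquire fence plays the role of smp_rmb() in ipath_poll_cq(). */
static int cq_poll(struct wc_entry *out)
{
    unsigned t = atomic_load_explicit(&tail, memory_order_relaxed);
    if (t == atomic_load_explicit(&head, memory_order_relaxed))
        return 0;                               /* ring empty */
    atomic_thread_fence(memory_order_acquire);
    *out = queue[t];
    atomic_store_explicit(&tail, (t + 1) % RING_SIZE, memory_order_relaxed);
    return 1;
}

int main(void)
{
    struct wc_entry e = { .qp_num = 42, .status = 0 }, got;
    cq_enter(e);
    if (cq_poll(&got))
        printf("polled completion for QP %u\n", got.qp_num);
    return 0;
}

Without the write barrier a consumer on another CPU could observe the new head before the entry contents; the read barrier on the poll side is the matching half.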
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 42bfbdb0d3e6..19c56e6491eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 63e8368b0e95..a698f1949d10 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -323,13 +323,14 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
323{ 323{
324 u32 __iomem *piobuf; 324 u32 __iomem *piobuf;
325 u32 plen, clen, pbufn; 325 u32 plen, clen, pbufn;
326 struct ipath_diag_pkt dp; 326 struct ipath_diag_pkt odp;
327 struct ipath_diag_xpkt dp;
327 u32 *tmpbuf = NULL; 328 u32 *tmpbuf = NULL;
328 struct ipath_devdata *dd; 329 struct ipath_devdata *dd;
329 ssize_t ret = 0; 330 ssize_t ret = 0;
330 u64 val; 331 u64 val;
331 332
332 if (count < sizeof(dp)) { 333 if (count != sizeof(dp)) {
333 ret = -EINVAL; 334 ret = -EINVAL;
334 goto bail; 335 goto bail;
335 } 336 }
@@ -339,6 +340,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
339 goto bail; 340 goto bail;
340 } 341 }
341 342
343 /*
344 * Due to padding/alignment issues (lessened with new struct)
345 * the old and new structs are the same length. We need to
346 * disambiguate them, which we can do because odp.len has never
347 * been less than the total of LRH+BTH+DETH so far, while
 348 * dp.unit (at the same offset) is unlikely to get that high.
349 * Similarly, dp.data, the pointer to user at the same offset
 350 * as odp.unit, is almost certainly at least one (512 byte) page
351 * "above" NULL. The if-block below can be omitted if compatibility
352 * between a new driver and older diagnostic code is unimportant.
 353 * Compatibility in the other direction (new diags, old driver) is
354 * handled in the diagnostic code, with a warning.
355 */
356 if (dp.unit >= 20 && dp.data < 512) {
357 /* very probable version mismatch. Fix it up */
358 memcpy(&odp, &dp, sizeof(odp));
359 /* We got a legacy dp, copy elements to dp */
360 dp.unit = odp.unit;
361 dp.data = odp.data;
362 dp.len = odp.len;
363 dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
364 }
365
342 /* send count must be an exact number of dwords */ 366 /* send count must be an exact number of dwords */
343 if (dp.len & 3) { 367 if (dp.len & 3) {
344 ret = -EINVAL; 368 ret = -EINVAL;
@@ -371,9 +395,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
371 ret = -ENODEV; 395 ret = -ENODEV;
372 goto bail; 396 goto bail;
373 } 397 }
398 /* Check link state, but not if we have custom PBC */
374 val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; 399 val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
375 if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM && 400 if (!dp.pbc_wd && val != IPATH_IBSTATE_INIT &&
376 val != IPATH_IBSTATE_ACTIVE) { 401 val != IPATH_IBSTATE_ARM && val != IPATH_IBSTATE_ACTIVE) {
377 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n", 402 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
378 dd->ipath_unit, (unsigned long long) val); 403 dd->ipath_unit, (unsigned long long) val);
379 ret = -EINVAL; 404 ret = -EINVAL;
@@ -419,9 +444,13 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
419 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n", 444 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
420 dd->ipath_unit, plen - 1, pbufn); 445 dd->ipath_unit, plen - 1, pbufn);
421 446
447 if (dp.pbc_wd == 0)
448 /* Legacy operation, use computed pbc_wd */
449 dp.pbc_wd = plen;
450
422 /* we have to flush after the PBC for correctness on some cpus 451 /* we have to flush after the PBC for correctness on some cpus
423 * or WC buffer can be written out of order */ 452 * or WC buffer can be written out of order */
424 writeq(plen, piobuf); 453 writeq(dp.pbc_wd, piobuf);
425 ipath_flush_wc(); 454 ipath_flush_wc();
426 /* copy all by the trigger word, then flush, so it's written 455 /* copy all by the trigger word, then flush, so it's written
427 * to chip before trigger word, then write trigger word, then 456 * to chip before trigger word, then write trigger word, then
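As the comment in the hunk above explains, the legacy ipath_diag_pkt and the new ipath_diag_xpkt happen to be the same size, so the driver distinguishes them heuristically: a plausible unit number is small, while a user data pointer is well above 512. A stand-alone sketch of that layout and check, using <stdint.h> types in place of the kernel's __u32/__u64 (the struct names and example values are local to this sketch; the 20 and 512 thresholds come from the hunk):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Legacy layout: 4-byte unit, alignment padding, then data and len. */
struct diag_pkt {
    uint32_t unit;
    uint64_t data;
    uint32_t len;
};

/* Expanded layout: reordered so the 64-bit members come first. */
struct diag_xpkt {
    uint64_t data;
    uint64_t pbc_wd;
    uint32_t unit;
    uint32_t len;
};

int main(void)
{
    struct diag_pkt odp;
    struct diag_xpkt dp;

    /* A "user" fills in the legacy struct (zeroed first so padding is 0). */
    memset(&odp, 0, sizeof odp);
    odp.unit = 0;
    odp.data = 0x7f0000001000ULL;   /* stand-in for a user buffer pointer */
    odp.len  = 64;

    /* With natural 64-bit alignment both layouts come out to the same
     * size, so the write length alone cannot tell them apart. */
    printf("sizeof old %zu, new %zu\n", sizeof odp, sizeof dp);

    /* The driver first interprets the bytes as the new struct ... */
    memcpy(&dp, &odp, sizeof dp);

    /* ... then applies the disambiguation rule from the hunk: a real unit
     * number is tiny, and a real data pointer is far above 512. */
    if (dp.unit >= 20 && dp.data < 512) {
        struct diag_pkt legacy;
        memcpy(&legacy, &dp, sizeof legacy);
        dp.unit = legacy.unit;
        dp.data = legacy.data;
        dp.len  = legacy.len;
        dp.pbc_wd = 0;              /* ask the driver to compute the PBC word */
        puts("treated as legacy ipath_diag_pkt");
    } else {
        puts("treated as new ipath_diag_xpkt");
    }
    return 0;
}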
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 834e86f6c04e..9361f5ab8bd6 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -104,6 +104,9 @@ static int __devinit ipath_init_one(struct pci_dev *,
104#define PCI_DEVICE_ID_INFINIPATH_HT 0xd 104#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
105#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 105#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
106 106
107/* Number of seconds before our card status check... */
108#define STATUS_TIMEOUT 60
109
107static const struct pci_device_id ipath_pci_tbl[] = { 110static const struct pci_device_id ipath_pci_tbl[] = {
108 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, 111 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
109 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, 112 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
@@ -119,6 +122,18 @@ static struct pci_driver ipath_driver = {
119 .id_table = ipath_pci_tbl, 122 .id_table = ipath_pci_tbl,
120}; 123};
121 124
125static void ipath_check_status(struct work_struct *work)
126{
127 struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
128 status_work.work);
129
130 /*
131 * If we don't have any interrupts, let the user know and
132 * don't bother checking again.
133 */
134 if (dd->ipath_int_counter == 0)
135 dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
136}
122 137
123static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev, 138static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
124 u32 *bar0, u32 *bar1) 139 u32 *bar0, u32 *bar1)
@@ -187,6 +202,8 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
187 dd->pcidev = pdev; 202 dd->pcidev = pdev;
188 pci_set_drvdata(pdev, dd); 203 pci_set_drvdata(pdev, dd);
189 204
205 INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);
206
190 list_add(&dd->ipath_list, &ipath_dev_list); 207 list_add(&dd->ipath_list, &ipath_dev_list);
191 208
192bail_unlock: 209bail_unlock:
@@ -504,6 +521,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
504 ipath_diag_add(dd); 521 ipath_diag_add(dd);
505 ipath_register_ib_device(dd); 522 ipath_register_ib_device(dd);
506 523
 524 /* Check the card status in STATUS_TIMEOUT seconds. */
525 schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);
526
507 goto bail; 527 goto bail;
508 528
509bail_irqsetup: 529bail_irqsetup:
@@ -631,6 +651,9 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
631 */ 651 */
632 ipath_shutdown_device(dd); 652 ipath_shutdown_device(dd);
633 653
654 cancel_delayed_work(&dd->status_work);
655 flush_scheduled_work();
656
634 if (dd->verbs_dev) 657 if (dd->verbs_dev)
635 ipath_unregister_ib_device(dd->verbs_dev); 658 ipath_unregister_ib_device(dd->verbs_dev);
636 659
@@ -699,9 +722,9 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
699 u64 sendctrl, sendorig; 722 u64 sendctrl, sendorig;
700 723
701 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first); 724 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
702 sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM; 725 sendorig = dd->ipath_sendctrl;
703 for (i = first; i < last; i++) { 726 for (i = first; i < last; i++) {
704 sendctrl = sendorig | 727 sendctrl = sendorig | INFINIPATH_S_DISARM |
705 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT); 728 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
706 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 729 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
707 sendctrl); 730 sendctrl);
@@ -712,12 +735,12 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
712 * while we were looping; no critical bits that would require 735 * while we were looping; no critical bits that would require
713 * locking. 736 * locking.
714 * 737 *
715 * Write a 0, and then the original value, reading scratch in 738 * disable PIOAVAILUPD, then re-enable, reading scratch in
716 * between. This seems to avoid a chip timing race that causes 739 * between. This seems to avoid a chip timing race that causes
717 * pioavail updates to memory to stop. 740 * pioavail updates to memory to stop.
718 */ 741 */
719 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 742 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
720 0); 743 sendorig & ~IPATH_S_PIOBUFAVAILUPD);
721 sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 744 sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
722 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 745 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
723 dd->ipath_sendctrl); 746 dd->ipath_sendctrl);
@@ -1014,14 +1037,10 @@ void ipath_kreceive(struct ipath_devdata *dd)
1014 goto bail; 1037 goto bail;
1015 } 1038 }
1016 1039
1017 /* There is already a thread processing this queue. */
1018 if (test_and_set_bit(0, &dd->ipath_rcv_pending))
1019 goto bail;
1020
1021 l = dd->ipath_port0head; 1040 l = dd->ipath_port0head;
1022 hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr); 1041 hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
1023 if (l == hdrqtail) 1042 if (l == hdrqtail)
1024 goto done; 1043 goto bail;
1025 1044
1026reloop: 1045reloop:
1027 for (i = 0; l != hdrqtail; i++) { 1046 for (i = 0; l != hdrqtail; i++) {
@@ -1156,10 +1175,6 @@ reloop:
1156 ipath_stats.sps_avgpkts_call = 1175 ipath_stats.sps_avgpkts_call =
1157 ipath_stats.sps_port0pkts / ++totcalls; 1176 ipath_stats.sps_port0pkts / ++totcalls;
1158 1177
1159done:
1160 clear_bit(0, &dd->ipath_rcv_pending);
1161 smp_mb__after_clear_bit();
1162
1163bail:; 1178bail:;
1164} 1179}
1165 1180
@@ -1589,6 +1604,35 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1589 return ret; 1604 return ret;
1590} 1605}
1591 1606
1607
1608/*
1609 * Flush all sends that might be in the ready to send state, as well as any
1610 * that are in the process of being sent. Used whenever we need to be
1611 * sure the send side is idle. Cleans up all buffer state by canceling
1612 * all pio buffers, and issuing an abort, which cleans up anything in the
1613 * launch fifo. The cancel is superfluous on some chip versions, but
1614 * it's safer to always do it.
1615 * PIOAvail bits are updated by the chip as if normal send had happened.
1616 */
1617void ipath_cancel_sends(struct ipath_devdata *dd)
1618{
1619 ipath_dbg("Cancelling all in-progress send buffers\n");
1620 dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */
1621 /*
1622 * the abort bit is auto-clearing. We read scratch to be sure
1623 * that cancels and the abort have taken effect in the chip.
1624 */
1625 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1626 INFINIPATH_S_ABORT);
1627 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1628 ipath_disarm_piobufs(dd, 0,
1629 (unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
1630
1631 /* and again, be sure all have hit the chip */
1632 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1633}
1634
1635
1592static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) 1636static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1593{ 1637{
1594 static const char *what[4] = { 1638 static const char *what[4] = {
@@ -1610,14 +1654,8 @@ static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1610 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); 1654 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
1611 /* flush all queued sends when going to DOWN or INIT, to be sure that 1655 /* flush all queued sends when going to DOWN or INIT, to be sure that
1612 * they don't block MAD packets */ 1656 * they don't block MAD packets */
1613 if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) { 1657 if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT)
1614 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1658 ipath_cancel_sends(dd);
1615 INFINIPATH_S_ABORT);
1616 ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
1617 (unsigned)(dd->ipath_piobcnt2k +
1618 dd->ipath_piobcnt4k) -
1619 dd->ipath_lastport_piobuf);
1620 }
1621 1659
1622 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 1660 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1623 dd->ipath_ibcctrl | which); 1661 dd->ipath_ibcctrl | which);
@@ -1839,6 +1877,87 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1839 ipath_write_kreg(dd, where, value); 1877 ipath_write_kreg(dd, where, value);
1840} 1878}
1841 1879
1880/*
 1881 * The following deals with the "obviously simple" task of overriding the state
 1882 * of the LEDs, which normally indicate link physical and logical status.
 1883 * The complications arise in dealing with different hardware mappings
 1884 * and the board-dependent routine being called from interrupts,
 1885 * and then there's the requirement to _flash_ them.
1886 */
1887#define LED_OVER_FREQ_SHIFT 8
1888#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
1889/* Below is "non-zero" to force override, but both actual LEDs are off */
1890#define LED_OVER_BOTH_OFF (8)
1891
1892void ipath_run_led_override(unsigned long opaque)
1893{
1894 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
1895 int timeoff;
1896 int pidx;
1897 u64 lstate, ltstate, val;
1898
1899 if (!(dd->ipath_flags & IPATH_INITTED))
1900 return;
1901
1902 pidx = dd->ipath_led_override_phase++ & 1;
1903 dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
1904 timeoff = dd->ipath_led_override_timeoff;
1905
1906 /*
1907 * below potentially restores the LED values per current status,
1908 * should also possibly setup the traffic-blink register,
1909 * but leave that to per-chip functions.
1910 */
1911 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1912 ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1913 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
1914 lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
1915 INFINIPATH_IBCS_LINKSTATE_MASK;
1916
1917 dd->ipath_f_setextled(dd, lstate, ltstate);
1918 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
1919}
1920
1921void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
1922{
1923 int timeoff, freq;
1924
1925 if (!(dd->ipath_flags & IPATH_INITTED))
1926 return;
1927
1928 /* First check if we are blinking. If not, use 1HZ polling */
1929 timeoff = HZ;
1930 freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
1931
1932 if (freq) {
1933 /* For blink, set each phase from one nybble of val */
1934 dd->ipath_led_override_vals[0] = val & 0xF;
1935 dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
1936 timeoff = (HZ << 4)/freq;
1937 } else {
1938 /* Non-blink set both phases the same. */
1939 dd->ipath_led_override_vals[0] = val & 0xF;
1940 dd->ipath_led_override_vals[1] = val & 0xF;
1941 }
1942 dd->ipath_led_override_timeoff = timeoff;
1943
1944 /*
1945 * If the timer has not already been started, do so. Use a "quick"
1946 * timeout so the function will be called soon, to look at our request.
1947 */
1948 if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
1949 /* Need to start timer */
1950 init_timer(&dd->ipath_led_override_timer);
1951 dd->ipath_led_override_timer.function =
1952 ipath_run_led_override;
1953 dd->ipath_led_override_timer.data = (unsigned long) dd;
1954 dd->ipath_led_override_timer.expires = jiffies + 1;
1955 add_timer(&dd->ipath_led_override_timer);
1956 } else {
1957 atomic_dec(&dd->ipath_led_override_timer_active);
1958 }
1959}
1960
1842/** 1961/**
1843 * ipath_shutdown_device - shut down a device 1962 * ipath_shutdown_device - shut down a device
1844 * @dd: the infinipath device 1963 * @dd: the infinipath device
@@ -1879,17 +1998,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1879 */ 1998 */
1880 udelay(5); 1999 udelay(5);
1881 2000
1882 /*
1883 * abort any armed or launched PIO buffers that didn't go. (self
1884 * clearing). Will cause any packet currently being transmitted to
1885 * go out with an EBP, and may also cause a short packet error on
1886 * the receiver.
1887 */
1888 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1889 INFINIPATH_S_ABORT);
1890
1891 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << 2001 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1892 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 2002 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
2003 ipath_cancel_sends(dd);
1893 2004
1894 /* disable IBC */ 2005 /* disable IBC */
1895 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; 2006 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
@@ -1902,7 +2013,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1902 * Turn the LEDs off explictly for the same reason. 2013 * Turn the LEDs off explictly for the same reason.
1903 */ 2014 */
1904 dd->ipath_f_quiet_serdes(dd); 2015 dd->ipath_f_quiet_serdes(dd);
1905 dd->ipath_f_setextled(dd, 0, 0);
1906 2016
1907 if (dd->ipath_stats_timer_active) { 2017 if (dd->ipath_stats_timer_active) {
1908 del_timer_sync(&dd->ipath_stats_timer); 2018 del_timer_sync(&dd->ipath_stats_timer);
@@ -1918,6 +2028,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1918 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED); 2028 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
1919 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL); 2029 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
1920 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL); 2030 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
2031
2032 ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
2033 ipath_update_eeprom_log(dd);
1921} 2034}
1922 2035
1923/** 2036/**
@@ -2078,6 +2191,16 @@ int ipath_reset_device(int unit)
2078 goto bail; 2191 goto bail;
2079 } 2192 }
2080 2193
2194 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2195 /* Need to stop LED timer, _then_ shut off LEDs */
2196 del_timer_sync(&dd->ipath_led_override_timer);
2197 atomic_set(&dd->ipath_led_override_timer_active, 0);
2198 }
2199
2200 /* Shut off LEDs after we are sure timer is not running */
2201 dd->ipath_led_override = LED_OVER_BOTH_OFF;
2202 dd->ipath_f_setextled(dd, 0, 0);
2203
2081 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit); 2204 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
2082 2205
2083 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) { 2206 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
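ipath_set_led_override() above packs an optional blink frequency into bits 8..15 of the override value and one 4-bit LED pattern per blink phase into the low byte; with no frequency the pattern is steady and merely re-polled once a second. A small stand-alone decoding sketch (HZ and the helper name are local to the sketch; the mask, shift, and the (HZ << 4) / freq arithmetic are taken from the hunk):

#include <stdio.h>

#define HZ 250                      /* illustrative tick rate, not the kernel's */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF << LED_OVER_FREQ_SHIFT)

/* Decode a LED-override request the way ipath_set_led_override() does:
 * no frequency means a steady pattern polled at 1 Hz, otherwise the two
 * nybbles of the low byte alternate at the requested blink rate. */
static void decode_led_override(unsigned int val)
{
    int timeoff = HZ;                             /* default: 1 second */
    int freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
    unsigned int phase0, phase1;

    if (freq) {
        phase0 = val & 0xF;
        phase1 = (val >> 4) & 0xF;
        timeoff = (HZ << 4) / freq;               /* ticks per blink phase */
    } else {
        phase0 = phase1 = val & 0xF;
    }
    printf("val=0x%03x: phase0=0x%x phase1=0x%x, timer period=%d ticks\n",
           val, phase0, phase1, timeoff);
}

int main(void)
{
    decode_led_override(0x8);       /* LED_OVER_BOTH_OFF: non-zero, LEDs off */
    decode_led_override(0x20F);     /* blink 0xF/0x0 at frequency 2          */
    return 0;
}

LED_OVER_BOTH_OFF (8) is simply a non-zero override whose LED bits are all clear, which is how ipath_reset_device() forces both LEDs off after stopping the timer.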
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index 030185f90ee2..6b9147964a4f 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -95,39 +95,37 @@ static int i2c_gpio_set(struct ipath_devdata *dd,
95 enum i2c_type line, 95 enum i2c_type line,
96 enum i2c_state new_line_state) 96 enum i2c_state new_line_state)
97{ 97{
98 u64 read_val, write_val, mask, *gpioval; 98 u64 out_mask, dir_mask, *gpioval;
99 unsigned long flags = 0;
99 100
100 gpioval = &dd->ipath_gpio_out; 101 gpioval = &dd->ipath_gpio_out;
101 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
102 if (line == i2c_line_scl)
103 mask = dd->ipath_gpio_scl;
104 else
105 mask = dd->ipath_gpio_sda;
106 102
107 if (new_line_state == i2c_line_high) 103 if (line == i2c_line_scl) {
104 dir_mask = dd->ipath_gpio_scl;
105 out_mask = (1UL << dd->ipath_gpio_scl_num);
106 } else {
107 dir_mask = dd->ipath_gpio_sda;
108 out_mask = (1UL << dd->ipath_gpio_sda_num);
109 }
110
111 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
112 if (new_line_state == i2c_line_high) {
108 /* tri-state the output rather than force high */ 113 /* tri-state the output rather than force high */
109 write_val = read_val & ~mask; 114 dd->ipath_extctrl &= ~dir_mask;
110 else 115 } else {
111 /* config line to be an output */ 116 /* config line to be an output */
112 write_val = read_val | mask; 117 dd->ipath_extctrl |= dir_mask;
113 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val); 118 }
119 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
114 120
115 /* set high and verify */ 121 /* set output as well (no real verify) */
116 if (new_line_state == i2c_line_high) 122 if (new_line_state == i2c_line_high)
117 write_val = 0x1UL; 123 *gpioval |= out_mask;
118 else 124 else
119 write_val = 0x0UL; 125 *gpioval &= ~out_mask;
120 126
121 if (line == i2c_line_scl) {
122 write_val <<= dd->ipath_gpio_scl_num;
123 *gpioval = *gpioval & ~(1UL << dd->ipath_gpio_scl_num);
124 *gpioval |= write_val;
125 } else {
126 write_val <<= dd->ipath_gpio_sda_num;
127 *gpioval = *gpioval & ~(1UL << dd->ipath_gpio_sda_num);
128 *gpioval |= write_val;
129 }
130 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval); 127 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
128 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
131 129
132 return 0; 130 return 0;
133} 131}
@@ -145,8 +143,9 @@ static int i2c_gpio_get(struct ipath_devdata *dd,
145 enum i2c_type line, 143 enum i2c_type line,
146 enum i2c_state *curr_statep) 144 enum i2c_state *curr_statep)
147{ 145{
148 u64 read_val, write_val, mask; 146 u64 read_val, mask;
149 int ret; 147 int ret;
148 unsigned long flags = 0;
150 149
151 /* check args */ 150 /* check args */
152 if (curr_statep == NULL) { 151 if (curr_statep == NULL) {
@@ -154,15 +153,21 @@ static int i2c_gpio_get(struct ipath_devdata *dd,
154 goto bail; 153 goto bail;
155 } 154 }
156 155
157 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
158 /* config line to be an input */ 156 /* config line to be an input */
159 if (line == i2c_line_scl) 157 if (line == i2c_line_scl)
160 mask = dd->ipath_gpio_scl; 158 mask = dd->ipath_gpio_scl;
161 else 159 else
162 mask = dd->ipath_gpio_sda; 160 mask = dd->ipath_gpio_sda;
163 write_val = read_val & ~mask; 161
164 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val); 162 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
163 dd->ipath_extctrl &= ~mask;
164 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
165 /*
166 * Below is very unlikely to reflect true input state if Output
167 * Enable actually changed.
168 */
165 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); 169 read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
170 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
166 171
167 if (read_val & mask) 172 if (read_val & mask)
168 *curr_statep = i2c_line_high; 173 *curr_statep = i2c_line_high;
@@ -192,6 +197,7 @@ static void i2c_wait_for_writes(struct ipath_devdata *dd)
192 197
193static void scl_out(struct ipath_devdata *dd, u8 bit) 198static void scl_out(struct ipath_devdata *dd, u8 bit)
194{ 199{
200 udelay(1);
195 i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low); 201 i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
196 202
197 i2c_wait_for_writes(dd); 203 i2c_wait_for_writes(dd);
@@ -314,12 +320,18 @@ static int eeprom_reset(struct ipath_devdata *dd)
314 int clock_cycles_left = 9; 320 int clock_cycles_left = 9;
315 u64 *gpioval = &dd->ipath_gpio_out; 321 u64 *gpioval = &dd->ipath_gpio_out;
316 int ret; 322 int ret;
323 unsigned long flags;
317 324
318 eeprom_init = 1; 325 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
326 /* Make sure shadows are consistent */
327 dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
319 *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out); 328 *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
329 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
330
320 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg " 331 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
321 "is %llx\n", (unsigned long long) *gpioval); 332 "is %llx\n", (unsigned long long) *gpioval);
322 333
334 eeprom_init = 1;
323 /* 335 /*
324 * This is to get the i2c into a known state, by first going low, 336 * This is to get the i2c into a known state, by first going low,
325 * then tristate sda (and then tristate scl as first thing 337 * then tristate sda (and then tristate scl as first thing
@@ -355,8 +367,8 @@ bail:
355 * @len: number of bytes to receive 367 * @len: number of bytes to receive
356 */ 368 */
357 369
358int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset, 370static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
359 void *buffer, int len) 371 u8 eeprom_offset, void *buffer, int len)
360{ 372{
361 /* compiler complains unless initialized */ 373 /* compiler complains unless initialized */
362 u8 single_byte = 0; 374 u8 single_byte = 0;
@@ -406,6 +418,7 @@ bail:
406 return ret; 418 return ret;
407} 419}
408 420
421
409/** 422/**
410 * ipath_eeprom_write - writes data to the eeprom via I2C 423 * ipath_eeprom_write - writes data to the eeprom via I2C
411 * @dd: the infinipath device 424 * @dd: the infinipath device
@@ -413,8 +426,8 @@ bail:
413 * @buffer: data to write 426 * @buffer: data to write
414 * @len: number of bytes to write 427 * @len: number of bytes to write
415 */ 428 */
416int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset, 429int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
417 const void *buffer, int len) 430 const void *buffer, int len)
418{ 431{
419 u8 single_byte; 432 u8 single_byte;
420 int sub_len; 433 int sub_len;
@@ -488,6 +501,38 @@ bail:
488 return ret; 501 return ret;
489} 502}
490 503
504/*
505 * The public entry-points ipath_eeprom_read() and ipath_eeprom_write()
506 * are now just wrappers around the internal functions.
507 */
508int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
509 void *buff, int len)
510{
511 int ret;
512
513 ret = down_interruptible(&dd->ipath_eep_sem);
514 if (!ret) {
515 ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
516 up(&dd->ipath_eep_sem);
517 }
518
519 return ret;
520}
521
522int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
523 const void *buff, int len)
524{
525 int ret;
526
527 ret = down_interruptible(&dd->ipath_eep_sem);
528 if (!ret) {
529 ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
530 up(&dd->ipath_eep_sem);
531 }
532
533 return ret;
534}
535
491static u8 flash_csum(struct ipath_flash *ifp, int adjust) 536static u8 flash_csum(struct ipath_flash *ifp, int adjust)
492{ 537{
493 u8 *ip = (u8 *) ifp; 538 u8 *ip = (u8 *) ifp;
@@ -515,7 +560,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
515 void *buf; 560 void *buf;
516 struct ipath_flash *ifp; 561 struct ipath_flash *ifp;
517 __be64 guid; 562 __be64 guid;
518 int len; 563 int len, eep_stat;
519 u8 csum, *bguid; 564 u8 csum, *bguid;
520 int t = dd->ipath_unit; 565 int t = dd->ipath_unit;
521 struct ipath_devdata *dd0 = ipath_lookup(0); 566 struct ipath_devdata *dd0 = ipath_lookup(0);
@@ -559,7 +604,11 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
559 goto bail; 604 goto bail;
560 } 605 }
561 606
562 if (ipath_eeprom_read(dd, 0, buf, len)) { 607 down(&dd->ipath_eep_sem);
608 eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
609 up(&dd->ipath_eep_sem);
610
611 if (eep_stat) {
563 ipath_dev_err(dd, "Failed reading GUID from eeprom\n"); 612 ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
564 goto done; 613 goto done;
565 } 614 }
@@ -634,8 +683,192 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
634 ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n", 683 ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
635 (unsigned long long) be64_to_cpu(dd->ipath_guid)); 684 (unsigned long long) be64_to_cpu(dd->ipath_guid));
636 685
686 memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
687 /*
688 * Power-on (actually "active") hours are kept as little-endian value
689 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
690 * atomic_t while running.
691 */
692 atomic_set(&dd->ipath_active_time, 0);
693 dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
694
637done: 695done:
638 vfree(buf); 696 vfree(buf);
639 697
640bail:; 698bail:;
641} 699}
700
701/**
702 * ipath_update_eeprom_log - copy active-time and error counters to eeprom
703 * @dd: the infinipath device
704 *
705 * Although the time is kept as seconds in the ipath_devdata struct, it is
706 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
707 * First-cut code reads whole (expected) struct ipath_flash, modifies,
708 * re-writes. Future direction: read/write only what we need, assuming
709 * that the EEPROM had to have been "good enough" for driver init, and
710 * if not, we aren't making it worse.
711 *
712 */
713
714int ipath_update_eeprom_log(struct ipath_devdata *dd)
715{
716 void *buf;
717 struct ipath_flash *ifp;
718 int len, hi_water;
719 uint32_t new_time, new_hrs;
720 u8 csum;
721 int ret, idx;
722 unsigned long flags;
723
724 /* first, check if we actually need to do anything. */
725 ret = 0;
726 for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
727 if (dd->ipath_eep_st_new_errs[idx]) {
728 ret = 1;
729 break;
730 }
731 }
732 new_time = atomic_read(&dd->ipath_active_time);
733
734 if (ret == 0 && new_time < 3600)
735 return 0;
736
737 /*
738 * The quick-check above determined that there is something worthy
 739 * of logging, so get current contents and take a more detailed look.
740 */
741 len = offsetof(struct ipath_flash, if_future);
742 buf = vmalloc(len);
743 ret = 1;
744 if (!buf) {
745 ipath_dev_err(dd, "Couldn't allocate memory to read %u "
746 "bytes from eeprom for logging\n", len);
747 goto bail;
748 }
749
750 /* Grab semaphore and read current EEPROM. If we get an
751 * error, let go, but if not, keep it until we finish write.
752 */
753 ret = down_interruptible(&dd->ipath_eep_sem);
754 if (ret) {
755 ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
756 goto free_bail;
757 }
758 ret = ipath_eeprom_internal_read(dd, 0, buf, len);
759 if (ret) {
760 up(&dd->ipath_eep_sem);
 761 ipath_dev_err(dd, "Unable to read EEPROM for logging\n");
762 goto free_bail;
763 }
764 ifp = (struct ipath_flash *)buf;
765
766 csum = flash_csum(ifp, 0);
767 if (csum != ifp->if_csum) {
768 up(&dd->ipath_eep_sem);
769 ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
770 csum, ifp->if_csum);
771 ret = 1;
772 goto free_bail;
773 }
774 hi_water = 0;
775 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
776 for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
777 int new_val = dd->ipath_eep_st_new_errs[idx];
778 if (new_val) {
779 /*
780 * If we have seen any errors, add to EEPROM values
781 * We need to saturate at 0xFF (255) and we also
782 * would need to adjust the checksum if we were
783 * trying to minimize EEPROM traffic
784 * Note that we add to actual current count in EEPROM,
785 * in case it was altered while we were running.
786 */
787 new_val += ifp->if_errcntp[idx];
788 if (new_val > 0xFF)
789 new_val = 0xFF;
790 if (ifp->if_errcntp[idx] != new_val) {
791 ifp->if_errcntp[idx] = new_val;
792 hi_water = offsetof(struct ipath_flash,
793 if_errcntp) + idx;
794 }
795 /*
796 * update our shadow (used to minimize EEPROM
797 * traffic), to match what we are about to write.
798 */
799 dd->ipath_eep_st_errs[idx] = new_val;
800 dd->ipath_eep_st_new_errs[idx] = 0;
801 }
802 }
803 /*
804 * now update active-time. We would like to round to the nearest hour
805 * but unless atomic_t are sure to be proper signed ints we cannot,
806 * because we need to account for what we "transfer" to EEPROM and
807 * if we log an hour at 31 minutes, then we would need to set
808 * active_time to -29 to accurately count the _next_ hour.
809 */
810 if (new_time > 3600) {
811 new_hrs = new_time / 3600;
812 atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
813 new_hrs += dd->ipath_eep_hrs;
814 if (new_hrs > 0xFFFF)
815 new_hrs = 0xFFFF;
816 dd->ipath_eep_hrs = new_hrs;
817 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
818 ifp->if_powerhour[0] = new_hrs & 0xFF;
819 hi_water = offsetof(struct ipath_flash, if_powerhour);
820 }
821 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
822 ifp->if_powerhour[1] = new_hrs >> 8;
823 hi_water = offsetof(struct ipath_flash, if_powerhour)
824 + 1;
825 }
826 }
827 /*
828 * There is a tiny possibility that we could somehow fail to write
829 * the EEPROM after updating our shadows, but problems from holding
830 * the spinlock too long are a much bigger issue.
831 */
832 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
833 if (hi_water) {
 834 /* we made some change to the data, update cksum and write */
835 csum = flash_csum(ifp, 1);
836 ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
837 }
838 up(&dd->ipath_eep_sem);
839 if (ret)
840 ipath_dev_err(dd, "Failed updating EEPROM\n");
841
842free_bail:
843 vfree(buf);
844bail:
845 return ret;
846
847}
848
849/**
850 * ipath_inc_eeprom_err - increment one of the four error counters
851 * that are logged to EEPROM.
852 * @dd: the infinipath device
853 * @eidx: 0..3, the counter to increment
854 * @incr: how much to add
855 *
856 * Each counter is 8-bits, and saturates at 255 (0xFF). They
857 * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
858 * is called, but it can only be called in a context that allows sleep.
859 * This function can be called even at interrupt level.
860 */
861
862void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
863{
864 uint new_val;
865 unsigned long flags;
866
867 spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
868 new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
869 if (new_val > 255)
870 new_val = 255;
871 dd->ipath_eep_st_new_errs[eidx] = new_val;
872 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
873 return;
874}
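ipath_update_eeprom_log() above keeps active time as seconds in an atomic_t while running but stores it in flash as a 16-bit hour count, and folds per-category error deltas into 8-bit counters; both saturate rather than wrap. A stand-alone sketch of just that arithmetic (the helper names are hypothetical, and the semaphore/spinlock bookkeeping around the real code is omitted; the 3600, 0xFFFF, and 0xFF limits come from the hunk):

#include <stdint.h>
#include <stdio.h>

/* Fold newly accumulated active seconds into the 16-bit hour counter kept
 * in flash, the way ipath_update_eeprom_log() does: take whole hours,
 * subtract what was transferred, and saturate at 0xFFFF. */
static void log_active_time(uint32_t *active_secs, uint32_t *eep_hrs,
                            uint8_t powerhour[2])
{
    if (*active_secs > 3600) {
        uint32_t new_hrs = *active_secs / 3600;
        *active_secs -= new_hrs * 3600;   /* remainder keeps accumulating */
        new_hrs += *eep_hrs;
        if (new_hrs > 0xFFFF)
            new_hrs = 0xFFFF;
        *eep_hrs = new_hrs;
        powerhour[0] = new_hrs & 0xFF;    /* stored little-endian in flash */
        powerhour[1] = new_hrs >> 8;
    }
}

/* Add an error-count delta to one 8-bit flash counter, saturating at 255. */
static uint8_t log_error_count(uint8_t flash_val, uint32_t new_errs)
{
    uint32_t v = flash_val + new_errs;
    return v > 0xFF ? 0xFF : (uint8_t)v;
}

int main(void)
{
    uint32_t secs = 5000, hrs = 0xFFF0;
    uint8_t ph[2] = { 0, 0 };

    log_active_time(&secs, &hrs, ph);
    printf("hours=0x%04x (bytes %02x %02x), leftover seconds=%u\n",
           hrs, (unsigned)ph[0], (unsigned)ph[1], secs);
    printf("errcnt: %u\n", log_error_count(250, 20));   /* saturates at 255 */
    return 0;
}

Subtracting only the transferred whole hours from the running seconds counter is what lets the remainder keep counting toward the next hour instead of being lost.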
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 1272aaf2a785..33ab0d6b80ff 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -396,7 +396,8 @@ static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
396 "TID %u, vaddr %lx, physaddr %llx pgp %p\n", 396 "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
397 tid, vaddr, (unsigned long long) physaddr, 397 tid, vaddr, (unsigned long long) physaddr,
398 pagep[i]); 398 pagep[i]);
399 dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr); 399 dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
400 physaddr);
400 /* 401 /*
401 * don't check this tid in ipath_portshadow, since we 402 * don't check this tid in ipath_portshadow, since we
402 * just filled it in; start with the next one. 403 * just filled it in; start with the next one.
@@ -422,7 +423,8 @@ static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
422 if (dd->ipath_pageshadow[porttid + tid]) { 423 if (dd->ipath_pageshadow[porttid + tid]) {
423 ipath_cdbg(VERBOSE, "Freeing TID %u\n", 424 ipath_cdbg(VERBOSE, "Freeing TID %u\n",
424 tid); 425 tid);
425 dd->ipath_f_put_tid(dd, &tidbase[tid], 1, 426 dd->ipath_f_put_tid(dd, &tidbase[tid],
427 RCVHQ_RCV_TYPE_EXPECTED,
426 dd->ipath_tidinvalid); 428 dd->ipath_tidinvalid);
427 pci_unmap_page(dd->pcidev, 429 pci_unmap_page(dd->pcidev,
428 dd->ipath_physshadow[porttid + tid], 430 dd->ipath_physshadow[porttid + tid],
@@ -538,7 +540,8 @@ static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
538 if (dd->ipath_pageshadow[porttid + tid]) { 540 if (dd->ipath_pageshadow[porttid + tid]) {
539 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n", 541 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
540 pd->port_pid, tid); 542 pd->port_pid, tid);
541 dd->ipath_f_put_tid(dd, &tidbase[tid], 1, 543 dd->ipath_f_put_tid(dd, &tidbase[tid],
544 RCVHQ_RCV_TYPE_EXPECTED,
542 dd->ipath_tidinvalid); 545 dd->ipath_tidinvalid);
543 pci_unmap_page(dd->pcidev, 546 pci_unmap_page(dd->pcidev,
544 dd->ipath_physshadow[porttid + tid], 547 dd->ipath_physshadow[porttid + tid],
@@ -921,7 +924,8 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
921 (u64 __iomem *) 924 (u64 __iomem *)
922 ((char __iomem *) 925 ((char __iomem *)
923 dd->ipath_kregbase + 926 dd->ipath_kregbase +
924 dd->ipath_rcvegrbase), 0, pa); 927 dd->ipath_rcvegrbase),
928 RCVHQ_RCV_TYPE_EAGER, pa);
925 pa += egrsize; 929 pa += egrsize;
926 } 930 }
927 cond_resched(); /* don't hog the cpu */ 931 cond_resched(); /* don't hog the cpu */
@@ -1337,68 +1341,133 @@ bail:
1337 return ret; 1341 return ret;
1338} 1342}
1339 1343
1340static unsigned int ipath_poll(struct file *fp, 1344static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
1341 struct poll_table_struct *pt) 1345 struct file *fp,
1346 struct poll_table_struct *pt)
1342{ 1347{
1343 struct ipath_portdata *pd;
1344 u32 head, tail;
1345 int bit;
1346 unsigned pollflag = 0; 1348 unsigned pollflag = 0;
1347 struct ipath_devdata *dd; 1349 struct ipath_devdata *dd;
1348 1350
1349 pd = port_fp(fp);
1350 if (!pd)
1351 goto bail;
1352 dd = pd->port_dd; 1351 dd = pd->port_dd;
1353 1352
1354 bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT; 1353 if (test_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag)) {
1355 set_bit(bit, &dd->ipath_rcvctrl); 1354 pollflag |= POLLERR;
1355 clear_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag);
1356 }
1356 1357
1357 /* 1358 if (test_bit(IPATH_PORT_WAITING_URG, &pd->int_flag)) {
1358 * Before blocking, make sure that head is still == tail, 1359 pollflag |= POLLIN | POLLRDNORM;
1359 * reading from the chip, so we can be sure the interrupt 1360 clear_bit(IPATH_PORT_WAITING_URG, &pd->int_flag);
1360 * enable has made it to the chip. If not equal, disable 1361 }
1361 * interrupt again and return immediately. This avoids races,
1362 * and the overhead of the chip read doesn't matter much at
1363 * this point, since we are waiting for something anyway.
1364 */
1365 1362
1366 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1363 if (!pollflag) {
1367 dd->ipath_rcvctrl); 1364 set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
1365 if (pd->poll_type & IPATH_POLL_TYPE_OVERFLOW)
1366 set_bit(IPATH_PORT_WAITING_OVERFLOW,
1367 &pd->port_flag);
1368
1369 poll_wait(fp, &pd->port_wait, pt);
1370 }
1371
1372 return pollflag;
1373}
1374
1375static unsigned int ipath_poll_next(struct ipath_portdata *pd,
1376 struct file *fp,
1377 struct poll_table_struct *pt)
1378{
1379 u32 head, tail;
1380 unsigned pollflag = 0;
1381 struct ipath_devdata *dd;
1382
1383 dd = pd->port_dd;
1368 1384
1369 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); 1385 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
1370 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 1386 tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr;
1371 1387
1372 if (tail == head) { 1388 if (test_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag)) {
1389 pollflag |= POLLERR;
1390 clear_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag);
1391 }
1392
1393 if (tail != head ||
1394 test_bit(IPATH_PORT_WAITING_RCV, &pd->int_flag)) {
1395 pollflag |= POLLIN | POLLRDNORM;
1396 clear_bit(IPATH_PORT_WAITING_RCV, &pd->int_flag);
1397 }
1398
1399 if (!pollflag) {
1373 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); 1400 set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
1401 if (pd->poll_type & IPATH_POLL_TYPE_OVERFLOW)
1402 set_bit(IPATH_PORT_WAITING_OVERFLOW,
1403 &pd->port_flag);
1404
1405 set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
1406 &dd->ipath_rcvctrl);
1407
1408 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1409 dd->ipath_rcvctrl);
1410
1374 if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ 1411 if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
1375 (void)ipath_write_ureg(dd, ur_rcvhdrhead, 1412 ipath_write_ureg(dd, ur_rcvhdrhead,
1376 dd->ipath_rhdrhead_intr_off 1413 dd->ipath_rhdrhead_intr_off | head,
1377 | head, pd->port_port); 1414 pd->port_port);
1378 poll_wait(fp, &pd->port_wait, pt);
1379 1415
1380 if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { 1416 poll_wait(fp, &pd->port_wait, pt);
1381 /* timed out, no packets received */
1382 clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
1383 pd->port_rcvwait_to++;
1384 }
1385 else
1386 pollflag = POLLIN | POLLRDNORM;
1387 }
1388 else {
1389 /* it's already happened; don't do wait_event overhead */
1390 pollflag = POLLIN | POLLRDNORM;
1391 pd->port_rcvnowait++;
1392 } 1417 }
1393 1418
1394 clear_bit(bit, &dd->ipath_rcvctrl); 1419 return pollflag;
1395 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1420}
1396 dd->ipath_rcvctrl); 1421
1422static unsigned int ipath_poll(struct file *fp,
1423 struct poll_table_struct *pt)
1424{
1425 struct ipath_portdata *pd;
1426 unsigned pollflag;
1427
1428 pd = port_fp(fp);
1429 if (!pd)
1430 pollflag = 0;
1431 else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
1432 pollflag = ipath_poll_urgent(pd, fp, pt);
1433 else
1434 pollflag = ipath_poll_next(pd, fp, pt);
1397 1435
1398bail:
1399 return pollflag; 1436 return pollflag;
1400} 1437}
1401 1438
1439static int ipath_supports_subports(int user_swmajor, int user_swminor)
1440{
1441 /* no subport implementation prior to software version 1.3 */
1442 return (user_swmajor > 1) || (user_swminor >= 3);
1443}
1444
1445static int ipath_compatible_subports(int user_swmajor, int user_swminor)
1446{
1447 /* this code is written long-hand for clarity */
1448 if (IPATH_USER_SWMAJOR != user_swmajor) {
1449 /* no promise of compatibility if major mismatch */
1450 return 0;
1451 }
1452 if (IPATH_USER_SWMAJOR == 1) {
1453 switch (IPATH_USER_SWMINOR) {
1454 case 0:
1455 case 1:
1456 case 2:
1457 /* no subport implementation so cannot be compatible */
1458 return 0;
1459 case 3:
1460 /* 3 is only compatible with itself */
1461 return user_swminor == 3;
1462 default:
1463 /* >= 4 are compatible (or are expected to be) */
1464 return user_swminor >= 4;
1465 }
1466 }
1467 /* make no promises yet for future major versions */
1468 return 0;
1469}
1470
1402static int init_subports(struct ipath_devdata *dd, 1471static int init_subports(struct ipath_devdata *dd,
1403 struct ipath_portdata *pd, 1472 struct ipath_portdata *pd,
1404 const struct ipath_user_info *uinfo) 1473 const struct ipath_user_info *uinfo)
@@ -1408,20 +1477,32 @@ static int init_subports(struct ipath_devdata *dd,
1408 size_t size; 1477 size_t size;
1409 1478
1410 /* 1479 /*
1411 * If the user is requesting zero or one port, 1480 * If the user is requesting zero subports,
1412 * skip the subport allocation. 1481 * skip the subport allocation.
1413 */ 1482 */
1414 if (uinfo->spu_subport_cnt <= 1) 1483 if (uinfo->spu_subport_cnt <= 0)
1484 goto bail;
1485
1486 /* Self-consistency check for ipath_compatible_subports() */
1487 if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
1488 !ipath_compatible_subports(IPATH_USER_SWMAJOR,
1489 IPATH_USER_SWMINOR)) {
1490 dev_info(&dd->pcidev->dev,
1491 "Inconsistent ipath_compatible_subports()\n");
1415 goto bail; 1492 goto bail;
1493 }
1416 1494
1417 /* Old user binaries don't know about new subport implementation */ 1495 /* Check for subport compatibility */
1418 if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) { 1496 if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
1497 uinfo->spu_userversion & 0xffff)) {
1419 dev_info(&dd->pcidev->dev, 1498 dev_info(&dd->pcidev->dev,
1420 "Mismatched user minor version (%d) and driver " 1499 "Mismatched user version (%d.%d) and driver "
1421 "minor version (%d) while port sharing. Ensure " 1500 "version (%d.%d) while port sharing. Ensure "
1422 "that driver and library are from the same " 1501 "that driver and library are from the same "
1423 "release.\n", 1502 "release.\n",
1503 (int) (uinfo->spu_userversion >> 16),
1424 (int) (uinfo->spu_userversion & 0xffff), 1504 (int) (uinfo->spu_userversion & 0xffff),
1505 IPATH_USER_SWMAJOR,
1425 IPATH_USER_SWMINOR); 1506 IPATH_USER_SWMINOR);
1426 goto bail; 1507 goto bail;
1427 } 1508 }
@@ -1725,14 +1806,13 @@ static int ipath_open(struct inode *in, struct file *fp)
1725 return fp->private_data ? 0 : -ENOMEM; 1806 return fp->private_data ? 0 : -ENOMEM;
1726} 1807}
1727 1808
1728
1729/* Get port early, so can set affinity prior to memory allocation */ 1809/* Get port early, so can set affinity prior to memory allocation */
1730static int ipath_assign_port(struct file *fp, 1810static int ipath_assign_port(struct file *fp,
1731 const struct ipath_user_info *uinfo) 1811 const struct ipath_user_info *uinfo)
1732{ 1812{
1733 int ret; 1813 int ret;
1734 int i_minor; 1814 int i_minor;
1735 unsigned swminor; 1815 unsigned swmajor, swminor;
1736 1816
1737 /* Check to be sure we haven't already initialized this file */ 1817 /* Check to be sure we haven't already initialized this file */
1738 if (port_fp(fp)) { 1818 if (port_fp(fp)) {
@@ -1741,7 +1821,8 @@ static int ipath_assign_port(struct file *fp,
1741 } 1821 }
1742 1822
1743 /* for now, if major version is different, bail */ 1823 /* for now, if major version is different, bail */
1744 if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) { 1824 swmajor = uinfo->spu_userversion >> 16;
1825 if (swmajor != IPATH_USER_SWMAJOR) {
1745 ipath_dbg("User major version %d not same as driver " 1826 ipath_dbg("User major version %d not same as driver "
1746 "major %d\n", uinfo->spu_userversion >> 16, 1827 "major %d\n", uinfo->spu_userversion >> 16,
1747 IPATH_USER_SWMAJOR); 1828 IPATH_USER_SWMAJOR);
@@ -1756,7 +1837,8 @@ static int ipath_assign_port(struct file *fp,
1756 1837
1757 mutex_lock(&ipath_mutex); 1838 mutex_lock(&ipath_mutex);
1758 1839
1759 if (swminor == IPATH_USER_SWMINOR && uinfo->spu_subport_cnt && 1840 if (ipath_compatible_subports(swmajor, swminor) &&
1841 uinfo->spu_subport_cnt &&
1760 (ret = find_shared_port(fp, uinfo))) { 1842 (ret = find_shared_port(fp, uinfo))) {
1761 mutex_unlock(&ipath_mutex); 1843 mutex_unlock(&ipath_mutex);
1762 if (ret > 0) 1844 if (ret > 0)
@@ -2020,7 +2102,8 @@ static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
2020 info.port = pd->port_port; 2102 info.port = pd->port_port;
2021 info.subport = subport; 2103 info.subport = subport;
2022 /* Don't return new fields if old library opened the port. */ 2104 /* Don't return new fields if old library opened the port. */
2023 if ((pd->userversion & 0xffff) == IPATH_USER_SWMINOR) { 2105 if (ipath_supports_subports(pd->userversion >> 16,
2106 pd->userversion & 0xffff)) {
2024 /* Number of user ports available for this device. */ 2107 /* Number of user ports available for this device. */
2025 info.num_ports = pd->port_dd->ipath_cfgports - 1; 2108 info.num_ports = pd->port_dd->ipath_cfgports - 1;
2026 info.num_subports = pd->port_subport_cnt; 2109 info.num_subports = pd->port_subport_cnt;
@@ -2123,6 +2206,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2123 src = NULL; 2206 src = NULL;
2124 dest = NULL; 2207 dest = NULL;
2125 break; 2208 break;
2209 case IPATH_CMD_POLL_TYPE:
2210 copy = sizeof(cmd.cmd.poll_type);
2211 dest = &cmd.cmd.poll_type;
2212 src = &ucmd->cmd.poll_type;
2213 break;
2126 default: 2214 default:
2127 ret = -EINVAL; 2215 ret = -EINVAL;
2128 goto bail; 2216 goto bail;
@@ -2195,6 +2283,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2195 case IPATH_CMD_PIOAVAILUPD: 2283 case IPATH_CMD_PIOAVAILUPD:
2196 ret = ipath_force_pio_avail_update(pd->port_dd); 2284 ret = ipath_force_pio_avail_update(pd->port_dd);
2197 break; 2285 break;
2286 case IPATH_CMD_POLL_TYPE:
2287 pd->poll_type = cmd.cmd.poll_type;
2288 break;
2198 } 2289 }
2199 2290
2200 if (ret >= 0) 2291 if (ret >= 0)
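
The hunks above replace the old minor-version equality test with ipath_compatible_subports(), which receives the major and minor halves of spu_userversion (upper and lower 16 bits). Below is a minimal userspace sketch of that unpacking; the compatibility rule and the IPATH_USER_SW* values are placeholders, since neither the helper's body nor those values appear in this diff.

/*
 * Illustrative sketch only.  ipath_compatible_subports() is not shown in
 * this diff, so the "same major, minor >= 3" rule and the version values
 * below are assumed examples, not the driver's actual policy.
 */
#include <stdio.h>

#define IPATH_USER_SWMAJOR 1	/* placeholder value */
#define IPATH_USER_SWMINOR 5	/* placeholder value */

static int example_compatible_subports(int major, int minor)
{
	/* hypothetical rule: subport sharing appeared in 1.3 */
	return major == 1 && minor >= 3;
}

int main(void)
{
	unsigned int spu_userversion = (IPATH_USER_SWMAJOR << 16) | 4;
	int swmajor = spu_userversion >> 16;	/* upper 16 bits: major */
	int swminor = spu_userversion & 0xffff;	/* lower 16 bits: minor */

	printf("user %d.%d, driver %d.%d, subport sharing %s\n",
	       swmajor, swminor, IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR,
	       example_compatible_subports(swmajor, swminor) ? "ok" : "no");
	return 0;
}
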
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index ebd5c7bd2cdb..2e689b974e1f 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -257,9 +257,14 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
257 /* Notimpl InitType (actually, an SMA decision) */ 257 /* Notimpl InitType (actually, an SMA decision) */
258 /* VLHighLimit is 0 (only one VL) */ 258 /* VLHighLimit is 0 (only one VL) */
259 ; /* VLArbitrationHighCap is 0 (only one VL) */ 259 ; /* VLArbitrationHighCap is 0 (only one VL) */
260 /*
261 * Note: the chips support a maximum MTU of 4096, but the driver
262 * hasn't implemented this feature yet, so set the maximum
263 * to 2048.
264 */
260 portinfo[10] = /* VLArbitrationLowCap is 0 (only one VL) */ 265 portinfo[10] = /* VLArbitrationLowCap is 0 (only one VL) */
261 /* InitTypeReply is SMA decision */ 266 /* InitTypeReply is SMA decision */
262 (5 << 16) /* MTUCap 4096 */ 267 (4 << 16) /* MTUCap 2048 */
263 | (7 << 13) /* VLStallCount */ 268 | (7 << 13) /* VLStallCount */
264 | (0x1f << 8) /* HOQLife */ 269 | (0x1f << 8) /* HOQLife */
265 | (1 << 4) 270 | (1 << 4)
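
The MTUCap nibble in portinfo uses the IB MTU enumeration (1 = 256 bytes up to 5 = 4096 bytes), so dropping the field from 5 to 4 advertises 2048 instead of 4096, matching the new comment above. A small sketch of that decoding:

/* Sketch: decode the IB MTU enumeration used by the MTUCap field above. */
#include <stdio.h>

static unsigned int ib_mtu_enum_to_bytes(unsigned int code)
{
	/* 1=256, 2=512, 3=1024, 4=2048, 5=4096 */
	return 256u << (code - 1);
}

int main(void)
{
	printf("MTUCap 4 -> %u bytes, MTUCap 5 -> %u bytes\n",
	       ib_mtu_enum_to_bytes(4), ib_mtu_enum_to_bytes(5));
	return 0;
}
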
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 4171198fc202..650745d83fac 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -36,6 +36,7 @@
36 * HT chip. 36 * HT chip.
37 */ 37 */
38 38
39#include <linux/vmalloc.h>
39#include <linux/pci.h> 40#include <linux/pci.h>
40#include <linux/delay.h> 41#include <linux/delay.h>
41#include <linux/htirq.h> 42#include <linux/htirq.h>
@@ -439,6 +440,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
439 u32 bits, ctrl; 440 u32 bits, ctrl;
440 int isfatal = 0; 441 int isfatal = 0;
441 char bitsmsg[64]; 442 char bitsmsg[64];
443 int log_idx;
442 444
443 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); 445 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
444 446
@@ -467,6 +469,11 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
467 469
468 hwerrs &= dd->ipath_hwerrmask; 470 hwerrs &= dd->ipath_hwerrmask;
469 471
472 /* We log some errors to EEPROM, check if we have any of those. */
473 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
474 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
475 ipath_inc_eeprom_err(dd, log_idx, 1);
476
470 /* 477 /*
471 * make sure we get this much out, unless told to be quiet, 478 * make sure we get this much out, unless told to be quiet,
472 * it's a parity error we may recover from, 479 * it's a parity error we may recover from,
@@ -502,9 +509,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
502 if (!hwerrs) { 509 if (!hwerrs) {
503 ipath_dbg("Clearing freezemode on ignored or " 510 ipath_dbg("Clearing freezemode on ignored or "
504 "recovered hardware error\n"); 511 "recovered hardware error\n");
505 ctrl &= ~INFINIPATH_C_FREEZEMODE; 512 ipath_clear_freeze(dd);
506 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
507 ctrl);
508 } 513 }
509 } 514 }
510 515
@@ -672,10 +677,16 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
672 if (n) 677 if (n)
673 snprintf(name, namelen, "%s", n); 678 snprintf(name, namelen, "%s", n);
674 679
680 if (dd->ipath_boardrev != 6 && dd->ipath_boardrev != 7 &&
681 dd->ipath_boardrev != 11) {
682 ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
683 ret = 1;
684 goto bail;
685 }
675 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || 686 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
676 dd->ipath_minrev > 3)) { 687 dd->ipath_minrev > 4)) {
677 /* 688 /*
678 * This version of the driver only supports Rev 3.2 and 3.3 689 * This version of the driver only supports Rev 3.2 - 3.4
679 */ 690 */
680 ipath_dev_err(dd, 691 ipath_dev_err(dd,
681 "Unsupported InfiniPath hardware revision %u.%u!\n", 692 "Unsupported InfiniPath hardware revision %u.%u!\n",
@@ -689,36 +700,11 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
689 * copies 700 * copies
690 */ 701 */
691 dd->ipath_flags |= IPATH_32BITCOUNTERS; 702 dd->ipath_flags |= IPATH_32BITCOUNTERS;
703 dd->ipath_flags |= IPATH_GPIO_INTR;
692 if (dd->ipath_htspeed != 800) 704 if (dd->ipath_htspeed != 800)
693 ipath_dev_err(dd, 705 ipath_dev_err(dd,
694 "Incorrectly configured for HT @ %uMHz\n", 706 "Incorrectly configured for HT @ %uMHz\n",
695 dd->ipath_htspeed); 707 dd->ipath_htspeed);
696 if (dd->ipath_boardrev == 7 || dd->ipath_boardrev == 11 ||
697 dd->ipath_boardrev == 6)
698 dd->ipath_flags |= IPATH_GPIO_INTR;
699 else
700 dd->ipath_flags |= IPATH_POLL_RX_INTR;
701 if (dd->ipath_boardrev == 8) { /* LS/X-1 */
702 u64 val;
703 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
704 if (val & INFINIPATH_EXTS_SERDESSEL) {
705 /*
706 * hardware disabled
707 *
708 * This means that the chip is hardware disabled,
709 * and will not be able to bring up the link,
710 * in any case. We special case this and abort
711 * early, to avoid later messages. We also set
712 * the DISABLED status bit
713 */
714 ipath_dbg("Unit %u is hardware-disabled\n",
715 dd->ipath_unit);
716 *dd->ipath_statusp |= IPATH_STATUS_DISABLED;
717 /* this value is handled differently */
718 ret = 2;
719 goto bail;
720 }
721 }
722 ret = 0; 708 ret = 0;
723 709
724bail: 710bail:
@@ -1058,12 +1044,24 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
1058 u64 lst, u64 ltst) 1044 u64 lst, u64 ltst)
1059{ 1045{
1060 u64 extctl; 1046 u64 extctl;
1047 unsigned long flags = 0;
1061 1048
1062 /* the diags use the LED to indicate diag info, so we leave 1049 /* the diags use the LED to indicate diag info, so we leave
1063 * the external LED alone when the diags are running */ 1050 * the external LED alone when the diags are running */
1064 if (ipath_diag_inuse) 1051 if (ipath_diag_inuse)
1065 return; 1052 return;
1066 1053
1054 /* Allow override of LED display for, e.g., locating system in rack */
1055 if (dd->ipath_led_override) {
1056 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
1057 ? INFINIPATH_IBCS_LT_STATE_LINKUP
1058 : INFINIPATH_IBCS_LT_STATE_DISABLED;
1059 lst = (dd->ipath_led_override & IPATH_LED_LOG)
1060 ? INFINIPATH_IBCS_L_STATE_ACTIVE
1061 : INFINIPATH_IBCS_L_STATE_DOWN;
1062 }
1063
1064 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
1067 /* 1065 /*
1068 * start by setting both LED control bits to off, then turn 1066 * start by setting both LED control bits to off, then turn
1069 * on the appropriate bit(s). 1067 * on the appropriate bit(s).
@@ -1092,6 +1090,7 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
1092 } 1090 }
1093 dd->ipath_extctrl = extctl; 1091 dd->ipath_extctrl = extctl;
1094 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); 1092 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
1093 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
1095} 1094}
1096 1095
1097static void ipath_init_ht_variables(struct ipath_devdata *dd) 1096static void ipath_init_ht_variables(struct ipath_devdata *dd)
@@ -1157,6 +1156,22 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
1157 1156
1158 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; 1157 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1159 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; 1158 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1159
1160 /*
1161 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
1162 * 2 is Some Misc, 3 is reserved for future.
1163 */
1164 dd->ipath_eep_st_masks[0].hwerrs_to_log =
1165 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1166 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
1167
1168 dd->ipath_eep_st_masks[1].hwerrs_to_log =
1169 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1170 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1171
1172 dd->ipath_eep_st_masks[2].errs_to_log =
1173 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
1174
1160} 1175}
1161 1176
1162/** 1177/**
@@ -1372,7 +1387,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
1372 * ipath_ht_put_tid - write a TID in chip 1387 * ipath_ht_put_tid - write a TID in chip
1373 * @dd: the infinipath device 1388 * @dd: the infinipath device
1374 * @tidptr: pointer to the expected TID (in chip) to update 1389 * @tidptr: pointer to the expected TID (in chip) to update
1375 * @tidtype: 0 for eager, 1 for expected 1390 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1376 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1391 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1377 * 1392 *
1378 * This exists as a separate routine to allow for special locking etc. 1393 * This exists as a separate routine to allow for special locking etc.
@@ -1393,7 +1408,7 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
1393 "40 bits, using only 40!!!\n", pa); 1408 "40 bits, using only 40!!!\n", pa);
1394 pa &= INFINIPATH_RT_ADDR_MASK; 1409 pa &= INFINIPATH_RT_ADDR_MASK;
1395 } 1410 }
1396 if (type == 0) 1411 if (type == RCVHQ_RCV_TYPE_EAGER)
1397 pa |= dd->ipath_tidtemplate; 1412 pa |= dd->ipath_tidtemplate;
1398 else { 1413 else {
1399 /* in words (fixed, full page). */ 1414 /* in words (fixed, full page). */
@@ -1433,7 +1448,8 @@ static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
1433 port * dd->ipath_rcvtidcnt * 1448 port * dd->ipath_rcvtidcnt *
1434 sizeof(*tidbase)); 1449 sizeof(*tidbase));
1435 for (i = 0; i < dd->ipath_rcvtidcnt; i++) 1450 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1436 ipath_ht_put_tid(dd, &tidbase[i], 1, dd->ipath_tidinvalid); 1451 ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1452 dd->ipath_tidinvalid);
1437 1453
1438 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + 1454 tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
1439 dd->ipath_rcvegrbase + 1455 dd->ipath_rcvegrbase +
@@ -1441,7 +1457,8 @@ static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
1441 sizeof(*tidbase)); 1457 sizeof(*tidbase));
1442 1458
1443 for (i = 0; i < dd->ipath_rcvegrcnt; i++) 1459 for (i = 0; i < dd->ipath_rcvegrcnt; i++)
1444 ipath_ht_put_tid(dd, &tidbase[i], 0, dd->ipath_tidinvalid); 1460 ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
1461 dd->ipath_tidinvalid);
1445} 1462}
1446 1463
1447/** 1464/**
@@ -1528,11 +1545,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1528 writel(16, piobuf); 1545 writel(16, piobuf);
1529 piobuf += pioincr; 1546 piobuf += pioincr;
1530 } 1547 }
1531 /*
1532 * self-clearing
1533 */
1534 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1535 INFINIPATH_S_ABORT);
1536 1548
1537 ipath_get_eeprom_info(dd); 1549 ipath_get_eeprom_info(dd);
1538 if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && 1550 if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
@@ -1543,8 +1555,10 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1543 * with 128, rather than 112. 1555 * with 128, rather than 112.
1544 */ 1556 */
1545 dd->ipath_flags |= IPATH_GPIO_INTR; 1557 dd->ipath_flags |= IPATH_GPIO_INTR;
1546 dd->ipath_flags &= ~IPATH_POLL_RX_INTR; 1558 } else
1547 } 1559 ipath_dev_err(dd, "Unsupported InfiniPath serial "
1560 "number %.16s!\n", dd->ipath_serial);
1561
1548 return 0; 1562 return 0;
1549} 1563}
1550 1564
@@ -1561,7 +1575,6 @@ static int ipath_ht_txe_recover(struct ipath_devdata *dd)
1561 } 1575 }
1562 dev_info(&dd->pcidev->dev, 1576 dev_info(&dd->pcidev->dev,
1563 "Recovering from TXE PIO parity error\n"); 1577 "Recovering from TXE PIO parity error\n");
1564 ipath_disarm_senderrbufs(dd, 1);
1565 return 1; 1578 return 1;
1566} 1579}
1567 1580
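
Both setextled hunks now honor dd->ipath_led_override before computing the GPIO bits: the IPATH_LED_PHYS bit forces the link-training state to LINKUP or DISABLED, and IPATH_LED_LOG forces the link state to ACTIVE or DOWN. A standalone sketch of that selection, using stand-in state values rather than the real INFINIPATH_IBCS_* constants:

/*
 * Sketch of the LED-override mapping added above.  The state values are
 * stand-ins; only the selection logic mirrors the diff.
 */
#include <stdio.h>

#define IPATH_LED_PHYS 1	/* physical (link training) LED */
#define IPATH_LED_LOG  2	/* logical (link) LED */

enum { LT_DISABLED, LT_LINKUP, L_DOWN, L_ACTIVE };

static void pick_led_states(unsigned int override, int *ltst, int *lst)
{
	if (!override)
		return;		/* keep the real link state */
	*ltst = (override & IPATH_LED_PHYS) ? LT_LINKUP : LT_DISABLED;
	*lst = (override & IPATH_LED_LOG) ? L_ACTIVE : L_DOWN;
}

int main(void)
{
	int ltst = LT_DISABLED, lst = L_DOWN;

	pick_led_states(IPATH_LED_PHYS | IPATH_LED_LOG, &ltst, &lst);
	printf("ltst=%d lst=%d\n", ltst, lst);
	return 0;
}
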
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 4e2e3dfeb2c8..9868ccda5f26 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -296,13 +296,6 @@ static const struct ipath_cregs ipath_pe_cregs = {
296#define IPATH_GPIO_SCL (1ULL << \ 296#define IPATH_GPIO_SCL (1ULL << \
297 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) 297 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
298 298
299/*
300 * Rev2 silicon allows suppressing check for ArmLaunch errors.
301 * this can speed up short packet sends on systems that do
302 * not guarantee write-order.
303 */
304#define INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR (1ULL<<63)
305
306/* 6120 specific hardware errors... */ 299/* 6120 specific hardware errors... */
307static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = { 300static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
308 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), 301 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
@@ -347,6 +340,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
347 u32 bits, ctrl; 340 u32 bits, ctrl;
348 int isfatal = 0; 341 int isfatal = 0;
349 char bitsmsg[64]; 342 char bitsmsg[64];
343 int log_idx;
350 344
351 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); 345 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
352 if (!hwerrs) { 346 if (!hwerrs) {
@@ -374,6 +368,11 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
374 368
375 hwerrs &= dd->ipath_hwerrmask; 369 hwerrs &= dd->ipath_hwerrmask;
376 370
371 /* We log some errors to EEPROM, check if we have any of those. */
372 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
373 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
374 ipath_inc_eeprom_err(dd, log_idx, 1);
375
377 /* 376 /*
378 * make sure we get this much out, unless told to be quiet, 377 * make sure we get this much out, unless told to be quiet,
379 * or it's occurred within the last 5 seconds 378 * or it's occurred within the last 5 seconds
@@ -431,10 +430,12 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
431 *dd->ipath_statusp |= IPATH_STATUS_HWERROR; 430 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
432 dd->ipath_flags &= ~IPATH_INITTED; 431 dd->ipath_flags &= ~IPATH_INITTED;
433 } else { 432 } else {
434 ipath_dbg("Clearing freezemode on ignored hardware " 433 static u32 freeze_cnt;
435 "error\n"); 434
436 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 435 freeze_cnt++;
437 dd->ipath_control); 436 ipath_dbg("Clearing freezemode on ignored or recovered "
437 "hardware error (%u)\n", freeze_cnt);
438 ipath_clear_freeze(dd);
438 } 439 }
439 } 440 }
440 441
@@ -680,17 +681,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
680 val |= dd->ipath_rx_pol_inv << 681 val |= dd->ipath_rx_pol_inv <<
681 INFINIPATH_XGXS_RX_POL_SHIFT; 682 INFINIPATH_XGXS_RX_POL_SHIFT;
682 } 683 }
683 if (dd->ipath_minrev >= 2) {
684 /* Rev 2. can tolerate multiple writes to PBC, and
685 * allowing them can provide lower latency on some
686 * CPUs, but this feature is off by default, only
687 * turned on by setting D63 of XGXSconfig reg.
688 * May want to make this conditional more
689 * fine-grained in future. This is not exactly
690 * related to XGXS, but where the bit ended up.
691 */
692 val |= INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR;
693 }
694 if (val != prev_val) 684 if (val != prev_val)
695 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); 685 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
696 686
@@ -791,12 +781,24 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
791 u64 ltst) 781 u64 ltst)
792{ 782{
793 u64 extctl; 783 u64 extctl;
784 unsigned long flags = 0;
794 785
795 /* the diags use the LED to indicate diag info, so we leave 786 /* the diags use the LED to indicate diag info, so we leave
796 * the external LED alone when the diags are running */ 787 * the external LED alone when the diags are running */
797 if (ipath_diag_inuse) 788 if (ipath_diag_inuse)
798 return; 789 return;
799 790
791 /* Allow override of LED display for, e.g., locating system in rack */
792 if (dd->ipath_led_override) {
793 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
794 ? INFINIPATH_IBCS_LT_STATE_LINKUP
795 : INFINIPATH_IBCS_LT_STATE_DISABLED;
796 lst = (dd->ipath_led_override & IPATH_LED_LOG)
797 ? INFINIPATH_IBCS_L_STATE_ACTIVE
798 : INFINIPATH_IBCS_L_STATE_DOWN;
799 }
800
801 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
800 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | 802 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
801 INFINIPATH_EXTC_LED2PRIPORT_ON); 803 INFINIPATH_EXTC_LED2PRIPORT_ON);
802 804
@@ -806,6 +808,7 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
806 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; 808 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
807 dd->ipath_extctrl = extctl; 809 dd->ipath_extctrl = extctl;
808 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); 810 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
811 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
809} 812}
810 813
811/** 814/**
@@ -955,6 +958,27 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
955 958
956 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; 959 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
957 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; 960 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
961
962 /*
963 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
964 * 2 is Some Misc, 3 is reserved for future.
965 */
966 dd->ipath_eep_st_masks[0].hwerrs_to_log =
967 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
968 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
969
970 /* Ignore errors in PIO/PBC on systems with unordered write-combining */
971 if (ipath_unordered_wc())
972 dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
973
974 dd->ipath_eep_st_masks[1].hwerrs_to_log =
975 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
976 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
977
978 dd->ipath_eep_st_masks[2].errs_to_log =
979 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
980
981
958} 982}
959 983
960/* setup the MSI stuff again after a reset. I'd like to just call 984/* setup the MSI stuff again after a reset. I'd like to just call
@@ -1082,7 +1106,7 @@ bail:
1082 * ipath_pe_put_tid - write a TID in chip 1106 * ipath_pe_put_tid - write a TID in chip
1083 * @dd: the infinipath device 1107 * @dd: the infinipath device
1084 * @tidptr: pointer to the expected TID (in chip) to update 1108 * @tidptr: pointer to the expected TID (in chip) to update
1085 * @tidtype: 0 for eager, 1 for expected 1109 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1086 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1110 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1087 * 1111 *
1088 * This exists as a separate routine to allow for special locking etc. 1112 * This exists as a separate routine to allow for special locking etc.
@@ -1108,7 +1132,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1108 "BUG: Physical page address 0x%lx " 1132 "BUG: Physical page address 0x%lx "
1109 "has bits set in 31-29\n", pa); 1133 "has bits set in 31-29\n", pa);
1110 1134
1111 if (type == 0) 1135 if (type == RCVHQ_RCV_TYPE_EAGER)
1112 pa |= dd->ipath_tidtemplate; 1136 pa |= dd->ipath_tidtemplate;
1113 else /* for now, always full 4KB page */ 1137 else /* for now, always full 4KB page */
1114 pa |= 2 << 29; 1138 pa |= 2 << 29;
@@ -1132,7 +1156,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1132 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher 1156 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
1133 * @dd: the infinipath device 1157 * @dd: the infinipath device
1134 * @tidptr: pointer to the expected TID (in chip) to update 1158 * @tidptr: pointer to the expected TID (in chip) to update
1135 * @tidtype: 0 for eager, 1 for expected 1159 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1136 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing 1160 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1137 * 1161 *
1138 * This exists as a separate routine to allow for selection of the 1162 * This exists as a separate routine to allow for selection of the
@@ -1157,7 +1181,7 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1157 "BUG: Physical page address 0x%lx " 1181 "BUG: Physical page address 0x%lx "
1158 "has bits set in 31-29\n", pa); 1182 "has bits set in 31-29\n", pa);
1159 1183
1160 if (type == 0) 1184 if (type == RCVHQ_RCV_TYPE_EAGER)
1161 pa |= dd->ipath_tidtemplate; 1185 pa |= dd->ipath_tidtemplate;
1162 else /* for now, always full 4KB page */ 1186 else /* for now, always full 4KB page */
1163 pa |= 2 << 29; 1187 pa |= 2 << 29;
@@ -1196,7 +1220,8 @@ static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
1196 port * dd->ipath_rcvtidcnt * sizeof(*tidbase)); 1220 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
1197 1221
1198 for (i = 0; i < dd->ipath_rcvtidcnt; i++) 1222 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1199 ipath_pe_put_tid(dd, &tidbase[i], 0, tidinv); 1223 ipath_pe_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1224 tidinv);
1200 1225
1201 tidbase = (u64 __iomem *) 1226 tidbase = (u64 __iomem *)
1202 ((char __iomem *)(dd->ipath_kregbase) + 1227 ((char __iomem *)(dd->ipath_kregbase) +
@@ -1204,7 +1229,8 @@ static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
1204 port * dd->ipath_rcvegrcnt * sizeof(*tidbase)); 1229 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
1205 1230
1206 for (i = 0; i < dd->ipath_rcvegrcnt; i++) 1231 for (i = 0; i < dd->ipath_rcvegrcnt; i++)
1207 ipath_pe_put_tid(dd, &tidbase[i], 1, tidinv); 1232 ipath_pe_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
1233 tidinv);
1208} 1234}
1209 1235
1210/** 1236/**
@@ -1311,13 +1337,6 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
1311 1337
1312 dd = pd->port_dd; 1338 dd = pd->port_dd;
1313 1339
1314 if (dd != NULL && dd->ipath_minrev >= 2) {
1315 ipath_cdbg(PROC, "IBA6120 Rev2, allow multiple PBC write\n");
1316 kinfo->spi_runtime_flags |= IPATH_RUNTIME_PBC_REWRITE;
1317 ipath_cdbg(PROC, "IBA6120 Rev2, allow loose DMA alignment\n");
1318 kinfo->spi_runtime_flags |= IPATH_RUNTIME_LOOSE_DMA_ALIGN;
1319 }
1320
1321done: 1340done:
1322 kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE; 1341 kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
1323 return 0; 1342 return 0;
@@ -1354,7 +1373,6 @@ static int ipath_pe_txe_recover(struct ipath_devdata *dd)
1354 dev_info(&dd->pcidev->dev, 1373 dev_info(&dd->pcidev->dev,
1355 "Recovering from TXE PIO parity error\n"); 1374 "Recovering from TXE PIO parity error\n");
1356 } 1375 }
1357 ipath_disarm_senderrbufs(dd, 1);
1358 return 1; 1376 return 1;
1359} 1377}
1360 1378
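
As in the 6110 code, the 6120 error handlers now walk ipath_eep_st_masks[] and call ipath_inc_eeprom_err() once per event whose bits match a category's mask. ipath_inc_eeprom_err() itself is not shown in this diff; the sketch below models it as the saturating 8-bit counter described in the ipath_kernel.h comment further down, and the mask bit positions are made-up examples.

/*
 * Sketch of the per-category EEPROM error-log pattern used above; the
 * saturating add and the example bit positions are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define EEP_LOG_CNT 4

struct eep_log_mask { uint64_t errs_to_log; uint64_t hwerrs_to_log; };

static uint8_t eep_new_errs[EEP_LOG_CNT];

static void inc_eeprom_err(unsigned int idx, unsigned int incr)
{
	unsigned int v = eep_new_errs[idx] + incr;

	eep_new_errs[idx] = v > 255 ? 255 : v;	/* saturate at 255 */
}

int main(void)
{
	struct eep_log_mask masks[EEP_LOG_CNT] = {
		{ .hwerrs_to_log = 1ull << 40 },	/* e.g. TXE parity */
		{ .hwerrs_to_log = 1ull << 44 },	/* e.g. RXE parity */
	};
	uint64_t hwerrs = 1ull << 40;
	int i;

	for (i = 0; i < EEP_LOG_CNT; i++)
		if (hwerrs & masks[i].hwerrs_to_log)
			inc_eeprom_err(i, 1);
	printf("category 0 count = %u\n", (unsigned int)eep_new_errs[0]);
	return 0;
}
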
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 7045ba689494..49951d583804 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -133,7 +133,8 @@ static int create_port0_egr(struct ipath_devdata *dd)
133 dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE); 133 dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
134 dd->ipath_f_put_tid(dd, e + (u64 __iomem *) 134 dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
135 ((char __iomem *) dd->ipath_kregbase + 135 ((char __iomem *) dd->ipath_kregbase +
136 dd->ipath_rcvegrbase), 0, 136 dd->ipath_rcvegrbase),
137 RCVHQ_RCV_TYPE_EAGER,
137 dd->ipath_port0_skbinfo[e].phys); 138 dd->ipath_port0_skbinfo[e].phys);
138 } 139 }
139 140
@@ -310,7 +311,12 @@ static int init_chip_first(struct ipath_devdata *dd,
310 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize); 311 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
311 dd->ipath_piosize2k = val & ~0U; 312 dd->ipath_piosize2k = val & ~0U;
312 dd->ipath_piosize4k = val >> 32; 313 dd->ipath_piosize4k = val >> 32;
313 dd->ipath_ibmtu = 4096; /* default to largest legal MTU */ 314 /*
315 * Note: the chips support a maximum MTU of 4096, but the driver
316 * hasn't implemented this feature yet, so set the initial value
317 * to 2048.
318 */
319 dd->ipath_ibmtu = 2048;
314 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt); 320 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
315 dd->ipath_piobcnt2k = val & ~0U; 321 dd->ipath_piobcnt2k = val & ~0U;
316 dd->ipath_piobcnt4k = val >> 32; 322 dd->ipath_piobcnt4k = val >> 32;
@@ -340,6 +346,10 @@ static int init_chip_first(struct ipath_devdata *dd,
340 346
341 spin_lock_init(&dd->ipath_tid_lock); 347 spin_lock_init(&dd->ipath_tid_lock);
342 348
349 spin_lock_init(&dd->ipath_gpio_lock);
350 spin_lock_init(&dd->ipath_eep_st_lock);
351 sema_init(&dd->ipath_eep_sem, 1);
352
343done: 353done:
344 *pdp = pd; 354 *pdp = pd;
345 return ret; 355 return ret;
@@ -646,7 +656,7 @@ static int init_housekeeping(struct ipath_devdata *dd,
646 ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn); 656 ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
647 657
648 snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion), 658 snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
649 "Driver %u.%u, %s, InfiniPath%u %u.%u, PCI %u, " 659 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
650 "SW Compat %u\n", 660 "SW Compat %u\n",
651 IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn, 661 IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
652 (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) & 662 (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
@@ -727,7 +737,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
727 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0; 737 uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
728 if (ipath_kpiobufs == 0) { 738 if (ipath_kpiobufs == 0) {
729 /* not set by user (this is default) */ 739 /* not set by user (this is default) */
730 if (piobufs >= (uports * IPATH_MIN_USER_PORT_BUFCNT) + 32) 740 if (piobufs > 144)
731 kpiobufs = 32; 741 kpiobufs = 32;
732 else 742 else
733 kpiobufs = 16; 743 kpiobufs = 16;
@@ -767,6 +777,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
767 piobufs, dd->ipath_pbufsport, uports); 777 piobufs, dd->ipath_pbufsport, uports);
768 778
769 dd->ipath_f_early_init(dd); 779 dd->ipath_f_early_init(dd);
780 /*
781 * cancel any possible active sends from early driver load.
782 * Follows early_init because some chips have to initialize
783 * PIO buffers in early_init to avoid false parity errors.
784 */
785 ipath_cancel_sends(dd);
770 786
771 /* early_init sets rcvhdrentsize and rcvhdrsize, so this must be 787 /* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
772 * done after early_init */ 788 * done after early_init */
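
The kpiobufs default above now depends only on the total PIO buffer count: reserve 32 for the kernel when more than 144 buffers exist, otherwise 16. A trivial sketch of that rule (the per-user-port split is handled elsewhere in the driver and is not shown here):

/* Sketch: default kernel PIO buffer count chosen in ipath_init_chip(). */
#include <stdio.h>

static unsigned int default_kpiobufs(unsigned int piobufs)
{
	/* matches the hunk above: plenty of buffers -> reserve 32, else 16 */
	return piobufs > 144 ? 32 : 16;
}

int main(void)
{
	printf("128 buffers -> %u, 2048 buffers -> %u\n",
	       default_kpiobufs(128), default_kpiobufs(2048));
	return 0;
}
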
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index a90d3b5699c4..47aa43428fbf 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -93,7 +93,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
93 93
94 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) { 94 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
95 int i; 95 int i;
96 if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG)) { 96 if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
97 dd->ipath_lastcancel > jiffies) {
97 __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG, 98 __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
98 "SendbufErrs %lx %lx", sbuf[0], 99 "SendbufErrs %lx %lx", sbuf[0],
99 sbuf[1]); 100 sbuf[1]);
@@ -108,7 +109,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
108 ipath_clrpiobuf(dd, i); 109 ipath_clrpiobuf(dd, i);
109 ipath_disarm_piobufs(dd, i, 1); 110 ipath_disarm_piobufs(dd, i, 1);
110 } 111 }
111 dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */ 112 /* ignore armlaunch errs for a bit */
113 dd->ipath_lastcancel = jiffies+3;
112 } 114 }
113} 115}
114 116
@@ -131,6 +133,17 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
131 INFINIPATH_E_INVALIDADDR) 133 INFINIPATH_E_INVALIDADDR)
132 134
133/* 135/*
136 * this is similar to E_SUM_ERRS, but can't ignore armlaunch and doesn't
137 * ignore errors unrelated to freeze and cancelling buffers.  Armlaunch
138 * can't be ignored because more may occur while we are still cleaning up,
139 * and those need to be cancelled as they happen.
140 */
141#define E_SPKT_ERRS_IGNORE \
142 (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
143 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
144 INFINIPATH_E_SPKTLEN)
145
146/*
134 * these are errors that can occur when the link changes state while 147 * these are errors that can occur when the link changes state while
135 * a packet is being sent or received. This doesn't cover things 148 * a packet is being sent or received. This doesn't cover things
136 * like EBP or VCRC that can be the result of a sending having the 149 * like EBP or VCRC that can be the result of a sending having the
@@ -290,12 +303,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
290 * Flush all queued sends when link went to DOWN or INIT, 303 * Flush all queued sends when link went to DOWN or INIT,
291 * to be sure that they don't block SMA and other MAD packets 304 * to be sure that they don't block SMA and other MAD packets
292 */ 305 */
293 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 306 ipath_cancel_sends(dd);
294 INFINIPATH_S_ABORT);
295 ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
296 (unsigned)(dd->ipath_piobcnt2k +
297 dd->ipath_piobcnt4k) -
298 dd->ipath_lastport_piobuf);
299 } 307 }
300 else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM || 308 else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
301 lstate == IPATH_IBSTATE_ACTIVE) { 309 lstate == IPATH_IBSTATE_ACTIVE) {
@@ -505,6 +513,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
505 int i, iserr = 0; 513 int i, iserr = 0;
506 int chkerrpkts = 0, noprint = 0; 514 int chkerrpkts = 0, noprint = 0;
507 unsigned supp_msgs; 515 unsigned supp_msgs;
516 int log_idx;
508 517
509 supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint); 518 supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
510 519
@@ -518,6 +527,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
518 if (errs & INFINIPATH_E_HARDWARE) { 527 if (errs & INFINIPATH_E_HARDWARE) {
519 /* reuse same msg buf */ 528 /* reuse same msg buf */
520 dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg); 529 dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
530 } else {
531 u64 mask;
532 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
533 mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
534 if (errs & mask)
535 ipath_inc_eeprom_err(dd, log_idx, 1);
536 }
521 } 537 }
522 538
523 if (!noprint && (errs & ~dd->ipath_e_bitsextant)) 539 if (!noprint && (errs & ~dd->ipath_e_bitsextant))
@@ -675,6 +691,17 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
675 chkerrpkts = 1; 691 chkerrpkts = 1;
676 dd->ipath_lastrcvhdrqtails[i] = tl; 692 dd->ipath_lastrcvhdrqtails[i] = tl;
677 pd->port_hdrqfull++; 693 pd->port_hdrqfull++;
694 if (test_bit(IPATH_PORT_WAITING_OVERFLOW,
695 &pd->port_flag)) {
696 clear_bit(
697 IPATH_PORT_WAITING_OVERFLOW,
698 &pd->port_flag);
699 set_bit(
700 IPATH_PORT_WAITING_OVERFLOW,
701 &pd->int_flag);
702 wake_up_interruptible(
703 &pd->port_wait);
704 }
678 } 705 }
679 } 706 }
680 } 707 }
@@ -744,6 +771,72 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
744 return chkerrpkts; 771 return chkerrpkts;
745} 772}
746 773
774
775/*
776 * try to clean up as much as possible for anything that might have gone
777 * wrong while in freeze mode, such as pio buffers being written by user
778 * processes (causing armlaunch), send errors due to going into freeze mode,
779 * etc., and try to avoid causing extra interrupts while doing so.
780 * Forcibly update the in-memory pioavail register copies after cleanup
781 * because the chip won't do it for anything changing while in freeze mode
782 * (we don't want to wait for the next pio buffer state change).
783 * Make sure that we don't lose any important interrupts by using the chip
784 * feature that says that writing 0 to a bit in *clear that is set in
785 * *status will cause an interrupt to be generated again (if allowed by
786 * the *mask value).
787 */
788void ipath_clear_freeze(struct ipath_devdata *dd)
789{
790 int i, im;
791 __le64 val;
792
793 /* disable error interrupts, to avoid confusion */
794 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
795
796 /*
797 * clear all sends, because they may have been
798 * completed by usercode while in freeze mode, and
799 * therefore would not be sent, and eventually
800 * might cause the process to run out of bufs
801 */
802 ipath_cancel_sends(dd);
803 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
804 dd->ipath_control);
805
806 /* ensure pio avail updates continue */
807 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
808 dd->ipath_sendctrl & ~IPATH_S_PIOBUFAVAILUPD);
809 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
810 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
811 dd->ipath_sendctrl);
812
813 /*
814 * We just enabled pioavailupdate, so dma copy is almost certainly
815 * not yet right, so read the registers directly. Similar to init
816 */
817 for (i = 0; i < dd->ipath_pioavregs; i++) {
818 /* deal with 6110 chip bug */
819 im = i > 3 ? ((i&1) ? i-1 : i+1) : i;
820 val = ipath_read_kreg64(dd, 0x1000+(im*sizeof(u64)));
821 dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
822 = le64_to_cpu(val);
823 }
824
825 /*
826 * force new interrupt if any hwerr, error or interrupt bits are
827 * still set, and clear "safe" send packet errors related to freeze
828 * and cancelling sends. Re-enable error interrupts before possible
829 * force of re-interrupt on pending interrupts.
830 */
831 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
832 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
833 E_SPKT_ERRS_IGNORE);
834 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
835 ~dd->ipath_maskederrs);
836 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
837}
838
839
747/* this is separate to allow for better optimization of ipath_intr() */ 840/* this is separate to allow for better optimization of ipath_intr() */
748 841
749static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp) 842static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
@@ -872,14 +965,25 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
872 dd->ipath_i_rcvurg_mask); 965 dd->ipath_i_rcvurg_mask);
873 for (i = 1; i < dd->ipath_cfgports; i++) { 966 for (i = 1; i < dd->ipath_cfgports; i++) {
874 struct ipath_portdata *pd = dd->ipath_pd[i]; 967 struct ipath_portdata *pd = dd->ipath_pd[i];
875 if (portr & (1 << i) && pd && pd->port_cnt && 968 if (portr & (1 << i) && pd && pd->port_cnt) {
876 test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) { 969 if (test_bit(IPATH_PORT_WAITING_RCV,
877 clear_bit(IPATH_PORT_WAITING_RCV, 970 &pd->port_flag)) {
878 &pd->port_flag); 971 clear_bit(IPATH_PORT_WAITING_RCV,
879 clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT, 972 &pd->port_flag);
880 &dd->ipath_rcvctrl); 973 set_bit(IPATH_PORT_WAITING_RCV,
881 wake_up_interruptible(&pd->port_wait); 974 &pd->int_flag);
882 rcvdint = 1; 975 clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
976 &dd->ipath_rcvctrl);
977 wake_up_interruptible(&pd->port_wait);
978 rcvdint = 1;
979 } else if (test_bit(IPATH_PORT_WAITING_URG,
980 &pd->port_flag)) {
981 clear_bit(IPATH_PORT_WAITING_URG,
982 &pd->port_flag);
983 set_bit(IPATH_PORT_WAITING_URG,
984 &pd->int_flag);
985 wake_up_interruptible(&pd->port_wait);
986 }
883 } 987 }
884 } 988 }
885 if (rcvdint) { 989 if (rcvdint) {
@@ -905,6 +1009,9 @@ irqreturn_t ipath_intr(int irq, void *data)
905 1009
906 ipath_stats.sps_ints++; 1010 ipath_stats.sps_ints++;
907 1011
1012 if (dd->ipath_int_counter != (u32) -1)
1013 dd->ipath_int_counter++;
1014
908 if (!(dd->ipath_flags & IPATH_PRESENT)) { 1015 if (!(dd->ipath_flags & IPATH_PRESENT)) {
909 /* 1016 /*
910 * This return value is not great, but we do not want the 1017 * This return value is not great, but we do not want the
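
Inside ipath_clear_freeze() above, the pioavail registers are read back directly using the index swizzle im = i > 3 ? ((i & 1) ? i - 1 : i + 1) : i, which swaps even/odd register pairs above index 3 to work around the 6110 chip bug noted in the comment. A quick sketch of the resulting mapping:

/* Sketch: the 6110 pioavail index swizzle used in ipath_clear_freeze(). */
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* 0-3 map straight through; above that, even/odd pairs swap */
		int im = i > 3 ? ((i & 1) ? i - 1 : i + 1) : i;

		printf("i=%d -> im=%d\n", i, im);
	}
	return 0;
}
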
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 12194f3dd8cc..3105005fc9d2 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1,7 +1,7 @@
1#ifndef _IPATH_KERNEL_H 1#ifndef _IPATH_KERNEL_H
2#define _IPATH_KERNEL_H 2#define _IPATH_KERNEL_H
3/* 3/*
4 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 4 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
6 * 6 *
7 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
@@ -57,6 +57,24 @@
57extern struct infinipath_stats ipath_stats; 57extern struct infinipath_stats ipath_stats;
58 58
59#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ 59#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
60/*
61 * First-cut criterion for "device is active" is
62 * two thousand dwords combined Tx, Rx traffic per
63 * 5-second interval. SMA packets are 64 dwords,
64 * and occur "a few per second", presumably each way.
65 */
66#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
67/*
68 * Struct used to indicate which errors are logged in each of the
69 * error-counters that are logged to EEPROM. A counter is incremented
70 * _once_ (saturating at 255) for each event with any bits set in
71 * the error or hwerror register masks below.
72 */
73#define IPATH_EEP_LOG_CNT (4)
74struct ipath_eep_log_mask {
75 u64 errs_to_log;
76 u64 hwerrs_to_log;
77};
60 78
61struct ipath_portdata { 79struct ipath_portdata {
62 void **port_rcvegrbuf; 80 void **port_rcvegrbuf;
@@ -109,6 +127,8 @@ struct ipath_portdata {
109 u32 port_tidcursor; 127 u32 port_tidcursor;
110 /* next expected TID to check */ 128 /* next expected TID to check */
111 unsigned long port_flag; 129 unsigned long port_flag;
130 /* what happened */
131 unsigned long int_flag;
112 /* WAIT_RCV that timed out, no interrupt */ 132 /* WAIT_RCV that timed out, no interrupt */
113 u32 port_rcvwait_to; 133 u32 port_rcvwait_to;
114 /* WAIT_PIO that timed out, no interrupt */ 134 /* WAIT_PIO that timed out, no interrupt */
@@ -137,6 +157,8 @@ struct ipath_portdata {
137 u32 userversion; 157 u32 userversion;
138 /* Bitmask of active slaves */ 158 /* Bitmask of active slaves */
139 u32 active_slaves; 159 u32 active_slaves;
160 /* Type of packets or conditions we want to poll for */
161 u16 poll_type;
140}; 162};
141 163
142struct sk_buff; 164struct sk_buff;
@@ -275,6 +297,8 @@ struct ipath_devdata {
275 u32 ipath_lastport_piobuf; 297 u32 ipath_lastport_piobuf;
276 /* is a stats timer active */ 298 /* is a stats timer active */
277 u32 ipath_stats_timer_active; 299 u32 ipath_stats_timer_active;
300 /* number of interrupts for this device -- saturates... */
301 u32 ipath_int_counter;
278 /* dwords sent read from counter */ 302 /* dwords sent read from counter */
279 u32 ipath_lastsword; 303 u32 ipath_lastsword;
280 /* dwords received read from counter */ 304 /* dwords received read from counter */
@@ -369,9 +393,6 @@ struct ipath_devdata {
369 struct class_device *diag_class_dev; 393 struct class_device *diag_class_dev;
370 /* timer used to prevent stats overflow, error throttling, etc. */ 394 /* timer used to prevent stats overflow, error throttling, etc. */
371 struct timer_list ipath_stats_timer; 395 struct timer_list ipath_stats_timer;
372 /* check for stale messages in rcv queue */
373 /* only allow one intr at a time. */
374 unsigned long ipath_rcv_pending;
375 void *ipath_dummy_hdrq; /* used after port close */ 396 void *ipath_dummy_hdrq; /* used after port close */
376 dma_addr_t ipath_dummy_hdrq_phys; 397 dma_addr_t ipath_dummy_hdrq_phys;
377 398
@@ -399,6 +420,8 @@ struct ipath_devdata {
399 u64 ipath_gpio_out; 420 u64 ipath_gpio_out;
400 /* shadow the gpio mask register */ 421 /* shadow the gpio mask register */
401 u64 ipath_gpio_mask; 422 u64 ipath_gpio_mask;
423 /* shadow the gpio output enable, etc... */
424 u64 ipath_extctrl;
402 /* kr_revision shadow */ 425 /* kr_revision shadow */
403 u64 ipath_revision; 426 u64 ipath_revision;
404 /* 427 /*
@@ -473,8 +496,6 @@ struct ipath_devdata {
473 u32 ipath_cregbase; 496 u32 ipath_cregbase;
474 /* shadow the control register contents */ 497 /* shadow the control register contents */
475 u32 ipath_control; 498 u32 ipath_control;
476 /* shadow the gpio output contents */
477 u32 ipath_extctrl;
478 /* PCI revision register (HTC rev on FPGA) */ 499 /* PCI revision register (HTC rev on FPGA) */
479 u32 ipath_pcirev; 500 u32 ipath_pcirev;
480 501
@@ -552,6 +573,9 @@ struct ipath_devdata {
552 u32 ipath_overrun_thresh_errs; 573 u32 ipath_overrun_thresh_errs;
553 u32 ipath_lli_errs; 574 u32 ipath_lli_errs;
554 575
576 /* status check work */
577 struct delayed_work status_work;
578
555 /* 579 /*
556 * Not all devices managed by a driver instance are the same 580 * Not all devices managed by a driver instance are the same
557 * type, so these fields must be per-device. 581 * type, so these fields must be per-device.
@@ -575,6 +599,37 @@ struct ipath_devdata {
575 u16 ipath_gpio_scl_num; 599 u16 ipath_gpio_scl_num;
576 u64 ipath_gpio_sda; 600 u64 ipath_gpio_sda;
577 u64 ipath_gpio_scl; 601 u64 ipath_gpio_scl;
602
603 /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
604 spinlock_t ipath_gpio_lock;
605
606 /* used to override LED behavior */
607 u8 ipath_led_override; /* Substituted for normal value, if non-zero */
608 u16 ipath_led_override_timeoff; /* delta to next timer event */
609 u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
610 u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
611 atomic_t ipath_led_override_timer_active;
612 /* Used to flash LEDs in override mode */
613 struct timer_list ipath_led_override_timer;
614
615 /* Support (including locks) for EEPROM logging of errors and time */
616 /* control access to actual counters, timer */
617 spinlock_t ipath_eep_st_lock;
618 /* control high-level access to EEPROM */
619 struct semaphore ipath_eep_sem;
620 /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
621 uint64_t ipath_traffic_wds;
622 /* active time is kept in seconds, but logged in hours */
623 atomic_t ipath_active_time;
624 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
625 uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
626 uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
627 uint16_t ipath_eep_hrs;
628 /*
629 * masks for which bits of errs, hwerrs that cause
630 * each of the counters to increment.
631 */
632 struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
578}; 633};
579 634
580/* Private data for file operations */ 635/* Private data for file operations */
@@ -592,6 +647,7 @@ int ipath_enable_wc(struct ipath_devdata *dd);
592void ipath_disable_wc(struct ipath_devdata *dd); 647void ipath_disable_wc(struct ipath_devdata *dd);
593int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); 648int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
594void ipath_shutdown_device(struct ipath_devdata *); 649void ipath_shutdown_device(struct ipath_devdata *);
650void ipath_clear_freeze(struct ipath_devdata *);
595 651
596struct file_operations; 652struct file_operations;
597int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, 653int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
@@ -627,6 +683,7 @@ int ipath_unordered_wc(void);
627 683
628void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first, 684void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
629 unsigned cnt); 685 unsigned cnt);
686void ipath_cancel_sends(struct ipath_devdata *);
630 687
631int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *); 688int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
632void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *); 689void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
@@ -685,7 +742,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
685 * are 64bit */ 742 * are 64bit */
686#define IPATH_32BITCOUNTERS 0x20000 743#define IPATH_32BITCOUNTERS 0x20000
687 /* can miss port0 rx interrupts */ 744 /* can miss port0 rx interrupts */
688#define IPATH_POLL_RX_INTR 0x40000
689#define IPATH_DISABLED 0x80000 /* administratively disabled */ 745#define IPATH_DISABLED 0x80000 /* administratively disabled */
690 /* Use GPIO interrupts for new counters */ 746 /* Use GPIO interrupts for new counters */
691#define IPATH_GPIO_ERRINTRS 0x100000 747#define IPATH_GPIO_ERRINTRS 0x100000
@@ -704,6 +760,10 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
704#define IPATH_PORT_WAITING_PIO 3 760#define IPATH_PORT_WAITING_PIO 3
705 /* master has not finished initializing */ 761 /* master has not finished initializing */
706#define IPATH_PORT_MASTER_UNINIT 4 762#define IPATH_PORT_MASTER_UNINIT 4
763 /* waiting for an urgent packet to arrive */
764#define IPATH_PORT_WAITING_URG 5
765 /* waiting for a header overflow */
766#define IPATH_PORT_WAITING_OVERFLOW 6
707 767
708/* free up any allocated data at closes */ 768/* free up any allocated data at closes */
709void ipath_free_data(struct ipath_portdata *dd); 769void ipath_free_data(struct ipath_portdata *dd);
@@ -713,10 +773,21 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
713void ipath_init_iba6120_funcs(struct ipath_devdata *); 773void ipath_init_iba6120_funcs(struct ipath_devdata *);
714void ipath_init_iba6110_funcs(struct ipath_devdata *); 774void ipath_init_iba6110_funcs(struct ipath_devdata *);
715void ipath_get_eeprom_info(struct ipath_devdata *); 775void ipath_get_eeprom_info(struct ipath_devdata *);
776int ipath_update_eeprom_log(struct ipath_devdata *dd);
777void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
716u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 778u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
717void ipath_disarm_senderrbufs(struct ipath_devdata *, int); 779void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
718 780
719/* 781/*
782 * Set LED override, only the two LSBs have "public" meaning, but
783 * any non-zero value substitutes them for the Link and LinkTrain
784 * LED states.
785 */
786#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
787#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
788void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
789
790/*
720 * number of words used for protocol header if not set by ipath_userinit(); 791 * number of words used for protocol header if not set by ipath_userinit();
721 */ 792 */
722#define IPATH_DFLT_RCVHDRSIZE 9 793#define IPATH_DFLT_RCVHDRSIZE 9
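
The IPATH_TRAFFIC_ACTIVE_THRESHOLD comment can be checked with a little arithmetic: at 64 dwords per SMA packet, a few packets per second in each direction comes to well under 2000 dwords in a 5-second sample, so SMA chatter alone never marks the device active. A sketch, with the packet rate chosen only as an example:

/*
 * Sketch: background SMA traffic vs. the activity threshold above.
 * The 2-per-second rate is an assumed example, not a measured figure.
 */
#include <stdio.h>

#define TRAFFIC_ACTIVE_THRESHOLD 2000	/* dwords per 5-second sample */
#define SMA_PKT_DWORDS 64

int main(void)
{
	unsigned int sma_pkts = 2 /* per sec */ * 2 /* directions */ * 5 /* sec */;
	unsigned int dwords = sma_pkts * SMA_PKT_DWORDS;

	printf("SMA background: %u dwords, counted as active: %s\n", dwords,
	       dwords >= TRAFFIC_ACTIVE_THRESHOLD ? "yes" : "no");
	return 0;
}
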
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index dd487c100f5b..85a4aefc6c03 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 05a1d2b01d9d..82616b779e24 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
index 3854a4eae684..415709c4d85b 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 25908b02fbe5..d61c03044545 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -103,7 +103,7 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
103 /* This is already in network order */ 103 /* This is already in network order */
104 nip->sys_guid = to_idev(ibdev)->sys_image_guid; 104 nip->sys_guid = to_idev(ibdev)->sys_image_guid;
105 nip->node_guid = dd->ipath_guid; 105 nip->node_guid = dd->ipath_guid;
106 nip->port_guid = nip->sys_guid; 106 nip->port_guid = dd->ipath_guid;
107 nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd)); 107 nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
108 nip->device_id = cpu_to_be16(dd->ipath_deviceid); 108 nip->device_id = cpu_to_be16(dd->ipath_deviceid);
109 majrev = dd->ipath_majrev; 109 majrev = dd->ipath_majrev;
@@ -292,7 +292,12 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
292 /* pip->vl_arb_high_cap; // only one VL */ 292 /* pip->vl_arb_high_cap; // only one VL */
293 /* pip->vl_arb_low_cap; // only one VL */ 293 /* pip->vl_arb_low_cap; // only one VL */
294 /* InitTypeReply = 0 */ 294 /* InitTypeReply = 0 */
295 pip->inittypereply_mtucap = IB_MTU_4096; 295 /*
296 * Note: the chips support a maximum MTU of 4096, but the driver
297 * hasn't implemented this feature yet, so set the maximum value
298 * to 2048.
299 */
300 pip->inittypereply_mtucap = IB_MTU_2048;
296 // HCAs ignore VLStallCount and HOQLife 301 // HCAs ignore VLStallCount and HOQLife
297 /* pip->vlstallcnt_hoqlife; */ 302 /* pip->vlstallcnt_hoqlife; */
298 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */ 303 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index 937bc3396b53..fa830e22002f 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index bdeef8d4f279..e442470a2375 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index bfef08ecd342..1324b35ff1f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -336,7 +336,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
         qp->qkey = 0;
         qp->qp_access_flags = 0;
         qp->s_busy = 0;
-        qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
+        qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
         qp->s_hdrwords = 0;
         qp->s_psn = 0;
         qp->r_psn = 0;
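
The ipath_reset_qp() hunk above flips the mask sense: "s_flags &= IPATH_S_SIGNAL_REQ_WR" keeps only the signal-requested-WR setting across a reset, whereas the old "&= ~IPATH_S_SIGNAL_REQ_WR" cleared exactly that bit and left every other flag alone. A small standalone illustration of the two idioms (the flag names below are made up for the example):

/* Illustrative sketch, not driver code. */
#include <stdio.h>

#define FLAG_SIGNAL_REQ_WR  0x1u
#define FLAG_BUSY           0x2u
#define FLAG_WAITING        0x4u

int main(void)
{
        unsigned flags = FLAG_SIGNAL_REQ_WR | FLAG_BUSY | FLAG_WAITING;
        unsigned keep_only  = flags & FLAG_SIGNAL_REQ_WR;   /* 0x1: new reset behaviour */
        unsigned clear_only = flags & ~FLAG_SIGNAL_REQ_WR;  /* 0x6: old behaviour */

        printf("keep_only=0x%x clear_only=0x%x\n", keep_only, clear_only);
        return 0;
}
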
@@ -507,16 +507,13 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
             attr->port_num > ibqp->device->phys_port_cnt)
                 goto inval;
 
+        /*
+         * Note: the chips support a maximum MTU of 4096, but the driver
+         * hasn't implemented this feature yet, so don't allow Path MTU
+         * values greater than 2048.
+         */
         if (attr_mask & IB_QP_PATH_MTU)
-                if (attr->path_mtu > IB_MTU_4096)
-                        goto inval;
-
-        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-                if (attr->max_dest_rd_atomic > 1)
-                        goto inval;
-
-        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
-                if (attr->max_rd_atomic > 1)
+                if (attr->path_mtu > IB_MTU_2048)
                         goto inval;
 
         if (attr_mask & IB_QP_PATH_MIG_STATE)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 1915771fd038..46744ea2babd 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -125,8 +125,10 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                 if (len > pmtu) {
                         len = pmtu;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-                } else
+                } else {
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+                        e->sent = 1;
+                }
                 ohdr->u.aeth = ipath_compute_aeth(qp);
                 hwords++;
                 qp->s_ack_rdma_psn = e->psn;
@@ -143,6 +145,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                                 cpu_to_be32(e->atomic_data);
                         hwords += sizeof(ohdr->u.at) / sizeof(u32);
                         bth2 = e->psn;
+                        e->sent = 1;
                 }
                 bth0 = qp->s_ack_state << 24;
                 break;
@@ -158,6 +161,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                         ohdr->u.aeth = ipath_compute_aeth(qp);
                         hwords++;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+                        qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
                 }
                 bth0 = qp->s_ack_state << 24;
                 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -188,7 +192,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
         }
         qp->s_hdrwords = hwords;
         qp->s_cur_size = len;
-        *bth0p = bth0;
+        *bth0p = bth0 | (1 << 22); /* Set M bit */
         *bth2p = bth2;
         return 1;
 
@@ -240,7 +244,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 
         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
         hwords = 5;
-        bth0 = 0;
+        bth0 = 1 << 22; /* Set M bit */
 
         /* Send a request. */
         wqe = get_swqe_ptr(qp, qp->s_cur);
@@ -604,7 +608,7 @@ static void send_rc_ack(struct ipath_qp *qp)
         }
         /* read pkey_index w/o lock (its atomic) */
         bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
-                OP(ACKNOWLEDGE) << 24;
+                (OP(ACKNOWLEDGE) << 24) | (1 << 22);
         if (qp->r_nak_state)
                 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
                                            (qp->r_nak_state <<
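
Several hunks in ipath_rc.c (and one in ipath_uc.c below) now OR "1 << 22" into bth0. In the IBA base transport header the first 32-bit word packs OpCode in bits 31:24, SE in bit 23, the MigReq (M) bit in bit 22, PadCnt in 21:20, TVer in 19:16 and the P_Key in 15:0, so bit 22 is the M bit being set here. A self-contained sketch of packing that word, under the assumption of that layout; the helper and the opcode constant are illustrative, not a kernel API:

/* Illustrative sketch, not driver code; assumes the IBA BTH dword-0 layout. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_bth0(uint8_t opcode, int se, int migreq,
                          unsigned padcnt, unsigned tver, uint16_t pkey)
{
        return ((uint32_t)opcode << 24) |
               ((uint32_t)(se ? 1 : 0) << 23) |
               ((uint32_t)(migreq ? 1 : 0) << 22) |
               ((padcnt & 0x3u) << 20) |
               ((tver & 0xfu) << 16) |
               pkey;
}

int main(void)
{
        /* 0x11 is used here only as an example RC ACKNOWLEDGE opcode value. */
        printf("bth0 = 0x%08x\n", pack_bth0(0x11, 0, 1, 0, 0, 0xffff));
        return 0;
}
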
@@ -806,13 +810,15 @@ static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
  * Called at interrupt level with the QP s_lock held and interrupts disabled.
  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
  */
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
+static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
+                     u64 val)
 {
         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
         struct ib_wc wc;
         struct ipath_swqe *wqe;
         int ret = 0;
         u32 ack_psn;
+        int diff;
 
         /*
          * Remove the QP from the timeout queue (or RNR timeout queue).
@@ -840,7 +846,19 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
          * The MSN might be for a later WQE than the PSN indicates so
          * only complete WQEs that the PSN finishes.
          */
-        while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
+        while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+                /*
+                 * RDMA_READ_RESPONSE_ONLY is a special case since
+                 * we want to generate completion events for everything
+                 * before the RDMA read, copy the data, then generate
+                 * the completion for the read.
+                 */
+                if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+                    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+                    diff == 0) {
+                        ret = 1;
+                        goto bail;
+                }
                 /*
                  * If this request is a RDMA read or atomic, and the ACK is
                  * for a later operation, this ACK NAKs the RDMA read or
@@ -851,12 +869,10 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                  * is sent but before the response is received.
                  */
                 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-                     (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
-                      ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
+                     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
                     ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                       wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
-                     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
-                      ipath_cmp24(wqe->psn, psn) != 0))) {
+                     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
                         /*
                          * The last valid PSN seen is the previous
                          * request's.
@@ -870,6 +886,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
                          */
                         goto bail;
                 }
+                if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+                        *(u64 *) wqe->sg_list[0].vaddr = val;
                 if (qp->s_num_rd_atomic &&
                     (wqe->wr.opcode == IB_WR_RDMA_READ ||
                      wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
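
do_rc_ack() now keeps the result of ipath_cmp24() in "diff" and reuses it for the special-case checks above. PSNs are 24-bit sequence numbers, so the comparison has to be wraparound-safe; a self-contained sketch of one common way to do that (cmp24() below is an illustration, not the driver's ipath_cmp24()):

/* Illustrative sketch, not driver code; relies on two's-complement
 * arithmetic and an arithmetic right shift, as kernel code typically does. */
#include <stdint.h>
#include <stdio.h>

static int cmp24(uint32_t a, uint32_t b)
{
        int32_t d = (int32_t)((a - b) << 8);    /* keep only 24 bits of difference */

        return d >> 8;                          /* sign-extend back down */
}

int main(void)
{
        printf("%d %d %d\n",
               cmp24(5, 3),                     /* > 0: a is after b */
               cmp24(3, 3),                     /* == 0 */
               cmp24(0x000001, 0xfffffe));      /* > 0: wrapped around modulo 2^24 */
        return 0;
}
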
@@ -1079,6 +1098,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
         int diff;
         u32 pad;
         u32 aeth;
+        u64 val;
 
         spin_lock_irqsave(&qp->s_lock, flags);
 
@@ -1118,8 +1138,6 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                         data += sizeof(__be32);
                 }
                 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
-                        u64 val;
-
                         if (!header_in_data) {
                                 __be32 *p = ohdr->u.at.atomic_ack_eth;
 
@@ -1127,12 +1145,13 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                                         be32_to_cpu(p[1]);
                         } else
                                 val = be64_to_cpu(((__be64 *) data)[0]);
-                        *(u64 *) wqe->sg_list[0].vaddr = val;
-                }
-                if (!do_rc_ack(qp, aeth, psn, opcode) ||
+                } else
+                        val = 0;
+                if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
                     opcode != OP(RDMA_READ_RESPONSE_FIRST))
                         goto ack_done;
                 hdrsize += 4;
+                wqe = get_swqe_ptr(qp, qp->s_last);
                 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
                         goto ack_op_err;
                 /*
@@ -1176,13 +1195,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                 goto bail;
 
         case OP(RDMA_READ_RESPONSE_ONLY):
-                if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-                        dev->n_rdma_seq++;
-                        ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+                if (!header_in_data)
+                        aeth = be32_to_cpu(ohdr->u.aeth);
+                else
+                        aeth = be32_to_cpu(((__be32 *) data)[0]);
+                if (!do_rc_ack(qp, aeth, psn, opcode, 0))
                         goto ack_done;
-                }
-                if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                        goto ack_op_err;
                 /* Get the number of bytes the message was padded by. */
                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                 /*
@@ -1197,6 +1215,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                  * have to be careful to copy the data to the right
                  * location.
                  */
+                wqe = get_swqe_ptr(qp, qp->s_last);
                 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
                                                   wqe, psn, pmtu);
                 goto read_last;
@@ -1230,7 +1249,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                         data += sizeof(__be32);
                 }
                 ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
-                (void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST));
+                (void) do_rc_ack(qp, aeth, psn,
+                                 OP(RDMA_READ_RESPONSE_LAST), 0);
                 goto ack_done;
         }
 
@@ -1344,8 +1364,11 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
                         e = NULL;
                         break;
                 }
-                if (ipath_cmp24(psn, e->psn) >= 0)
+                if (ipath_cmp24(psn, e->psn) >= 0) {
+                        if (prev == qp->s_tail_ack_queue)
+                                old_req = 0;
                         break;
+                }
         }
         switch (opcode) {
         case OP(RDMA_READ_REQUEST): {
@@ -1460,6 +1483,22 @@ static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
         spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
+static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
+{
+        unsigned long flags;
+        unsigned next;
+
+        next = n + 1;
+        if (next > IPATH_MAX_RDMA_ATOMIC)
+                next = 0;
+        spin_lock_irqsave(&qp->s_lock, flags);
+        if (n == qp->s_tail_ack_queue) {
+                qp->s_tail_ack_queue = next;
+                qp->s_ack_state = OP(ACKNOWLEDGE);
+        }
+        spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
 /**
  * ipath_rc_rcv - process an incoming RC packet
  * @dev: the device this packet came in on
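
ipath_update_ack_queue() retires the oldest entry of the responder's ack queue so that an incoming RDMA READ or atomic request can reuse its slot, but only once that entry has actually been sent; otherwise the request is NAKed, as the s_ack_queue hunks further down show. A simplified standalone model of that ring discipline (the names, sizes and ring_push() helper below are made up for the example, not the driver's):

/* Illustrative sketch, not driver code. */
#include <stdio.h>

#define MAX_OUTSTANDING 4               /* stand-in for IPATH_MAX_RDMA_ATOMIC */

struct entry { int sent; int psn; };

struct ring {
        struct entry slot[MAX_OUTSTANDING + 1];
        unsigned head, tail;
};

static unsigned ring_next(unsigned i)
{
        return (i + 1 > MAX_OUTSTANDING) ? 0 : i + 1;
}

/* Returns 0 when a new request must be NAKed because the oldest queued
 * response has not gone out yet; otherwise queues the request. */
static int ring_push(struct ring *r, int psn)
{
        unsigned next = ring_next(r->head);

        if (next == r->tail) {
                if (!r->slot[next].sent)
                        return 0;               /* would overwrite unsent work */
                r->tail = ring_next(r->tail);   /* retire the already-sent entry */
        }
        r->slot[r->head].psn = psn;
        r->slot[r->head].sent = 0;
        r->head = next;
        return 1;
}

int main(void)
{
        struct ring r = { .head = 0, .tail = 0 };

        for (int psn = 0; psn < 6; psn++)
                printf("push psn %d -> %s\n", psn,
                       ring_push(&r, psn) ? "queued" : "NAK");
        return 0;
}
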
@@ -1672,6 +1711,9 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
         case OP(RDMA_WRITE_FIRST):
         case OP(RDMA_WRITE_ONLY):
         case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+                if (unlikely(!(qp->qp_access_flags &
+                               IB_ACCESS_REMOTE_WRITE)))
+                        goto nack_inv;
                 /* consume RWQE */
                 /* RETH comes after BTH */
                 if (!header_in_data)
@@ -1701,9 +1743,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                         qp->r_sge.sge.length = 0;
                         qp->r_sge.sge.sge_length = 0;
                 }
-                if (unlikely(!(qp->qp_access_flags &
-                               IB_ACCESS_REMOTE_WRITE)))
-                        goto nack_acc;
                 if (opcode == OP(RDMA_WRITE_FIRST))
                         goto send_middle;
                 else if (opcode == OP(RDMA_WRITE_ONLY))
@@ -1717,13 +1756,17 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                 u32 len;
                 u8 next;
 
-                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
-                        goto nack_acc;
+                if (unlikely(!(qp->qp_access_flags &
+                               IB_ACCESS_REMOTE_READ)))
+                        goto nack_inv;
                 next = qp->r_head_ack_queue + 1;
                 if (next > IPATH_MAX_RDMA_ATOMIC)
                         next = 0;
-                if (unlikely(next == qp->s_tail_ack_queue))
-                        goto nack_inv;
+                if (unlikely(next == qp->s_tail_ack_queue)) {
+                        if (!qp->s_ack_queue[next].sent)
+                                goto nack_inv;
+                        ipath_update_ack_queue(qp, next);
+                }
                 e = &qp->s_ack_queue[qp->r_head_ack_queue];
                 /* RETH comes after BTH */
                 if (!header_in_data)
@@ -1758,6 +1801,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                         e->rdma_sge.sge.sge_length = 0;
                 }
                 e->opcode = opcode;
+                e->sent = 0;
                 e->psn = psn;
                 /*
                  * We need to increment the MSN here instead of when we
@@ -1789,12 +1833,15 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 
                 if (unlikely(!(qp->qp_access_flags &
                                IB_ACCESS_REMOTE_ATOMIC)))
-                        goto nack_acc;
+                        goto nack_inv;
                 next = qp->r_head_ack_queue + 1;
                 if (next > IPATH_MAX_RDMA_ATOMIC)
                         next = 0;
-                if (unlikely(next == qp->s_tail_ack_queue))
-                        goto nack_inv;
+                if (unlikely(next == qp->s_tail_ack_queue)) {
+                        if (!qp->s_ack_queue[next].sent)
+                                goto nack_inv;
+                        ipath_update_ack_queue(qp, next);
+                }
                 if (!header_in_data)
                         ateth = &ohdr->u.atomic_eth;
                 else
@@ -1819,6 +1866,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                                               be64_to_cpu(ateth->compare_data),
                                               sdata);
                 e->opcode = opcode;
+                e->sent = 0;
                 e->psn = psn & IPATH_PSN_MASK;
                 qp->r_msn++;
                 qp->r_psn++;
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index c182bcd62098..708eba3165d7 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index d9c2a9b15d86..85256747d8a1 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -194,6 +194,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                 ret = 0;
                 goto bail;
         }
+        /* Make sure entry is read after head index is read. */
+        smp_rmb();
         wqe = get_rwqe_ptr(rq, tail);
         if (++tail >= rq->size)
                 tail = 0;
@@ -267,7 +269,7 @@ again:
         spin_lock_irqsave(&sqp->s_lock, flags);
 
         if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
-            qp->s_rnr_timeout) {
+            sqp->s_rnr_timeout) {
                 spin_unlock_irqrestore(&sqp->s_lock, flags);
                 goto done;
         }
@@ -319,12 +321,22 @@ again:
                 break;
 
         case IB_WR_RDMA_WRITE_WITH_IMM:
+                if (unlikely(!(qp->qp_access_flags &
+                               IB_ACCESS_REMOTE_WRITE))) {
+                        wc.status = IB_WC_REM_INV_REQ_ERR;
+                        goto err;
+                }
                 wc.wc_flags = IB_WC_WITH_IMM;
                 wc.imm_data = wqe->wr.imm_data;
                 if (!ipath_get_rwqe(qp, 1))
                         goto rnr_nak;
                 /* FALLTHROUGH */
         case IB_WR_RDMA_WRITE:
+                if (unlikely(!(qp->qp_access_flags &
+                               IB_ACCESS_REMOTE_WRITE))) {
+                        wc.status = IB_WC_REM_INV_REQ_ERR;
+                        goto err;
+                }
                 if (wqe->length == 0)
                         break;
                 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
@@ -354,8 +366,10 @@ again:
 
         case IB_WR_RDMA_READ:
                 if (unlikely(!(qp->qp_access_flags &
-                               IB_ACCESS_REMOTE_READ)))
-                        goto acc_err;
+                               IB_ACCESS_REMOTE_READ))) {
+                        wc.status = IB_WC_REM_INV_REQ_ERR;
+                        goto err;
+                }
                 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
                                             wqe->wr.wr.rdma.remote_addr,
                                             wqe->wr.wr.rdma.rkey,
@@ -369,8 +383,10 @@ again:
         case IB_WR_ATOMIC_CMP_AND_SWP:
         case IB_WR_ATOMIC_FETCH_AND_ADD:
                 if (unlikely(!(qp->qp_access_flags &
-                               IB_ACCESS_REMOTE_ATOMIC)))
-                        goto acc_err;
+                               IB_ACCESS_REMOTE_ATOMIC))) {
+                        wc.status = IB_WC_REM_INV_REQ_ERR;
+                        goto err;
+                }
                 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
                                             wqe->wr.wr.atomic.remote_addr,
                                             wqe->wr.wr.atomic.rkey,
@@ -396,6 +412,8 @@ again:
 
                 if (len > sge->length)
                         len = sge->length;
+                if (len > sge->sge_length)
+                        len = sge->sge_length;
                 BUG_ON(len == 0);
                 ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
                 sge->vaddr += len;
@@ -503,11 +521,9 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
          * could be called. If we are still in the tasklet function,
          * tasklet_hi_schedule() will not call us until the next time
          * tasklet_hi_schedule() is called.
-         * We clear the tasklet flag now since we are committing to return
-         * from the tasklet function.
+         * We leave the busy flag set so that another post send doesn't
+         * try to put the same QP on the piowait list again.
          */
-        clear_bit(IPATH_S_BUSY, &qp->s_busy);
-        tasklet_unlock(&qp->s_task);
         want_buffer(dev->dd);
         dev->n_piowait++;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 03acae66ba81..40c36ec19016 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -80,6 +80,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                 wqe->num_sge = wr->num_sge;
                 for (i = 0; i < wr->num_sge; i++)
                         wqe->sg_list[i] = wr->sg_list[i];
+                /* Make sure queue entry is written before the head index. */
+                smp_wmb();
                 wq->head = next;
                 spin_unlock_irqrestore(&srq->rq.lock, flags);
         }
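
This smp_wmb() pairs with the smp_rmb() calls added in ipath_get_rwqe() above and in ipath_ud_loopback() below: the producer must make the receive-queue entry visible before it publishes the new head index, and the consumer must read the head before it trusts the entry. A portable, self-contained sketch of the same ordering, using C11 release/acquire atomics in place of the kernel barriers (queue layout and names are made up for the example):

/* Illustrative sketch, not driver code. */
#include <stdatomic.h>
#include <stdio.h>

#define QSIZE 8

static int slot[QSIZE];
static _Atomic unsigned head;           /* written by producer, read by consumer */
static unsigned tail;                   /* consumer-private */

static void produce(int value)
{
        unsigned h = atomic_load_explicit(&head, memory_order_relaxed);

        slot[h % QSIZE] = value;                        /* write the entry first */
        atomic_store_explicit(&head, h + 1,
                              memory_order_release);    /* then publish, like smp_wmb() */
}

static int consume(int *value)
{
        unsigned h = atomic_load_explicit(&head,
                                          memory_order_acquire);  /* like smp_rmb() */

        if (tail == h)
                return 0;                               /* empty */
        *value = slot[tail % QSIZE];                    /* entry is now guaranteed visible */
        tail++;
        return 1;
}

int main(void)
{
        int v;

        produce(42);
        if (consume(&v))
                printf("got %d\n", v);
        return 0;
}
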
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index d8b5e4cefe25..73ed17d03188 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -55,6 +55,7 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
         u64 val64;
         unsigned long t0, t1;
         u64 ret;
+        unsigned long flags;
 
         t0 = jiffies;
         /* If fast increment counters are only 32 bits, snapshot them,
@@ -91,12 +92,18 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
         if (creg == dd->ipath_cregs->cr_wordsendcnt) {
                 if (val != dd->ipath_lastsword) {
                         dd->ipath_sword += val - dd->ipath_lastsword;
+                        spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+                        dd->ipath_traffic_wds += val - dd->ipath_lastsword;
+                        spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
                         dd->ipath_lastsword = val;
                 }
                 val64 = dd->ipath_sword;
         } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
                 if (val != dd->ipath_lastrword) {
                         dd->ipath_rword += val - dd->ipath_lastrword;
+                        spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+                        dd->ipath_traffic_wds += val - dd->ipath_lastrword;
+                        spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
                         dd->ipath_lastrword = val;
                 }
                 val64 = dd->ipath_rword;
@@ -200,6 +207,7 @@ void ipath_get_faststats(unsigned long opaque)
         struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
         u32 val;
         static unsigned cnt;
+        unsigned long flags;
 
         /*
          * don't access the chip while running diags, or memory diags can
@@ -210,9 +218,20 @@ void ipath_get_faststats(unsigned long opaque)
                 /* but re-arm the timer, for diags case; won't hurt other */
                 goto done;
 
+        /*
+         * We now try to maintain a "active timer", based on traffic
+         * exceeding a threshold, so we need to check the word-counts
+         * even if they are 64-bit.
+         */
+        ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+        ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+        spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+        if (dd->ipath_traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
+                atomic_add(5, &dd->ipath_active_time); /* S/B #define */
+        dd->ipath_traffic_wds = 0;
+        spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
+
         if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
-                ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-                ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
         }
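
The block added to ipath_get_faststats() accumulates the send and receive word counts under ipath_eep_st_lock and credits the 5-second polling interval as "active" only when the total crosses IPATH_TRAFFIC_ACTIVE_THRESHOLD, then starts a fresh window. A rough standalone model of that accounting (the constants and names below are placeholders, not the driver's):

/* Illustrative sketch, not driver code. */
#include <stdio.h>

#define TICK_SECONDS      5
#define ACTIVE_THRESHOLD  1000          /* stand-in for IPATH_TRAFFIC_ACTIVE_THRESHOLD */

static unsigned long traffic_words;     /* accumulated as counters are snapshotted */
static unsigned long active_seconds;    /* what eventually feeds the EEPROM log */

static void timer_tick(void)
{
        if (traffic_words >= ACTIVE_THRESHOLD)
                active_seconds += TICK_SECONDS;
        traffic_words = 0;              /* start a fresh accumulation window */
}

int main(void)
{
        traffic_words += 250;   timer_tick();   /* quiet interval, not counted */
        traffic_words += 5000;  timer_tick();   /* busy interval, counted */
        printf("active for %lu seconds\n", active_seconds);
        return 0;
}
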
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 4dc398d5e011..16238cd3a036 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -596,6 +596,43 @@ bail:
         return ret;
 }
 
+static ssize_t store_led_override(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf,
+                                  size_t count)
+{
+        struct ipath_devdata *dd = dev_get_drvdata(dev);
+        int ret;
+        u16 val;
+
+        ret = ipath_parse_ushort(buf, &val);
+        if (ret > 0)
+                ipath_set_led_override(dd, val);
+        else
+                ipath_dev_err(dd, "attempt to set invalid LED override\n");
+        return ret;
+}
+
+static ssize_t show_logged_errs(struct device *dev,
+                                struct device_attribute *attr,
+                                char *buf)
+{
+        struct ipath_devdata *dd = dev_get_drvdata(dev);
+        int idx, count;
+
+        /* force consistency with actual EEPROM */
+        if (ipath_update_eeprom_log(dd) != 0)
+                return -ENXIO;
+
+        count = 0;
+        for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
+                count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+                        dd->ipath_eep_st_errs[idx],
+                        idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
+        }
+
+        return count;
+}
 
 static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
 static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
@@ -625,6 +662,8 @@ static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
 static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
 static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
+static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 
 static struct attribute *dev_attributes[] = {
         &dev_attr_guid.attr,
@@ -641,6 +680,8 @@ static struct attribute *dev_attributes[] = {
         &dev_attr_unit.attr,
         &dev_attr_enabled.attr,
         &dev_attr_rx_pol_inv.attr,
+        &dev_attr_led_override.attr,
+        &dev_attr_logged_errors.attr,
         NULL
 };
 
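
store_led_override() above relies on ipath_parse_ushort() to turn the sysfs buffer into a small integer and reject garbage before the value reaches the hardware. A userspace sketch of that parse-and-validate pattern, assuming the usual strtoul() semantics; parse_ushort() here is a stand-in for illustration, not the driver's helper:

/* Illustrative sketch, not driver code. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_ushort(const char *buf, unsigned short *out)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(buf, &end, 0);
        if (errno || end == buf || val > 0xffff)
                return -EINVAL;
        while (*end == '\n' || *end == ' ')
                end++;                          /* sysfs writes usually end in '\n' */
        if (*end != '\0')
                return -EINVAL;
        *out = (unsigned short)val;
        return (int)(end - buf);                /* bytes consumed, like a store method */
}

int main(void)
{
        unsigned short v;
        int ret = parse_ushort("3\n", &v);

        if (ret > 0)
                printf("parsed %u (consumed %d bytes)\n", v, ret);
        return 0;
}
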
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 1c2b03c2ef5e..8380fbc50d2c 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -58,7 +58,6 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
                 wc->port_num = 0;
                 ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
         }
-        wqe = get_swqe_ptr(qp, qp->s_last);
 }
 
 /**
@@ -87,7 +86,7 @@ int ipath_make_uc_req(struct ipath_qp *qp,
 
         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
         hwords = 5;
-        bth0 = 0;
+        bth0 = 1 << 22; /* Set M bit */
 
         /* Get the next send request. */
         wqe = get_swqe_ptr(qp, qp->s_last);
@@ -97,8 +96,10 @@ int ipath_make_uc_req(struct ipath_qp *qp,
          * Signal the completion of the last send
          * (if there is one).
          */
-        if (qp->s_last != qp->s_tail)
+        if (qp->s_last != qp->s_tail) {
                 complete_last_send(qp, wqe, &wc);
+                wqe = get_swqe_ptr(qp, qp->s_last);
+        }
 
         /* Check if send work queue is empty. */
         if (qp->s_tail == qp->s_head)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index a518f7c8fa83..f9a3338a5fb7 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -176,6 +176,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
                 dev->n_pkt_drops++;
                 goto bail_sge;
         }
+        /* Make sure entry is read after head index is read. */
+        smp_rmb();
         wqe = get_rwqe_ptr(rq, tail);
         if (++tail >= rq->size)
                 tail = 0;
@@ -231,6 +233,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 
                 if (len > length)
                         len = length;
+                if (len > sge->sge_length)
+                        len = sge->sge_length;
                 BUG_ON(len == 0);
                 ipath_copy_sge(&rsge, sge->vaddr, len);
                 sge->vaddr += len;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 8536aeb96af8..27034d38b3dd 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index bb70845279b8..65f7181e9cf8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -164,9 +164,11 @@ void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
         while (length) {
                 u32 len = sge->length;
 
-                BUG_ON(len == 0);
                 if (len > length)
                         len = length;
+                if (len > sge->sge_length)
+                        len = sge->sge_length;
+                BUG_ON(len == 0);
                 memcpy(sge->vaddr, data, len);
                 sge->vaddr += len;
                 sge->length -= len;
@@ -202,9 +204,11 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
         while (length) {
                 u32 len = sge->length;
 
-                BUG_ON(len == 0);
                 if (len > length)
                         len = length;
+                if (len > sge->sge_length)
+                        len = sge->sge_length;
+                BUG_ON(len == 0);
                 sge->vaddr += len;
                 sge->length -= len;
                 sge->sge_length -= len;
@@ -323,6 +327,8 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                 wqe->num_sge = wr->num_sge;
                 for (i = 0; i < wr->num_sge; i++)
                         wqe->sg_list[i] = wr->sg_list[i];
+                /* Make sure queue entry is written before the head index. */
+                smp_wmb();
                 wq->head = next;
                 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
         }
@@ -948,6 +954,7 @@ int ipath_ib_piobufavail(struct ipath_ibdev *dev)
                 qp = list_entry(dev->piowait.next, struct ipath_qp,
                                 piowait);
                 list_del_init(&qp->piowait);
+                clear_bit(IPATH_S_BUSY, &qp->s_busy);
                 tasklet_hi_schedule(&qp->s_task);
         }
         spin_unlock_irqrestore(&dev->pending_lock, flags);
@@ -981,6 +988,8 @@ static int ipath_query_device(struct ib_device *ibdev,
         props->max_ah = ib_ipath_max_ahs;
         props->max_cqe = ib_ipath_max_cqes;
         props->max_mr = dev->lk_table.max;
+        props->max_fmr = dev->lk_table.max;
+        props->max_map_per_fmr = 32767;
         props->max_pd = ib_ipath_max_pds;
         props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
         props->max_qp_init_rd_atom = 255;
@@ -1051,7 +1060,12 @@ static int ipath_query_port(struct ib_device *ibdev,
         props->max_vl_num = 1;          /* VLCap = VL0 */
         props->init_type_reply = 0;
 
-        props->max_mtu = IB_MTU_4096;
+        /*
+         * Note: the chips support a maximum MTU of 4096, but the driver
+         * hasn't implemented this feature yet, so set the maximum value
+         * to 2048.
+         */
+        props->max_mtu = IB_MTU_2048;
         switch (dev->dd->ipath_ibmtu) {
         case 4096:
                 mtu = IB_MTU_4096;
@@ -1361,13 +1375,6 @@ static void __verbs_timer(unsigned long arg)
 {
         struct ipath_devdata *dd = (struct ipath_devdata *) arg;
 
-        /*
-         * If port 0 receive packet interrupts are not available, or
-         * can be missed, poll the receive queue
-         */
-        if (dd->ipath_flags & IPATH_POLL_RX_INTR)
-                ipath_kreceive(dd);
-
         /* Handle verbs layer timeouts. */
         ipath_ib_timer(dd->verbs_dev);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 088b837ebea8..f3d1f2cee6f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -321,6 +321,7 @@ struct ipath_sge_state {
  */
 struct ipath_ack_entry {
         u8 opcode;
+        u8 sent;
         u32 psn;
         union {
                 struct ipath_sge_state rdma_sge;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index dd691cfa5079..9e5abf9c309d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 0095bb70f34e..1d7bd82a1fb1 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 04696e62da87..3428acb0868c 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -63,12 +63,29 @@ int ipath_enable_wc(struct ipath_devdata *dd)
          * of 2 address matching the length (which has to be a power of 2).
          * For rev1, that means the base address, for rev2, it will be just
          * the PIO buffers themselves.
+         * For chips with two sets of buffers, the calculations are
+         * somewhat more complicated; we need to sum, and the piobufbase
+         * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
+         * The buffers are still packed, so a single range covers both.
          */
-        pioaddr = addr + dd->ipath_piobufbase;
-        piolen = (dd->ipath_piobcnt2k +
-                  dd->ipath_piobcnt4k) *
-                 ALIGN(dd->ipath_piobcnt2k +
-                       dd->ipath_piobcnt4k, dd->ipath_palign);
+        if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
+                unsigned long pio2kbase, pio4kbase;
+                pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
+                pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
+                if (pio2kbase < pio4kbase) { /* all, for now */
+                        pioaddr = addr + pio2kbase;
+                        piolen = pio4kbase - pio2kbase +
+                                dd->ipath_piobcnt4k * dd->ipath_4kalign;
+                } else {
+                        pioaddr = addr + pio4kbase;
+                        piolen = pio2kbase - pio4kbase +
+                                dd->ipath_piobcnt2k * dd->ipath_palign;
+                }
+        } else { /* single buffer size (2K, currently) */
+                pioaddr = addr + dd->ipath_piobufbase;
+                piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
+                        dd->ipath_piobcnt4k * dd->ipath_4kalign;
+        }
 
         for (bits = 0; !(piolen & (1ULL << bits)); bits++)
                 /* do nothing */ ;