| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-11-12 13:56:31 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-11-12 13:56:31 -0500 |
| commit | 3edac25f2e8ac8c2a84904c140e1aeb434e73e75 | |
| tree | f1defee02ebca2d0972adf9099881e56298c5b88 | |
| parent | 504765f3b020f15e88bc1334d5b3e0a6e849b1cd | |
| parent | c35a2549642c45ba9085d8b6db4dd68d2b0de230 | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB: Fix crash in path_rec_completion()
IPoIB: Fix hang in ipoib_flush_paths()
IPoIB: Don't enable NAPI when it's already enabled
RDMA/cxgb3: Fix deadlock in iw_cxgb3 (hang when configuring interface)
IB/ehca: Remove reference to special QP in case of port activation failure
IB/mlx4: Set umem field to NULL in mlx4_ib_alloc_fast_reg_mr()
mlx4_core: Fix unused variable warning
RDMA/nes: Mitigate compatibility issue regarding PCIe write credits
RDMA/nes: Fix CQ allocation scheme for multicast receive queue apps
RDMA/nes: Correct handling of PBL resources
RDMA/nes: Reindent mis-indented spinlocks
RDMA/cxgb3: Fix too-big reserved field zeroing in iwch_post_zb_read()
IB/ipath: Fix RDMA write with immediate copy of last packet
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_provider.c | 6 |
| -rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_qp.c | 1 |
| -rw-r--r-- | drivers/infiniband/hw/ehca/ehca_irq.c | 44 |
| -rw-r--r-- | drivers/infiniband/hw/ehca/ehca_qp.c | 5 |
| -rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ruc.c | 10 |
| -rw-r--r-- | drivers/infiniband/hw/mlx4/mr.c | 1 |
| -rw-r--r-- | drivers/infiniband/hw/nes/nes.c | 16 |
| -rw-r--r-- | drivers/infiniband/hw/nes/nes_hw.h | 1 |
| -rw-r--r-- | drivers/infiniband/hw/nes/nes_verbs.c | 64 |
| -rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_main.c | 6 |
| -rw-r--r-- | drivers/net/cxgb3/cxgb3_main.c | 2 |
| -rw-r--r-- | drivers/net/mlx4/mlx4.h | 9 |
12 files changed, 108 insertions, 57 deletions
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ecff98043589..160ef482712d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1102,9 +1102,7 @@ static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
 	char *cp, *next;
 	unsigned fw_maj, fw_min, fw_mic;
 
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 
 	next = info.fw_version + 1;
 	cp = strsep(&next, ".");
@@ -1192,9 +1190,7 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.fw_version);
 }
 
@@ -1207,9 +1203,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.driver);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3e4585c2318a..19661b2f0406 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -745,7 +745,6 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
 	wqe->read.rdmaop = T3_READ_REQ;
 	wqe->read.reserved[0] = 0;
 	wqe->read.reserved[1] = 0;
-	wqe->read.reserved[2] = 0;
 	wqe->read.rem_stag = cpu_to_be32(1);
 	wqe->read.rem_to = cpu_to_be64(1);
 	wqe->read.local_stag = cpu_to_be32(1);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index cb55be04442c..757035ea246f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -359,36 +359,48 @@ static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
 	*old_attr = new_attr;
 }
 
+/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
+static int replay_modify_qp(struct ehca_sport *sport)
+{
+	int aqp1_destroyed;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+
+	aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
+
+	if (sport->ibqp_sqp[IB_QPT_SMI])
+		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+	if (!aqp1_destroyed)
+		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+
+	spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+	return aqp1_destroyed;
+}
+
 static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
 	u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
 	u8 spec_event;
 	struct ehca_sport *sport = &shca->sport[port - 1];
-	unsigned long flags;
 
 	switch (ec) {
 	case 0x30: /* port availability change */
 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-			int suppress_event;
-			/* replay modify_qp for sqps */
-			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
-			if (sport->ibqp_sqp[IB_QPT_SMI])
-				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-			if (!suppress_event)
-				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-			/* AQP1 was destroyed, ignore this event */
-			if (suppress_event)
-				break;
+			/* only replay modify_qp calls in autodetect mode;
+			 * if AQP1 was destroyed, the port is already down
+			 * again and we can drop the event.
+			 */
+			if (ehca_nr_ports < 0)
+				if (replay_modify_qp(sport))
+					break;
 
 			sport->port_state = IB_PORT_ACTIVE;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
 					    "is active");
-			ehca_query_sma_attr(shca, port,
-					    &sport->saved_attr);
+			ehca_query_sma_attr(shca, port, &sport->saved_attr);
 		} else {
 			sport->port_state = IB_PORT_DOWN;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4d54b9f64567..9e05ee2db39b 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -860,6 +860,11 @@ static struct ehca_qp *internal_create_qp(
 	if (qp_type == IB_QPT_GSI) {
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
+			kfree(my_qp->mod_qp_parm);
+			my_qp->mod_qp_parm = NULL;
+			/* the QP pointer is no longer valid */
+			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+				NULL;
 			ret = ehca2ib_return_code(h_ret);
 			goto create_qp_exit6;
 		}
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index fc0f6d9e6030..2296832f94da 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -156,7 +156,7 @@ bail:
 /**
  * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
- * @wr_id_only: update wr_id only, not SGEs
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
  *
  * Return 0 if no RWQE is available, otherwise return 1.
 *
@@ -173,8 +173,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 	u32 tail;
 	int ret;
 
-	qp->r_sge.sg_list = qp->r_sg_list;
-
 	if (qp->ibqp.srq) {
 		srq = to_isrq(qp->ibqp.srq);
 		handler = srq->ibsrq.event_handler;
@@ -206,8 +204,10 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		wqe = get_rwqe_ptr(rq, tail);
 		if (++tail >= rq->size)
 			tail = 0;
-	} while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
-						&qp->r_sge));
+		if (wr_id_only)
+			break;
+		qp->r_sge.sg_list = qp->r_sg_list;
+	} while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
 	qp->r_wr_id = wqe->wr_id;
 	wq->tail = tail;
 
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 87f5c5a87b98..8e4d26d56a95 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -205,6 +205,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 		goto err_mr;
 
 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+	mr->umem = NULL;
 
 	return &mr->ibmr;
 
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a2b04d62b1a4..aa1dc41f04c8 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -95,6 +95,10 @@ unsigned int wqm_quanta = 0x10000;
 module_param(wqm_quanta, int, 0644);
 MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
 
+static unsigned int limit_maxrdreqsz;
+module_param(limit_maxrdreqsz, bool, 0644);
+MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
@@ -588,6 +592,18 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 			nesdev->nesadapter->port_count;
 	}
 
+	if ((limit_maxrdreqsz ||
+	     ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
+	      (hw_rev == NE020_REV1))) &&
+	    (pcie_get_readrq(pcidev) > 256)) {
+		if (pcie_set_readrq(pcidev, 256))
+			printk(KERN_ERR PFX "Unable to set max read request"
+				" to 256 bytes\n");
+		else
+			nes_debug(NES_DBG_INIT, "Max read request size set"
+				" to 256 bytes\n");
+	}
+
 	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 
 	/* bring up the Control QP */
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 610b9d859597..bc0b4de04450 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -40,6 +40,7 @@
 #define NES_PHY_TYPE_ARGUS 4
 #define NES_PHY_TYPE_PUMA_1G 5
 #define NES_PHY_TYPE_PUMA_10G 6
+#define NES_PHY_TYPE_GLADIUS 7
 
 #define NES_MULTICAST_PF_MAX 8
 
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 932e56fcf774..d36c9a0bf1bb 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -220,14 +220,14 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
-        spin_lock_irqsave(&nesqp->lock, flags);
+	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.sq_head;
 	qsize = nesqp->hwqp.sq_tail;
 
 	/* Check for SQ overflow */
 	if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
-                spin_unlock_irqrestore(&nesqp->lock, flags);
+		spin_unlock_irqrestore(&nesqp->lock, flags);
 		return -EINVAL;
 	}
 
@@ -269,7 +269,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
 	nes_write32(nesdev->regs+NES_WQE_ALLOC,
 			(1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
 
-        spin_unlock_irqrestore(&nesqp->lock, flags);
+	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	return 0;
 }
@@ -349,7 +349,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 		if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
-			goto failed_vpbl_alloc;
+			goto failed_vpbl_avail;
 		} else {
 			nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
 		}
@@ -357,7 +357,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 		if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
-			goto failed_vpbl_alloc;
+			goto failed_vpbl_avail;
 		} else {
 			nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
 		}
@@ -391,14 +391,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			goto failed_vpbl_alloc;
 		}
 
-		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
 		if (!nesfmr->root_vpbl.leaf_vpbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
 			goto failed_leaf_vpbl_alloc;
 		}
 
-		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
 		nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
 				" leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
 				nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
@@ -519,6 +519,16 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			nesfmr->root_vpbl.pbl_pbase);
 
 failed_vpbl_alloc:
+	if (nesfmr->nesmr.pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesfmr->nesmr.pbl_4k)
+			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+		else
+			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+
+failed_vpbl_avail:
 	kfree(nesfmr);
 
 failed_fmr_alloc:
@@ -534,18 +544,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
  */
 static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 {
+	unsigned long flags;
 	struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
 	struct nes_fmr *nesfmr = to_nesfmr(nesmr);
 	struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
 	struct nes_device *nesdev = nesvnic->nesdev;
-	struct nes_mr temp_nesmr = *nesmr;
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i = 0;
 
-	temp_nesmr.ibmw.device = ibfmr->device;
-	temp_nesmr.ibmw.pd = ibfmr->pd;
-	temp_nesmr.ibmw.rkey = ibfmr->rkey;
-	temp_nesmr.ibmw.uobject = NULL;
-
 	/* free the resources */
 	if (nesfmr->leaf_pbl_cnt == 0) {
 		/* single PBL case */
@@ -561,8 +567,24 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 		pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
 				nesfmr->root_vpbl.pbl_pbase);
 	}
+	nesmr->ibmw.device = ibfmr->device;
+	nesmr->ibmw.pd = ibfmr->pd;
+	nesmr->ibmw.rkey = ibfmr->rkey;
+	nesmr->ibmw.uobject = NULL;
 
-	return nes_dealloc_mw(&temp_nesmr.ibmw);
+	if (nesfmr->nesmr.pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesfmr->nesmr.pbl_4k) {
+			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+			WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
+		} else {
+			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+			WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
+		}
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+
+	return nes_dealloc_mw(&nesmr->ibmw);
 }
 
 
@@ -1595,7 +1617,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
 		nes_ucontext->mcrqf = req.mcrqf;
 		if (nes_ucontext->mcrqf) {
 			if (nes_ucontext->mcrqf & 0x80000000)
-				nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+				nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
 			else if (nes_ucontext->mcrqf & 0x40000000)
 				nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
 			else
@@ -3212,7 +3234,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
-        spin_lock_irqsave(&nesqp->lock, flags);
+	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.sq_head;
 
@@ -3337,7 +3359,7 @@
 			(counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
 	}
 
-        spin_unlock_irqrestore(&nesqp->lock, flags);
+	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3368,7 +3390,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
-        spin_lock_irqsave(&nesqp->lock, flags);
+	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.rq_head;
 
@@ -3421,7 +3443,7 @@
 		nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
 	}
 
-        spin_unlock_irqrestore(&nesqp->lock, flags);
+	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3453,7 +3475,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
 	nes_debug(NES_DBG_CQ, "\n");
 
-        spin_lock_irqsave(&nescq->lock, flags);
+	spin_lock_irqsave(&nescq->lock, flags);
 
 	head = nescq->hw_cq.cq_head;
 	cq_size = nescq->hw_cq.cq_size;
@@ -3562,7 +3584,7 @@
 	nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
 			cqe_count, nescq->hw_cq.cq_number);
 
-        spin_unlock_irqrestore(&nescq->lock, flags);
+	spin_unlock_irqrestore(&nescq->lock, flags);
 
 	return cqe_count;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fddded7900d1..85257f6b9576 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,12 +106,13 @@ int ipoib_open(struct net_device *dev)
 
 	ipoib_dbg(priv, "bringing up interface\n");
 
-	napi_enable(&priv->napi);
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
 	if (ipoib_pkey_dev_delay_open(dev))
 		return 0;
 
+	napi_enable(&priv->napi);
+
 	if (ipoib_ib_dev_open(dev)) {
 		napi_disable(&priv->napi);
 		return -EINVAL;
@@ -546,6 +547,7 @@ static int path_rec_start(struct net_device *dev,
 	if (path->query_id < 0) {
 		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
 		path->query = NULL;
+		complete(&path->done);
 		return path->query_id;
 	}
 
@@ -662,7 +664,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 		skb_push(skb, sizeof *phdr);
 		__skb_queue_tail(&path->queue, skb);
 
-		if (path_rec_start(dev, path)) {
+		if (!path->query && path_rec_start(dev, path)) {
 			spin_unlock_irqrestore(&priv->lock, flags);
 			path_free(dev, path);
 			return;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 1ace41a13ac3..5e663ccda4d4 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1307,8 +1307,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 	u32 fw_vers = 0;
 	u32 tp_vers = 0;
 
+	spin_lock(&adapter->stats_lock);
 	t3_get_fw_version(adapter, &fw_vers);
 	t3_get_tp_version(adapter, &tp_vers);
+	spin_unlock(&adapter->stats_lock);
 
 	strcpy(info->driver, DRV_NAME);
 	strcpy(info->version, DRV_VERSION);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index fa431fad0eec..56a2e213fe62 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -87,6 +87,9 @@ enum {
 
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
+#else /* CONFIG_MLX4_DEBUG */
+#define mlx4_debug_level (0)
+#endif /* CONFIG_MLX4_DEBUG */
 
 #define mlx4_dbg(mdev, format, arg...) \
 	do { \
@@ -94,12 +97,6 @@ extern int mlx4_debug_level;
 			dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
 	} while (0)
 
-#else /* CONFIG_MLX4_DEBUG */
-
-#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
-
-#endif /* CONFIG_MLX4_DEBUG */
-
 #define mlx4_err(mdev, format, arg...) \
 	dev_err(&mdev->pdev->dev, format, ## arg)
 #define mlx4_info(mdev, format, arg...) \
