Diffstat (limited to 'drivers/infiniband')

 -rw-r--r--  drivers/infiniband/hw/amso1100/c2.c            |   4
 -rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c       |   3
 -rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c               |   1
 -rw-r--r--  drivers/infiniband/hw/mlx4/main.c              | 189
 -rw-r--r--  drivers/infiniband/hw/mlx5/Kconfig             |   2
 -rw-r--r--  drivers/infiniband/hw/mlx5/main.c              |  26
 -rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                |  18
 -rw-r--r--  drivers/infiniband/hw/mlx5/user.h              |   7
 -rw-r--r--  drivers/infiniband/hw/nes/nes.c                |   5
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c     |   2
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c    |   4
 -rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c        |   5
 -rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c  |   9
 -rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c   |   3
 -rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c       |  10
 -rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c        | 181
 -rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h        |   7
 -rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c          |  14

 18 files changed, 332 insertions, 158 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 
 	/* Initialize network device */
 	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+		ret = -ENOMEM;
 		iounmap(mmio_regs);
 		goto bail4;
 	}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 		goto bail10;
 	}
 
-	if (c2_register_device(c2dev))
+	ret = c2_register_device(c2dev);
+	if (ret)
 		goto bail10;
 
 	return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
 		goto bail4;
 
 	/* Initialize cached the adapter limits */
-	if (c2_rnic_query(c2dev, &c2dev->props))
+	err = c2_rnic_query(c2dev, &c2dev->props);
+	if (err)
 		goto bail5;
 
 	/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto free_dst;
 	}
 
+	neigh_release(neigh);
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
 	window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f549f10..f9c12e92fdd6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -53,8 +53,8 @@
 #include "user.h"
 
 #define DRV_NAME	MLX4_IB_DRV_NAME
-#define DRV_VERSION	"1.0"
-#define DRV_RELDATE	"April 4, 2008"
+#define DRV_VERSION	"2.2-1"
+#define DRV_RELDATE	"Feb 2014"
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->active_width	= (((u8 *)mailbox->buf)[5] == 0x40) ?
 					IB_WIDTH_4X : IB_WIDTH_1X;
 	props->active_speed	= IB_SPEED_QDR;
-	props->port_cap_flags	= IB_PORT_CM_SUP;
+	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
 	props->pkey_tbl_len	= 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };
 
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+				     struct net_device *dev)
+{
+	memcpy(eui, dev->dev_addr, 3);
+	memcpy(eui + 5, dev->dev_addr + 3, 3);
+	if (vlan_id < 0x1000) {
+		eui[3] = vlan_id >> 8;
+		eui[4] = vlan_id & 0xff;
+	} else {
+		eui[3] = 0xff;
+		eui[4] = 0xfe;
+	}
+	eui[0] ^= 2;
+}
+
 static void update_gids_task(struct work_struct *work)
 {
 	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
 	struct mlx4_cmd_mailbox *mailbox;
 	union ib_gid *gids;
 	int err;
-	int i;
 	struct mlx4_dev	*dev = gw->dev->dev;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
 	gids = mailbox->buf;
 	memcpy(gids, gw->gids, sizeof(gw->gids));
 
-	for (i = 1; i < gw->dev->num_ports + 1; i++) {
-		if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
-		    IB_LINK_LAYER_ETHERNET) {
-			err = mlx4_cmd(dev, mailbox->dma,
-				       MLX4_SET_PORT_GID_TABLE << 8 | i,
-				       1, MLX4_CMD_SET_PORT,
-				       MLX4_CMD_TIME_CLASS_B,
-				       MLX4_CMD_WRAPPED);
-			if (err)
-				pr_warn(KERN_WARNING
-					"set port %d command failed\n", i);
-		}
+	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+	    IB_LINK_LAYER_ETHERNET) {
+		err = mlx4_cmd(dev, mailbox->dma,
+			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+			       1, MLX4_CMD_SET_PORT,
+			       MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_WRAPPED);
+		if (err)
+			pr_warn(KERN_WARNING
+				"set port %d command failed\n", gw->port);
 	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
 }
 
 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
-			    union ib_gid *gid, int clear)
+			    union ib_gid *gid, int clear,
+			    int default_gid)
 {
 	struct update_gid_work *work;
 	int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
 	int found = -1;
 	int max_gids;
 
-	max_gids = dev->dev->caps.gid_table_len[port];
-	for (i = 0; i < max_gids; ++i) {
-		if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
-			    sizeof(*gid)))
-			found = i;
-
-		if (clear) {
-			if (found >= 0) {
-				need_update = 1;
-				dev->iboe.gid_table[port - 1][found] = zgid;
-				break;
-			}
-		} else {
-			if (found >= 0)
-				break;
-
-			if (free < 0 &&
-			    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+	if (default_gid) {
+		free = 0;
+	} else {
+		max_gids = dev->dev->caps.gid_table_len[port];
+		for (i = 1; i < max_gids; ++i) {
+			if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
 				    sizeof(*gid)))
-				free = i;
+				found = i;
+
+			if (clear) {
+				if (found >= 0) {
+					need_update = 1;
+					dev->iboe.gid_table[port - 1][found] =
+						zgid;
+					break;
+				}
+			} else {
+				if (found >= 0)
+					break;
+
+				if (free < 0 &&
+				    !memcmp(&dev->iboe.gid_table[port - 1][i],
+					    &zgid, sizeof(*gid)))
+					free = i;
+			}
 		}
 	}
 
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
 	return 0;
 }
 
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
 {
-	struct update_gid_work *work;
+	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
 
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
+{
+	struct update_gid_work *work;
 
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work)
 		return -ENOMEM;
-	memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+	memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
 	memset(work->gids, 0, sizeof(work->gids));
 	INIT_WORK(&work->work, reset_gids_task);
 	work->dev = dev;
+	work->port = port;
 	queue_work(wq, &work->work);
 	return 0;
 }
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
 				rdma_vlan_dev_real_dev(event_netdev) :
 				event_netdev;
+	union ib_gid default_gid;
+
+	mlx4_make_default_gid(real_dev, &default_gid);
+
+	if (!memcmp(gid, &default_gid, sizeof(*gid)))
+		return 0;
 
 	if (event != NETDEV_DOWN && event != NETDEV_UP)
 		return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 	    (!netif_is_bond_master(real_dev) &&
 	     (real_dev == iboe->netdevs[port - 1])))
 		update_gid_table(ibdev, port, gid,
-				 event == NETDEV_DOWN);
+				 event == NETDEV_DOWN, 0);
 
 	spin_unlock(&iboe->lock);
 	return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 			rdma_vlan_dev_real_dev(dev) : dev;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
 
 	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 		     (real_dev == iboe->netdevs[port - 1])))
 			break;
 
-	spin_unlock(&iboe->lock);
-
 	if ((port == 0) || (port > MLX4_MAX_PORTS))
 		return 0;
 	else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 			/*ifa->ifa_address;*/
 			ipv6_addr_set_v4mapped(ifa->ifa_address,
 					       (struct in6_addr *)&gid);
-			update_gid_table(ibdev, port, &gid, 0);
+			update_gid_table(ibdev, port, &gid, 0, 0);
 		}
 		endfor_ifa(in_dev);
 		in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
-			update_gid_table(ibdev, port, pgid, 0);
+			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
 		in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 #endif
 }
 
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+				 struct net_device *dev, u8 port)
+{
+	union ib_gid gid;
+	mlx4_make_default_gid(dev, &gid);
+	update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 {
 	struct net_device *dev;
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	int i;
 
-	if (reset_gid_table(ibdev))
-		return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i)
+		if (reset_gid_table(ibdev, i))
+			return -1;
 
 	read_lock(&dev_base_lock);
+	spin_lock(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
 	}
 
+	spin_unlock(&iboe->lock);
 	read_unlock(&dev_base_lock);
 
 	return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
 
 	spin_lock(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+		enum ib_port_state	port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
+		struct net_device *curr_netdev;
 		struct net_device *curr_master;
+
 		iboe->netdevs[port - 1] =
 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+		if (iboe->netdevs[port - 1])
+			mlx4_ib_set_default_gid(ibdev,
+						iboe->netdevs[port - 1], port);
+		curr_netdev = iboe->netdevs[port - 1];
 
 		if (iboe->netdevs[port - 1] &&
 		    netif_is_bond_slave(iboe->netdevs[port - 1])) {
-			rtnl_lock();
 			iboe->masters[port - 1] = netdev_master_upper_dev_get(
 				iboe->netdevs[port - 1]);
-			rtnl_unlock();
+		} else {
+			iboe->masters[port - 1] = NULL;
 		}
 		curr_master = iboe->masters[port - 1];
 
+		if (curr_netdev) {
+			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+						IB_PORT_ACTIVE : IB_PORT_DOWN;
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		} else {
+			reset_gid_table(ibdev, port);
+		}
+		/* if using bonding/team and a slave port is down, we don't the bond IP
+		 * based gids in the table since flows that select port by gid may get
+		 * the down port.
+		 */
+		if (curr_master && (port_state == IB_PORT_DOWN)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+		}
 		/* if bonding is used it is possible that we add it to masters
-		    only after IP address is assigned to the net bonding
-		    interface */
-		if (curr_master && (old_master != curr_master))
+		 * only after IP address is assigned to the net bonding
+		 * interface.
+		 */
+		if (curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
 			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+		}
+
+		if (!curr_master && (old_master != curr_master)) {
+			reset_gid_table(ibdev, port);
+			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+		}
 	}
 
 	spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
+	int ib_num_ports = 0;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			ibdev->counters[i] = -1;
 	}
 
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_num_ports++;
+
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
 
-	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    ib_num_ports) {
 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
 					    MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
+		for (i = 1 ; i <= ibdev->num_ports ; ++i)
+			reset_gid_table(ibdev, i);
+		rtnl_lock();
 		mlx4_ib_scan_netdevs(ibdev);
+		rtnl_unlock();
 		mlx4_ib_init_gid_table(ibdev);
 	}
 
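For illustration only (not part of the patch): the default RoCE GID that the mlx4 changes above program into GID index 0 is derived from the netdev MAC address with the modified EUI-64 rule of mlx4_addrconf_ifid_eui48(), called with vlan_id 0xffff so the ff:fe filler bytes are used. A standalone userspace sketch of that mapping, using a made-up MAC address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the kernel helper's no-VLAN case: fe80::/64 prefix plus a
 * modified EUI-64 interface identifier built from the MAC address. */
static void make_default_gid(uint8_t gid[16], const uint8_t mac[6])
{
	static const uint8_t prefix[8] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0 };

	memcpy(gid, prefix, 8);		/* link-local subnet prefix */
	memcpy(gid + 8, mac, 3);	/* upper three MAC bytes */
	gid[11] = 0xff;			/* no VLAN: ff:fe filler */
	gid[12] = 0xfe;
	memcpy(gid + 13, mac + 3, 3);	/* lower three MAC bytes */
	gid[8] ^= 2;			/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x33 };	/* example only */
	uint8_t gid[16];
	int i;

	make_default_gid(gid, mac);
	for (i = 0; i < 16; i++)
		printf("%02x%c", gid[i], i == 15 ? '\n' : ':');
	return 0;
}

For the example MAC this prints fe:80:00:00:00:00:00:00:02:02:c9:ff:fe:11:22:33, i.e. the link-local style GID fe80::202:c9ff:fe11:2233 that ends up occupying GID index 0 for the port.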
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
 config MLX5_INFINIBAND
 	tristate "Mellanox Connect-IB HCA support"
-	depends on NETDEVICES && ETHERNET && PCI && X86
+	depends on NETDEVICES && ETHERNET && PCI
 	select NET_VENDOR_MELLANOX
 	select MLX5_CORE
 	---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..bf900579ac08 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,8 +46,8 @@
 #include "mlx5_ib.h"
 
 #define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE	"June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE	"Feb 2014"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT |
 		IB_DEVICE_SYS_IMAGE_GUID |
-		IB_DEVICE_RC_RNR_NAK_GEN |
-		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+		IB_DEVICE_RC_RNR_NAK_GEN;
 	flags = dev->mdev.caps.flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 						  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_ib_alloc_ucontext_req req;
+	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
 	int num_uars;
+	int ver;
 	int uuarn;
 	int err;
 	int i;
+	int reqlen;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	err = ib_copy_from_udata(&req, udata, sizeof(req));
+	memset(&req, 0, sizeof(req));
+	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+		ver = 0;
+	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+		ver = 2;
+	else
+		return ERR_PTR(-EINVAL);
+
+	err = ib_copy_from_udata(&req, udata, reqlen);
 	if (err)
 		return ERR_PTR(err);
 
+	if (req.flags || req.reserved)
+		return ERR_PTR(-EINVAL);
+
 	if (req.total_num_uuars > MLX5_MAX_UUARS)
 		return ERR_PTR(-ENOMEM);
 
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_uars;
 
+	uuari->ver = ver;
 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
 	uuari->uars = uars;
 	uuari->num_uars = num_uars;
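For illustration only (not part of the patch): the alloc_ucontext change above selects the user ABI version purely from the size of the command payload, since the v2 request only appends flags/reserved words that must be zero. A self-contained sketch using local stand-in structs that mirror mlx5/user.h:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for mlx5_ib_alloc_ucontext_req and ..._req_v2 */
struct req_v0 { uint32_t total_num_uuars; uint32_t num_low_latency_uuars; };
struct req_v2 { uint32_t total_num_uuars; uint32_t num_low_latency_uuars;
		uint32_t flags; uint32_t reserved; };

static int pick_ver(size_t reqlen)
{
	if (reqlen == sizeof(struct req_v0))
		return 0;		/* original ABI */
	if (reqlen == sizeof(struct req_v2))
		return 2;		/* v2 ABI */
	return -1;			/* unknown size -> -EINVAL in the driver */
}

int main(void)
{
	printf("8-byte payload  -> v%d\n", pick_ver(8));
	printf("16-byte payload -> v%d\n", pick_ver(16));
	printf("12-byte payload -> v%d\n", pick_ver(12));
	return 0;
}

The same length test is what lets old userspace keep working while newer userspace opts into the behaviour gated by uuari->ver in qp.c below.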
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9bf262..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
 	case IB_QPT_UC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
-			sizeof(struct mlx5_wqe_raddr_seg);
+			sizeof(struct mlx5_wqe_raddr_seg) +
+			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+			sizeof(struct mlx5_mkey_seg);
 		break;
 
 	case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_MEDIUM:
-		uuarn = alloc_med_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_med_class_uuar(uuari);
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_HIGH:
-		uuarn = alloc_high_class_uuar(uuari);
+		if (uuari->ver < 2)
+			uuarn = -ENOMEM;
+		else
+			uuarn = alloc_high_class_uuar(uuari);
 		break;
 
 	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 	int err;
 
 	uuari = &dev->mdev.priv.uuari;
-	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
-		qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+	if (init_attr->create_flags)
+		return -EINVAL;
 
 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
 		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5dfc523..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
 	__u32	num_low_latency_uuars;
 };
 
+struct mlx5_ib_alloc_ucontext_req_v2 {
+	__u32	total_num_uuars;
+	__u32	num_low_latency_uuars;
+	__u32	flags;
+	__u32	reserved;
+};
+
 struct mlx5_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
 
 	/* Initialize network devices */
-	if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+	netdev = nes_netdev_init(nesdev, mmio_regs);
+	if (netdev == NULL) {
+		ret = -ENOMEM;
 		goto bail7;
+	}
 
 	/* Register network device */
 	ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca818bd..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
 
 	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
 	if (is_vlan)
-		netdev = vlan_dev_real_dev(netdev);
+		netdev = rdma_vlan_dev_real_dev(netdev);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40c9d50..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
 	props->port_cap_flags =
 	    IB_PORT_CM_SUP |
 	    IB_PORT_REINIT_SUP |
-	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len = OCRDMA_MAX_SGID;
 	props->pkey_tbl_len = 1;
 	props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
 					      OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
 						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
 	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
-					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
 						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
 
 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
 	qib_write_kreg(dd, kr_scratch, 0ULL);
 
+	/* ensure previous Tx parameters are not still forced */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
 	if (qib_compat_ddr_negotiate) {
 		ppd->cpspec->ibdeltainprog = 1;
 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc6061f1f4..f8dfd76be89f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
 {
 	enum usnic_transport_type trans_type = qp_flow->trans_type;
 	int err;
+	uint16_t port_num = 0;
 
 	switch (trans_type) {
 	case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
 	case USNIC_TRANSPORT_IPV4_UDP:
 		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
 						NULL, NULL,
-						(uint16_t *) id);
+						&port_num);
 		if (err)
 			return err;
+		/*
+		 * Copy port_num to stack first and then to *id,
+		 * so that the short to int cast works for little
+		 * and big endian systems.
+		 */
+		*id = port_num;
 		break;
 	default:
 		usnic_err("Unsupported transport %u\n", trans_type);
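For illustration only (not part of the patch): the comment added above is about endianness. id is an int, and the old code handed its address, cast to uint16_t *, to the lookup helper, so the port number landed in the first two bytes of the int, which is the low half only on little-endian machines. Storing into a real uint16_t and then assigning is endian-neutral; a small self-contained example with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int id = 0;
	uint16_t port_num = 0x1234;	/* hypothetical port number */

	*(uint16_t *)&id = port_num;	/* what the old cast did: fills only the first two bytes */
	printf("cast store : 0x%08x\n", id);

	id = 0;
	id = port_num;			/* value-preserving on any endianness */
	printf("assignment : 0x%08x\n", id);
	return 0;
}

On a little-endian host both lines print 0x00001234; on a big-endian host the cast store would print 0x12340000 while the plain assignment still prints 0x00001234.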
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
 		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
 					ISER_HEADERS_LEN, DMA_TO_DEVICE);
 		kmem_cache_free(ig.desc_cache, tx_desc);
+		tx_desc = NULL;
 	}
 
 	atomic_dec(&ib_conn->post_send_buf_count);
 
-	if (tx_desc->type == ISCSI_TX_CONTROL) {
+	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
 		task = (void *) ((long)(void *)tx_desc -
 				 sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
 	/* getting here when the state is UP means that the conn is being *
 	 * terminated asynchronously from the iSCSI layer's perspective.  */
 	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
-				      ISER_CONN_TERMINATING))
-		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
-				   ISCSI_ERR_CONN_FAILED);
+				      ISER_CONN_TERMINATING)){
+		if (ib_conn->iser_conn)
+			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+					   ISCSI_ERR_CONN_FAILED);
+		else
+			iser_err("iscsi_iser connection isn't bound\n");
+	}
 
 	/* Complete the termination process if no posts are pending */
 	if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2b161be3c1a3..8ee228e9ab5a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | |||
453 | if (ret) { | 453 | if (ret) { |
454 | pr_err("Failed to create fastreg descriptor err=%d\n", | 454 | pr_err("Failed to create fastreg descriptor err=%d\n", |
455 | ret); | 455 | ret); |
456 | kfree(fr_desc); | ||
456 | goto err; | 457 | goto err; |
457 | } | 458 | } |
458 | 459 | ||
@@ -491,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
491 | isert_conn->state = ISER_CONN_INIT; | 492 | isert_conn->state = ISER_CONN_INIT; |
492 | INIT_LIST_HEAD(&isert_conn->conn_accept_node); | 493 | INIT_LIST_HEAD(&isert_conn->conn_accept_node); |
493 | init_completion(&isert_conn->conn_login_comp); | 494 | init_completion(&isert_conn->conn_login_comp); |
494 | init_waitqueue_head(&isert_conn->conn_wait); | 495 | init_completion(&isert_conn->conn_wait); |
495 | init_waitqueue_head(&isert_conn->conn_wait_comp_err); | 496 | init_completion(&isert_conn->conn_wait_comp_err); |
496 | kref_init(&isert_conn->conn_kref); | 497 | kref_init(&isert_conn->conn_kref); |
497 | kref_get(&isert_conn->conn_kref); | 498 | kref_get(&isert_conn->conn_kref); |
498 | mutex_init(&isert_conn->conn_mutex); | 499 | mutex_init(&isert_conn->conn_mutex); |
499 | mutex_init(&isert_conn->conn_comp_mutex); | ||
500 | spin_lock_init(&isert_conn->conn_lock); | 500 | spin_lock_init(&isert_conn->conn_lock); |
501 | 501 | ||
502 | cma_id->context = isert_conn; | 502 | cma_id->context = isert_conn; |
@@ -687,11 +687,11 @@ isert_disconnect_work(struct work_struct *work) | |||
687 | 687 | ||
688 | pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); | 688 | pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); |
689 | mutex_lock(&isert_conn->conn_mutex); | 689 | mutex_lock(&isert_conn->conn_mutex); |
690 | isert_conn->state = ISER_CONN_DOWN; | 690 | if (isert_conn->state == ISER_CONN_UP) |
691 | isert_conn->state = ISER_CONN_TERMINATING; | ||
691 | 692 | ||
692 | if (isert_conn->post_recv_buf_count == 0 && | 693 | if (isert_conn->post_recv_buf_count == 0 && |
693 | atomic_read(&isert_conn->post_send_buf_count) == 0) { | 694 | atomic_read(&isert_conn->post_send_buf_count) == 0) { |
694 | pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); | ||
695 | mutex_unlock(&isert_conn->conn_mutex); | 695 | mutex_unlock(&isert_conn->conn_mutex); |
696 | goto wake_up; | 696 | goto wake_up; |
697 | } | 697 | } |
@@ -711,7 +711,7 @@ isert_disconnect_work(struct work_struct *work) | |||
711 | mutex_unlock(&isert_conn->conn_mutex); | 711 | mutex_unlock(&isert_conn->conn_mutex); |
712 | 712 | ||
713 | wake_up: | 713 | wake_up: |
714 | wake_up(&isert_conn->conn_wait); | 714 | complete(&isert_conn->conn_wait); |
715 | isert_put_conn(isert_conn); | 715 | isert_put_conn(isert_conn); |
716 | } | 716 | } |
717 | 717 | ||
@@ -887,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
887 | * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED | 887 | * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED |
888 | * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. | 888 | * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. |
889 | */ | 889 | */ |
890 | mutex_lock(&isert_conn->conn_comp_mutex); | 890 | mutex_lock(&isert_conn->conn_mutex); |
891 | if (coalesce && | 891 | if (coalesce && isert_conn->state == ISER_CONN_UP && |
892 | ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { | 892 | ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { |
893 | tx_desc->llnode_active = true; | ||
893 | llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); | 894 | llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); |
894 | mutex_unlock(&isert_conn->conn_comp_mutex); | 895 | mutex_unlock(&isert_conn->conn_mutex); |
895 | return; | 896 | return; |
896 | } | 897 | } |
897 | isert_conn->conn_comp_batch = 0; | 898 | isert_conn->conn_comp_batch = 0; |
898 | tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); | 899 | tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); |
899 | mutex_unlock(&isert_conn->conn_comp_mutex); | 900 | mutex_unlock(&isert_conn->conn_mutex); |
900 | 901 | ||
901 | send_wr->send_flags = IB_SEND_SIGNALED; | 902 | send_wr->send_flags = IB_SEND_SIGNALED; |
902 | } | 903 | } |
@@ -1463,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) | |||
1463 | case ISCSI_OP_SCSI_CMD: | 1464 | case ISCSI_OP_SCSI_CMD: |
1464 | spin_lock_bh(&conn->cmd_lock); | 1465 | spin_lock_bh(&conn->cmd_lock); |
1465 | if (!list_empty(&cmd->i_conn_node)) | 1466 | if (!list_empty(&cmd->i_conn_node)) |
1466 | list_del(&cmd->i_conn_node); | 1467 | list_del_init(&cmd->i_conn_node); |
1467 | spin_unlock_bh(&conn->cmd_lock); | 1468 | spin_unlock_bh(&conn->cmd_lock); |
1468 | 1469 | ||
1469 | if (cmd->data_direction == DMA_TO_DEVICE) | 1470 | if (cmd->data_direction == DMA_TO_DEVICE) |
@@ -1475,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) | |||
1475 | case ISCSI_OP_SCSI_TMFUNC: | 1476 | case ISCSI_OP_SCSI_TMFUNC: |
1476 | spin_lock_bh(&conn->cmd_lock); | 1477 | spin_lock_bh(&conn->cmd_lock); |
1477 | if (!list_empty(&cmd->i_conn_node)) | 1478 | if (!list_empty(&cmd->i_conn_node)) |
1478 | list_del(&cmd->i_conn_node); | 1479 | list_del_init(&cmd->i_conn_node); |
1479 | spin_unlock_bh(&conn->cmd_lock); | 1480 | spin_unlock_bh(&conn->cmd_lock); |
1480 | 1481 | ||
1481 | transport_generic_free_cmd(&cmd->se_cmd, 0); | 1482 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
@@ -1485,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) | |||
1485 | case ISCSI_OP_TEXT: | 1486 | case ISCSI_OP_TEXT: |
1486 | spin_lock_bh(&conn->cmd_lock); | 1487 | spin_lock_bh(&conn->cmd_lock); |
1487 | if (!list_empty(&cmd->i_conn_node)) | 1488 | if (!list_empty(&cmd->i_conn_node)) |
1488 | list_del(&cmd->i_conn_node); | 1489 | list_del_init(&cmd->i_conn_node); |
1489 | spin_unlock_bh(&conn->cmd_lock); | 1490 | spin_unlock_bh(&conn->cmd_lock); |
1490 | 1491 | ||
1491 | /* | 1492 | /* |
@@ -1548,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1548 | iscsit_stop_dataout_timer(cmd); | 1549 | iscsit_stop_dataout_timer(cmd); |
1549 | device->unreg_rdma_mem(isert_cmd, isert_conn); | 1550 | device->unreg_rdma_mem(isert_cmd, isert_conn); |
1550 | cmd->write_data_done = wr->cur_rdma_length; | 1551 | cmd->write_data_done = wr->cur_rdma_length; |
1552 | wr->send_wr_num = 0; | ||
1551 | 1553 | ||
1552 | pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); | 1554 | pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); |
1553 | spin_lock_bh(&cmd->istate_lock); | 1555 | spin_lock_bh(&cmd->istate_lock); |
@@ -1588,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work) | |||
1588 | pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); | 1590 | pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); |
1589 | /* | 1591 | /* |
1590 | * Call atomic_dec(&isert_conn->post_send_buf_count) | 1592 | * Call atomic_dec(&isert_conn->post_send_buf_count) |
1591 | * from isert_free_conn() | 1593 | * from isert_wait_conn() |
1592 | */ | 1594 | */ |
1593 | isert_conn->logout_posted = true; | 1595 | isert_conn->logout_posted = true; |
1594 | iscsit_logout_post_handler(cmd, cmd->conn); | 1596 | iscsit_logout_post_handler(cmd, cmd->conn); |
@@ -1612,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1612 | struct ib_device *ib_dev) | 1614 | struct ib_device *ib_dev) |
1613 | { | 1615 | { |
1614 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 1616 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
1617 | struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; | ||
1615 | 1618 | ||
1616 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || | 1619 | if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || |
1617 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || | 1620 | cmd->i_state == ISTATE_SEND_LOGOUTRSP || |
@@ -1623,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, | |||
1623 | queue_work(isert_comp_wq, &isert_cmd->comp_work); | 1626 | queue_work(isert_comp_wq, &isert_cmd->comp_work); |
1624 | return; | 1627 | return; |
1625 | } | 1628 | } |
1626 | atomic_dec(&isert_conn->post_send_buf_count); | 1629 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
1627 | 1630 | ||
1628 | cmd->i_state = ISTATE_SENT_STATUS; | 1631 | cmd->i_state = ISTATE_SENT_STATUS; |
1629 | isert_completion_put(tx_desc, isert_cmd, ib_dev); | 1632 | isert_completion_put(tx_desc, isert_cmd, ib_dev); |
@@ -1661,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, | |||
1661 | case ISER_IB_RDMA_READ: | 1664 | case ISER_IB_RDMA_READ: |
1662 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); | 1665 | pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); |
1663 | 1666 | ||
1664 | atomic_dec(&isert_conn->post_send_buf_count); | 1667 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); |
1665 | isert_completion_rdma_read(tx_desc, isert_cmd); | 1668 | isert_completion_rdma_read(tx_desc, isert_cmd); |
1666 | break; | 1669 | break; |
1667 | default: | 1670 | default: |
@@ -1690,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc, | |||
1690 | } | 1693 | } |
1691 | 1694 | ||
1692 | static void | 1695 | static void |
1693 | isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) | 1696 | isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) |
1697 | { | ||
1698 | struct llist_node *llnode; | ||
1699 | struct isert_rdma_wr *wr; | ||
1700 | struct iser_tx_desc *t; | ||
1701 | |||
1702 | mutex_lock(&isert_conn->conn_mutex); | ||
1703 | llnode = llist_del_all(&isert_conn->conn_comp_llist); | ||
1704 | isert_conn->conn_comp_batch = 0; | ||
1705 | mutex_unlock(&isert_conn->conn_mutex); | ||
1706 | |||
1707 | while (llnode) { | ||
1708 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); | ||
1709 | llnode = llist_next(llnode); | ||
1710 | wr = &t->isert_cmd->rdma_wr; | ||
1711 | |||
1712 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); | ||
1713 | isert_completion_put(t, t->isert_cmd, ib_dev); | ||
1714 | } | ||
1715 | } | ||
1716 | |||
1717 | static void | ||
1718 | isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) | ||
1694 | { | 1719 | { |
1695 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; | 1720 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1721 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | ||
1722 | struct llist_node *llnode = tx_desc->comp_llnode_batch; | ||
1723 | struct isert_rdma_wr *wr; | ||
1724 | struct iser_tx_desc *t; | ||
1696 | 1725 | ||
1697 | if (tx_desc) { | 1726 | while (llnode) { |
1698 | struct isert_cmd *isert_cmd = tx_desc->isert_cmd; | 1727 | t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); |
1728 | llnode = llist_next(llnode); | ||
1729 | wr = &t->isert_cmd->rdma_wr; | ||
1699 | 1730 | ||
1700 | if (!isert_cmd) | 1731 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
1701 | isert_unmap_tx_desc(tx_desc, ib_dev); | 1732 | isert_completion_put(t, t->isert_cmd, ib_dev); |
1702 | else | ||
1703 | isert_completion_put(tx_desc, isert_cmd, ib_dev); | ||
1704 | } | 1733 | } |
1734 | tx_desc->comp_llnode_batch = NULL; | ||
1705 | 1735 | ||
1706 | if (isert_conn->post_recv_buf_count == 0 && | 1736 | if (!isert_cmd) |
1707 | atomic_read(&isert_conn->post_send_buf_count) == 0) { | 1737 | isert_unmap_tx_desc(tx_desc, ib_dev); |
1708 | pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); | 1738 | else |
1709 | pr_debug("Calling wake_up from isert_cq_comp_err\n"); | 1739 | isert_completion_put(tx_desc, isert_cmd, ib_dev); |
1740 | } | ||
1710 | 1741 | ||
1711 | mutex_lock(&isert_conn->conn_mutex); | 1742 | static void |
1712 | if (isert_conn->state != ISER_CONN_DOWN) | 1743 | isert_cq_rx_comp_err(struct isert_conn *isert_conn) |
1713 | isert_conn->state = ISER_CONN_TERMINATING; | 1744 | { |
1714 | mutex_unlock(&isert_conn->conn_mutex); | 1745 | struct ib_device *ib_dev = isert_conn->conn_cm_id->device; |
1746 | struct iscsi_conn *conn = isert_conn->conn; | ||
1715 | 1747 | ||
1716 | wake_up(&isert_conn->conn_wait_comp_err); | 1748 | if (isert_conn->post_recv_buf_count) |
1749 | return; | ||
1750 | |||
1751 | isert_cq_drain_comp_llist(isert_conn, ib_dev); | ||
1752 | |||
1753 | if (conn->sess) { | ||
1754 | target_sess_cmd_list_set_waiting(conn->sess->se_sess); | ||
1755 | target_wait_for_sess_cmds(conn->sess->se_sess); | ||
1717 | } | 1756 | } |
1757 | |||
1758 | while (atomic_read(&isert_conn->post_send_buf_count)) | ||
1759 | msleep(3000); | ||
1760 | |||
1761 | mutex_lock(&isert_conn->conn_mutex); | ||
1762 | isert_conn->state = ISER_CONN_DOWN; | ||
1763 | mutex_unlock(&isert_conn->conn_mutex); | ||
1764 | |||
1765 | complete(&isert_conn->conn_wait_comp_err); | ||
1718 | } | 1766 | } |
1719 | 1767 | ||
1720 | static void | 1768 | static void |
@@ -1739,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work) | |||
1739 | pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); | 1787 | pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); |
1740 | pr_debug("TX wc.status: 0x%08x\n", wc.status); | 1788 | pr_debug("TX wc.status: 0x%08x\n", wc.status); |
1741 | pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); | 1789 | pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); |
1742 | atomic_dec(&isert_conn->post_send_buf_count); | 1790 | |
1743 | isert_cq_comp_err(tx_desc, isert_conn); | 1791 | if (wc.wr_id != ISER_FASTREG_LI_WRID) { |
1792 | if (tx_desc->llnode_active) | ||
1793 | continue; | ||
1794 | |||
1795 | atomic_dec(&isert_conn->post_send_buf_count); | ||
1796 | isert_cq_tx_comp_err(tx_desc, isert_conn); | ||
1797 | } | ||
1744 | } | 1798 | } |
1745 | } | 1799 | } |
1746 | 1800 | ||
@@ -1783,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work) | |||
1783 | wc.vendor_err); | 1837 | wc.vendor_err); |
1784 | } | 1838 | } |
1785 | isert_conn->post_recv_buf_count--; | 1839 | isert_conn->post_recv_buf_count--; |
1786 | isert_cq_comp_err(NULL, isert_conn); | 1840 | isert_cq_rx_comp_err(isert_conn); |
1787 | } | 1841 | } |
1788 | } | 1842 | } |
1789 | 1843 | ||
@@ -2201,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, | |||
2201 | 2255 | ||
2202 | if (!fr_desc->valid) { | 2256 | if (!fr_desc->valid) { |
2203 | memset(&inv_wr, 0, sizeof(inv_wr)); | 2257 | memset(&inv_wr, 0, sizeof(inv_wr)); |
2258 | inv_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
2204 | inv_wr.opcode = IB_WR_LOCAL_INV; | 2259 | inv_wr.opcode = IB_WR_LOCAL_INV; |
2205 | inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; | 2260 | inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; |
2206 | wr = &inv_wr; | 2261 | wr = &inv_wr; |
@@ -2211,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, | |||
2211 | 2266 | ||
2212 | /* Prepare FASTREG WR */ | 2267 | /* Prepare FASTREG WR */ |
2213 | memset(&fr_wr, 0, sizeof(fr_wr)); | 2268 | memset(&fr_wr, 0, sizeof(fr_wr)); |
2269 | fr_wr.wr_id = ISER_FASTREG_LI_WRID; | ||
2214 | fr_wr.opcode = IB_WR_FAST_REG_MR; | 2270 | fr_wr.opcode = IB_WR_FAST_REG_MR; |
2215 | fr_wr.wr.fast_reg.iova_start = | 2271 | fr_wr.wr.fast_reg.iova_start = |
2216 | fr_desc->data_frpl->page_list[0] + page_off; | 2272 | fr_desc->data_frpl->page_list[0] + page_off; |
@@ -2376,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2376 | isert_init_send_wr(isert_conn, isert_cmd, | 2432 | isert_init_send_wr(isert_conn, isert_cmd, |
2377 | &isert_cmd->tx_desc.send_wr, true); | 2433 | &isert_cmd->tx_desc.send_wr, true); |
2378 | 2434 | ||
2379 | atomic_inc(&isert_conn->post_send_buf_count); | 2435 | atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
2380 | 2436 | ||
2381 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2437 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
2382 | if (rc) { | 2438 | if (rc) { |
2383 | pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); | 2439 | pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); |
2384 | atomic_dec(&isert_conn->post_send_buf_count); | 2440 | atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); |
2385 | } | 2441 | } |
2386 | pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", | 2442 | pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", |
2387 | isert_cmd); | 2443 | isert_cmd); |
@@ -2409,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2409 | return rc; | 2465 | return rc; |
2410 | } | 2466 | } |
2411 | 2467 | ||
2412 | atomic_inc(&isert_conn->post_send_buf_count); | 2468 | atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); |
2413 | 2469 | ||
2414 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); | 2470 | rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); |
2415 | if (rc) { | 2471 | if (rc) { |
2416 | pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); | 2472 | pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); |
2417 | atomic_dec(&isert_conn->post_send_buf_count); | 2473 | atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); |
2418 | } | 2474 | } |
2419 | pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", | 2475 | pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", |
2420 | isert_cmd); | 2476 | isert_cmd); |
@@ -2701,22 +2757,11 @@ isert_free_np(struct iscsi_np *np) | |||
2701 | kfree(isert_np); | 2757 | kfree(isert_np); |
2702 | } | 2758 | } |
2703 | 2759 | ||
2704 | static int isert_check_state(struct isert_conn *isert_conn, int state) | 2760 | static void isert_wait_conn(struct iscsi_conn *conn) |
2705 | { | ||
2706 | int ret; | ||
2707 | |||
2708 | mutex_lock(&isert_conn->conn_mutex); | ||
2709 | ret = (isert_conn->state == state); | ||
2710 | mutex_unlock(&isert_conn->conn_mutex); | ||
2711 | |||
2712 | return ret; | ||
2713 | } | ||
2714 | |||
2715 | static void isert_free_conn(struct iscsi_conn *conn) | ||
2716 | { | 2761 | { |
2717 | struct isert_conn *isert_conn = conn->context; | 2762 | struct isert_conn *isert_conn = conn->context; |
2718 | 2763 | ||
2719 | pr_debug("isert_free_conn: Starting \n"); | 2764 | pr_debug("isert_wait_conn: Starting \n"); |
2720 | /* | 2765 | /* |
2721 | * Decrement post_send_buf_count for special case when called | 2766 | * Decrement post_send_buf_count for special case when called |
2722 | * from isert_do_control_comp() -> iscsit_logout_post_handler() | 2767 | * from isert_do_control_comp() -> iscsit_logout_post_handler() |
@@ -2726,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn) | |||
2726 | atomic_dec(&isert_conn->post_send_buf_count); | 2771 | atomic_dec(&isert_conn->post_send_buf_count); |
2727 | 2772 | ||
2728 | if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { | 2773 | if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { |
2729 | pr_debug("Calling rdma_disconnect from isert_free_conn\n"); | 2774 | pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); |
2730 | rdma_disconnect(isert_conn->conn_cm_id); | 2775 | rdma_disconnect(isert_conn->conn_cm_id); |
2731 | } | 2776 | } |
2732 | /* | 2777 | /* |
2733 | * Only wait for conn_wait_comp_err if the isert_conn made it | 2778 | * Only wait for conn_wait_comp_err if the isert_conn made it |
2734 | * into full feature phase.. | 2779 | * into full feature phase.. |
2735 | */ | 2780 | */ |
2736 | if (isert_conn->state == ISER_CONN_UP) { | ||
2737 | pr_debug("isert_free_conn: Before wait_event comp_err %d\n", | ||
2738 | isert_conn->state); | ||
2739 | mutex_unlock(&isert_conn->conn_mutex); | ||
2740 | |||
2741 | wait_event(isert_conn->conn_wait_comp_err, | ||
2742 | (isert_check_state(isert_conn, ISER_CONN_TERMINATING))); | ||
2743 | |||
2744 | wait_event(isert_conn->conn_wait, | ||
2745 | (isert_check_state(isert_conn, ISER_CONN_DOWN))); | ||
2746 | |||
2747 | isert_put_conn(isert_conn); | ||
2748 | return; | ||
2749 | } | ||
2750 | if (isert_conn->state == ISER_CONN_INIT) { | 2781 | if (isert_conn->state == ISER_CONN_INIT) { |
2751 | mutex_unlock(&isert_conn->conn_mutex); | 2782 | mutex_unlock(&isert_conn->conn_mutex); |
2752 | isert_put_conn(isert_conn); | ||
2753 | return; | 2783 | return; |
2754 | } | 2784 | } |
2755 | pr_debug("isert_free_conn: wait_event conn_wait %d\n", | 2785 | if (isert_conn->state == ISER_CONN_UP) |
2756 | isert_conn->state); | 2786 | isert_conn->state = ISER_CONN_TERMINATING; |
2757 | mutex_unlock(&isert_conn->conn_mutex); | 2787 | mutex_unlock(&isert_conn->conn_mutex); |
2758 | 2788 | ||
2759 | wait_event(isert_conn->conn_wait, | 2789 | wait_for_completion(&isert_conn->conn_wait_comp_err); |
2760 | (isert_check_state(isert_conn, ISER_CONN_DOWN))); | 2790 | |
2791 | wait_for_completion(&isert_conn->conn_wait); | ||
2792 | } | ||
2793 | |||
2794 | static void isert_free_conn(struct iscsi_conn *conn) | ||
2795 | { | ||
2796 | struct isert_conn *isert_conn = conn->context; | ||
2761 | 2797 | ||
2762 | isert_put_conn(isert_conn); | 2798 | isert_put_conn(isert_conn); |
2763 | } | 2799 | } |
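[Editor's note] The shutdown path is now split into isert_wait_conn() and isert_free_conn(), and the open-coded wait_event()/isert_check_state() polling is replaced with completions. A minimal sketch of the pattern, assuming the teardown side signals the completions once flush/error completions have drained and the connection reaches ISER_CONN_DOWN (the exact signalling sites are not shown in this hunk):

	/* Teardown side (illustrative): wake the waiter directly instead of
	 * updating state and relying on wait_event() re-checks. */
	complete(&isert_conn->conn_wait_comp_err);	/* QP drained */
	complete(&isert_conn->conn_wait);		/* connection fully down */

	/* Wait side, as in isert_wait_conn() above: */
	wait_for_completion(&isert_conn->conn_wait_comp_err);
	wait_for_completion(&isert_conn->conn_wait);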
@@ -2770,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = { | |||
2770 | .iscsit_setup_np = isert_setup_np, | 2806 | .iscsit_setup_np = isert_setup_np, |
2771 | .iscsit_accept_np = isert_accept_np, | 2807 | .iscsit_accept_np = isert_accept_np, |
2772 | .iscsit_free_np = isert_free_np, | 2808 | .iscsit_free_np = isert_free_np, |
2809 | .iscsit_wait_conn = isert_wait_conn, | ||
2773 | .iscsit_free_conn = isert_free_conn, | 2810 | .iscsit_free_conn = isert_free_conn, |
2774 | .iscsit_get_login_rx = isert_get_login_rx, | 2811 | .iscsit_get_login_rx = isert_get_login_rx, |
2775 | .iscsit_put_login_tx = isert_put_login_tx, | 2812 | .iscsit_put_login_tx = isert_put_login_tx, |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 708a069002f3..f6ae7f5dd408 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #define ISERT_RDMA_LISTEN_BACKLOG 10 | 7 | #define ISERT_RDMA_LISTEN_BACKLOG 10 |
8 | #define ISCSI_ISER_SG_TABLESIZE 256 | 8 | #define ISCSI_ISER_SG_TABLESIZE 256 |
9 | #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL | ||
9 | 10 | ||
10 | enum isert_desc_type { | 11 | enum isert_desc_type { |
11 | ISCSI_TX_CONTROL, | 12 | ISCSI_TX_CONTROL, |
@@ -45,6 +46,7 @@ struct iser_tx_desc { | |||
45 | struct isert_cmd *isert_cmd; | 46 | struct isert_cmd *isert_cmd; |
46 | struct llist_node *comp_llnode_batch; | 47 | struct llist_node *comp_llnode_batch; |
47 | struct llist_node comp_llnode; | 48 | struct llist_node comp_llnode; |
49 | bool llnode_active; | ||
48 | struct ib_send_wr send_wr; | 50 | struct ib_send_wr send_wr; |
49 | } __packed; | 51 | } __packed; |
50 | 52 | ||
@@ -116,8 +118,8 @@ struct isert_conn { | |||
116 | struct isert_device *conn_device; | 118 | struct isert_device *conn_device; |
117 | struct work_struct conn_logout_work; | 119 | struct work_struct conn_logout_work; |
118 | struct mutex conn_mutex; | 120 | struct mutex conn_mutex; |
119 | wait_queue_head_t conn_wait; | 121 | struct completion conn_wait; |
120 | wait_queue_head_t conn_wait_comp_err; | 122 | struct completion conn_wait_comp_err; |
121 | struct kref conn_kref; | 123 | struct kref conn_kref; |
122 | struct list_head conn_fr_pool; | 124 | struct list_head conn_fr_pool; |
123 | int conn_fr_pool_size; | 125 | int conn_fr_pool_size; |
@@ -126,7 +128,6 @@ struct isert_conn { | |||
126 | #define ISERT_COMP_BATCH_COUNT 8 | 128 | #define ISERT_COMP_BATCH_COUNT 8 |
127 | int conn_comp_batch; | 129 | int conn_comp_batch; |
128 | struct llist_head conn_comp_llist; | 130 | struct llist_head conn_comp_llist; |
129 | struct mutex conn_comp_mutex; | ||
130 | }; | 131 | }; |
131 | 132 | ||
132 | #define ISERT_MAX_CQ 64 | 133 | #define ISERT_MAX_CQ 64 |
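[Editor's note] With conn_wait and conn_wait_comp_err now declared as struct completion, the connection-setup path (outside this hunk) would be expected to switch from init_waitqueue_head() to init_completion(); a hedged sketch of that counterpart:

	#include <linux/completion.h>

	/* Illustrative init, assumed to live in the connect path: */
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);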
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 520a7e5a490b..0e537d8d0e47 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( | |||
3666 | unsigned long val; | 3666 | unsigned long val; |
3667 | int ret; | 3667 | int ret; |
3668 | 3668 | ||
3669 | ret = strict_strtoul(page, 0, &val); | 3669 | ret = kstrtoul(page, 0, &val); |
3670 | if (ret < 0) { | 3670 | if (ret < 0) { |
3671 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3671 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3672 | return -EINVAL; | 3672 | return -EINVAL; |
3673 | } | 3673 | } |
3674 | if (val > MAX_SRPT_RDMA_SIZE) { | 3674 | if (val > MAX_SRPT_RDMA_SIZE) { |
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( | |||
3706 | unsigned long val; | 3706 | unsigned long val; |
3707 | int ret; | 3707 | int ret; |
3708 | 3708 | ||
3709 | ret = strict_strtoul(page, 0, &val); | 3709 | ret = kstrtoul(page, 0, &val); |
3710 | if (ret < 0) { | 3710 | if (ret < 0) { |
3711 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3711 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3712 | return -EINVAL; | 3712 | return -EINVAL; |
3713 | } | 3713 | } |
3714 | if (val > MAX_SRPT_RSP_SIZE) { | 3714 | if (val > MAX_SRPT_RSP_SIZE) { |
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size( | |||
3746 | unsigned long val; | 3746 | unsigned long val; |
3747 | int ret; | 3747 | int ret; |
3748 | 3748 | ||
3749 | ret = strict_strtoul(page, 0, &val); | 3749 | ret = kstrtoul(page, 0, &val); |
3750 | if (ret < 0) { | 3750 | if (ret < 0) { |
3751 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3751 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3752 | return -EINVAL; | 3752 | return -EINVAL; |
3753 | } | 3753 | } |
3754 | if (val > MAX_SRPT_SRQ_SIZE) { | 3754 | if (val > MAX_SRPT_SRQ_SIZE) { |
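[Editor's note] The srpt configfs store handlers drop the deprecated strict_strtoul() in favour of kstrtoul(), which returns 0 on success or a negative errno. A self-contained sketch of the same parse-and-range-check pattern (the helper name and parameters are illustrative):

	#include <linux/kernel.h>

	static int example_parse_size(const char *page, unsigned long max,
				      unsigned long *out)
	{
		unsigned long val;
		int ret;

		ret = kstrtoul(page, 0, &val);	/* base 0: decimal, hex, or octal */
		if (ret < 0)
			return ret;
		if (val > max)
			return -EINVAL;
		*out = val;
		return 0;
	}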
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable( | |||
3793 | unsigned long tmp; | 3793 | unsigned long tmp; |
3794 | int ret; | 3794 | int ret; |
3795 | 3795 | ||
3796 | ret = strict_strtoul(page, 0, &tmp); | 3796 | ret = kstrtoul(page, 0, &tmp); |
3797 | if (ret < 0) { | 3797 | if (ret < 0) { |
3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); | 3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); |
3799 | return -EINVAL; | 3799 | return -EINVAL; |