Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/umem.c                 |  19
-rw-r--r--  drivers/infiniband/core/uverbs_marshall.c      |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c              | 201
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h           |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c                |   7
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                |  68
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c       |  43
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c    |   6
-rw-r--r--  drivers/infiniband/hw/qib/qib_debugfs.c        |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c             |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c     |   6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h           |   6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      |   4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  10
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c       |  19
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h       |   2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c       |  24
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c        |  20
19 files changed, 304 insertions(+), 153 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a3a2e9c1639b..df0c4f605a21 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	umem->length = size;
 	umem->offset = addr & ~PAGE_MASK;
 	umem->page_size = PAGE_SIZE;
+	umem->pid = get_task_pid(current, PIDTYPE_PID);
 	/*
 	 * We ask for writable memory if any access flags other than
 	 * "remote read" are set. "Local write" and "remote write"
@@ -198,6 +199,7 @@ out:
 	if (ret < 0) {
 		if (need_release)
 			__ib_umem_release(context->device, umem, 0);
+		put_pid(umem->pid);
 		kfree(umem);
 	} else
 		current->mm->pinned_vm = locked;
@@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem)
 {
 	struct ib_ucontext *context = umem->context;
 	struct mm_struct *mm;
+	struct task_struct *task;
 	unsigned long diff;
 
 	__ib_umem_release(umem->context->device, umem, 1);
 
-	mm = get_task_mm(current);
-	if (!mm) {
-		kfree(umem);
-		return;
-	}
+	task = get_pid_task(umem->pid, PIDTYPE_PID);
+	put_pid(umem->pid);
+	if (!task)
+		goto out;
+	mm = get_task_mm(task);
+	put_task_struct(task);
+	if (!mm)
+		goto out;
 
 	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
@@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem)
 	} else
 		down_write(&mm->mmap_sem);
 
-	current->mm->pinned_vm -= diff;
+	mm->pinned_vm -= diff;
 	up_write(&mm->mmap_sem);
 	mmput(mm);
+out:
 	kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
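
Why ib_umem_get() now stashes a struct pid: ib_umem_release() can run from a
task other than the one that pinned the pages (for example, resource teardown
driven by another process), so the pinned_vm accounting has to be charged to
the owning mm rather than to current->mm. A minimal sketch of the pid-to-mm
lookup pattern the hunks above apply (unpin_account() is an illustrative name,
not part of the patch):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/mm.h>

static void unpin_account(struct pid *owner, unsigned long npages)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = get_pid_task(owner, PIDTYPE_PID); /* NULL if the owner exited */
	put_pid(owner);
	if (!task)
		return;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		return;
	down_write(&mm->mmap_sem);
	mm->pinned_vm -= npages;	/* charge the owner, not current */
	up_write(&mm->mmap_sem);
	mmput(mm);
}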
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index e7bee46868d1..abd97247443e 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
 	dst->packet_life_time = src->packet_life_time;
 	dst->preference = src->preference;
 	dst->packet_life_time_selector = src->packet_life_time_selector;
+
+	memset(dst->smac, 0, sizeof(dst->smac));
+	memset(dst->dmac, 0, sizeof(dst->dmac));
+	dst->vlan_id = 0xffff;
 }
 EXPORT_SYMBOL(ib_copy_path_rec_from_user);
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index dc66c4506916..1da1252dcdb3 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-				  struct page **p, struct vm_area_struct **vma)
+				  struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __ipath_get_user_pages(start_page, num_pages, p);
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e1e558a3d692..bda5994ceb68 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
 
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0 0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 	return dmfs;
 }
 
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
 
 	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 		props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
 		goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
 		ret = 1;
@@ -1089,6 +1106,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
@@ -1136,6 +1177,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		i++;
 	}
 
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		if (err)
+			goto err_free;
+	}
+
 	return &mflow->ibflow;
 
 err_free:
@@ -1262,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
+		spin_unlock_bh(&mdev->iboe.lock);
 		if (ndev)
 			dev_put(ndev);
 		list_del(&ge->list);
@@ -1387,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1417,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
 
+	if (!gw->dev->ib_active)
+		return;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
 		pr_warn("reset gid table failed\n");
@@ -1551,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 		return 0;
 
 	iboe = &ibdev->iboe;
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
 		if ((netif_is_bond_master(real_dev) &&
@@ -1561,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
 			update_gid_table(ibdev, port, gid,
 					 event == NETDEV_DOWN, 0);
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	return 0;
 
 }
@@ -1634,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
 	new_smac = mlx4_mac_to_u64(dev->dev_addr);
 	read_unlock(&dev_base_lock);
 
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need for update QP1 and mac registration in non-SRIOV */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
 	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
 	qp = ibdev->qp1_proxy[port - 1];
 	if (qp) {
 		int new_smac_index;
-		u64 old_smac = qp->pri.smac;
+		u64 old_smac;
 		struct mlx4_update_qp_params update_params;
 
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
 		if (new_smac == old_smac)
 			goto unlock;
 
@@ -1650,22 +1711,25 @@
 			goto unlock;
 
 		update_params.smac_index = new_smac_index;
-		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
 				   &update_params)) {
 			release_mac = new_smac;
 			goto unlock;
 		}
-
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
 		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
 		qp->pri.smac_index = new_smac_index;
-
-		release_mac = old_smac;
 	}
 
 unlock:
-	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 	if (release_mac != MLX4_IB_INVALID_MAC)
 		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1676,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 	struct inet6_dev *in6_dev;
 	union ib_gid *pgid;
 	struct inet6_ifaddr *ifp;
+	union ib_gid default_gid;
 #endif
 	union ib_gid gid;
 
@@ -1696,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
 		in_dev_put(in_dev);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
+	mlx4_make_default_gid(dev, &default_gid);
 	/* IPv6 gids */
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev) {
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
+			if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+				continue;
 			update_gid_table(ibdev, port, pgid, 0, 0);
 		}
 		read_unlock_bh(&in6_dev->lock);
@@ -1723,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 	struct net_device *dev;
 	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
 	int i;
+	int err = 0;
 
-	for (i = 1; i <= ibdev->num_ports; ++i)
-		if (reset_gid_table(ibdev, i))
-			return -1;
+	for (i = 1; i <= ibdev->num_ports; ++i) {
+		if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+		    IB_LINK_LAYER_ETHERNET) {
+			err = reset_gid_table(ibdev, i);
+			if (err)
+				goto out;
+		}
+	}
 
 	read_lock(&dev_base_lock);
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 
 	for_each_netdev(&init_net, dev) {
 		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port)
+		/* port will be non-zero only for ETH ports */
+		if (port) {
+			mlx4_ib_set_default_gid(ibdev, dev, port);
 			mlx4_ib_get_dev_addr(dev, ibdev, port);
+		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 	read_unlock(&dev_base_lock);
-
-	return 0;
+out:
+	return err;
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1754,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
 	iboe = &ibdev->iboe;
 
-	spin_lock(&iboe->lock);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		enum ib_port_state port_state = IB_PORT_NOP;
 		struct net_device *old_master = iboe->masters[port - 1];
@@ -1786,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
 						IB_PORT_ACTIVE : IB_PORT_DOWN;
 			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		} else {
-			reset_gid_table(ibdev, port);
-		}
-		/* if using bonding/team and a slave port is down, we don't the bond IP
-		 * based gids in the table since flows that select port by gid may get
-		 * the down port.
-		 */
-		if (curr_master && (port_state == IB_PORT_DOWN)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-		}
-		/* if bonding is used it is possible that we add it to masters
-		 * only after IP address is assigned to the net bonding
-		 * interface.
-		 */
-		if (curr_master && (old_master != curr_master)) {
-			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_master, ibdev, port);
-		}
+			if (curr_master) {
+				/* if using bonding/team and a slave port is down, we
+				 * don't want the bond IP based gids in the table since
+				 * flows that select port by gid may get the down port.
+				 */
+				if (port_state == IB_PORT_DOWN) {
+					reset_gid_table(ibdev, port);
+					mlx4_ib_set_default_gid(ibdev,
+								curr_netdev,
+								port);
+				} else {
+					/* gids from the upper dev (bond/team)
+					 * should appear in port's gid table
+					 */
+					mlx4_ib_get_dev_addr(curr_master,
+							     ibdev, port);
+				}
+			}
+			/* if bonding is used it is possible that we add it to
+			 * masters only after IP address is assigned to the
+			 * net bonding interface.
+			 */
+			if (curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+			}
 
 			if (!curr_master && (old_master != curr_master)) {
+				reset_gid_table(ibdev, port);
+				mlx4_ib_set_default_gid(ibdev,
+							curr_netdev, port);
+				mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+			}
+		} else {
 			reset_gid_table(ibdev, port);
-			mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
-			mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
 		}
 	}
 
-	spin_unlock(&iboe->lock);
+	spin_unlock_bh(&iboe->lock);
 
 	if (update_qps_port > 0)
 		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2156,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		goto err_steer_free_bitmap;
 	}
 
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
 	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_steer_free_bitmap;
 
@@ -2192,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			}
 		}
 #endif
-		for (i = 1 ; i <= ibdev->num_ports ; ++i)
-			reset_gid_table(ibdev, i);
-		rtnl_lock();
-		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
-		rtnl_unlock();
-		mlx4_ib_init_gid_table(ibdev);
+		if (mlx4_ib_init_gid_table(ibdev))
+			goto err_notif;
 	}
 
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2345,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
+
 	mlx4_ib_close_sriov(ibdev);
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e8cad3926bfc..6eb743f65f6f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device	*netdevs[MLX4_MAX_PORTS];
 	struct net_device	*masters[MLX4_MAX_PORTS];
+	atomic64_t		mac[MLX4_MAX_PORTS];
 	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
 	struct notifier_block	nb_inet6;
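
The new per-port mac[] cache is an atomic64_t so hot-path readers (e.g.
handle_eth_ud_smac_index() and build_mlx_header() in qp.c below) can fetch the
current MAC without taking iboe->lock, while the netdev event handler simply
publishes updates. A reduced sketch of the pattern (port_mac is an
illustrative stand-in for iboe.mac[port - 1]):

#include <linux/atomic.h>

static atomic64_t port_mac;

/* writer (netdev event handler): publish the new MAC in one atomic store */
static void publish_mac(u64 new_mac)
{
	atomic64_set(&port_mac, new_mac);
}

/* reader (packet build path): sees a complete value, never a torn
 * half-update, even on 32-bit hosts where a plain u64 store is not atomic */
static u64 read_mac(void)
{
	return atomic64_read(&port_mac);
}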
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 9b0e80e59b08..8f9325cfc85d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -234,14 +234,13 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 					   0);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
+			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
 		shift = ilog2(mmr->umem->page_size);
 
-		mmr->mmr.iova = virt_addr;
-		mmr->mmr.size = length;
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,
 					      *pmpt_entry);
@@ -249,6 +248,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			ib_umem_release(mmr->umem);
 			goto release_mpt_entry;
 		}
+		mmr->mmr.iova = virt_addr;
+		mmr->mmr.size = length;
 
 		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
 		if (err) {
@@ -262,6 +263,8 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	 * return a failure. But dereg_mr will free the resources.
 	 */
 	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+	if (!err && flags & IB_MR_REREG_ACCESS)
+		mmr->mmr.access = mr_access_flags;
 
 release_mpt_entry:
 	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 67780452f0cf..9c5150c3cb31 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 			     MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
 			pr_warn("modify QP %06x to RESET failed.\n",
 				qp->mqp.qpn);
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 	 * If one was already assigned, but the new mac differs,
 	 * unregister the old one and register the new one.
 	 */
-	if (!smac_info->smac || smac_info->smac != smac) {
+	if ((!smac_info->smac && !smac_info->smac_port) ||
+	    smac_info->smac != smac) {
 		/* register candidate now, unreg if needed, after success */
 		smac_index = mlx4_register_mac(dev->dev, port, smac);
 		if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
 				    struct mlx4_qp_context *context)
 {
-	struct net_device *ndev;
 	u64 u64_mac;
 	int smac_index;
 
-
-	ndev = dev->iboe.netdevs[qp->port - 1];
-	if (ndev) {
-		smac = ndev->dev_addr;
-		u64_mac = mlx4_mac_to_u64(smac);
-	} else {
-		u64_mac = dev->dev->caps.def_mac[qp->port];
-	}
+	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-	if (!qp->pri.smac) {
+	if (!qp->pri.smac && !qp->pri.smac_port) {
 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
 		if (smac_index >= 0) {
 			qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	int steer_qp = 0;
 	int err = -EINVAL;
 
+	/* APM is not supported under RoCE */
+	if (attr_mask & IB_QP_ALT_PATH &&
+	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+	    IB_LINK_LAYER_ETHERNET)
+		return -ENOTSUPP;
+
 	context = kzalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
 		return -ENOMEM;
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					MLX4_IB_LINK_TYPE_ETH;
+		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+			/* set QP to receive both tunneled & non-tunneled packets */
+			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+				context->srqn = cpu_to_be32(7 << 28);
+		}
+	}
 
 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
 		int is_eth = rdma_port_get_link_layer(
@@ -1780,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			if (qp->flags & MLX4_IB_QP_NETIF)
 				mlx4_ib_steer_qp_reg(dev, qp, 0);
 		}
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1806,11 +1813,12 @@ out:
 	if (err && steer_qp)
 		mlx4_ib_steer_qp_reg(dev, qp, 0);
 	kfree(context);
-	if (qp->pri.candidate_smac) {
+	if (qp->pri.candidate_smac ||
+	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
 		if (err) {
 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
 		} else {
-			if (qp->pri.smac)
+			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = qp->pri.candidate_smac;
 			qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2083,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	return 0;
 }
 
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+	int i;
+
+	for (i = ETH_ALEN; i; i--) {
+		dst_mac[i - 1] = src_mac & 0xff;
+		src_mac >>= 8;
+	}
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
@@ -2197,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	}
 
 	if (is_eth) {
-		u8 *smac;
 		struct in6_addr in6;
 
 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2210,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
 		memcpy(&in6, sgid.raw, sizeof(in6));
 
-		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-			smac = to_mdev(sqp->qp.ibqp.device)->
-				iboe.netdevs[sqp->qp.port - 1]->dev_addr;
-		else	/* use the src mac of the tunnel */
-			smac = ah->av.eth.s_mac;
-		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+			u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+			u8 smac[ETH_ALEN];
+
+			mlx4_u64_to_smac(smac, mac);
+			memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+		} else {
+			/* use the src mac of the tunnel */
+			memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+		}
+
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
 		if (!is_vlan) {
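
mlx4_u64_to_smac() above unpacks the cached u64 into wire order: the least
significant byte of the u64 becomes the last byte of the 6-byte Ethernet
address. A standalone userspace check of that byte-order logic (u64_to_smac()
is a copy for illustration, not the kernel symbol):

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void u64_to_smac(uint8_t *dst_mac, uint64_t src_mac)
{
	int i;

	for (i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;	/* fill from the tail */
		src_mac >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	u64_to_smac(mac, 0x0002c9123456ULL);
	/* prints 00:02:c9:12:34:56 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}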
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 40f8536c10b0..ac02ce4e8040 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -38,7 +38,7 @@
 #define OCRDMA_VID_PCP_SHIFT	0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-			struct ib_ah_attr *attr, int pdid)
+			struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
 {
 	int status = 0;
 	u16 vlan_tag; bool vlan_enabled = false;
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	memset(&eth, 0, sizeof(eth));
 	memset(&grh, 0, sizeof(grh));
 
-	ah->sgid_index = attr->grh.sgid_index;
-
+	/* VLAN */
 	vlan_tag = attr->vlan_id;
 	if (!vlan_tag || (vlan_tag > 0xFFF))
 		vlan_tag = dev->pvid;
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
 		eth_sz = sizeof(struct ocrdma_eth_basic);
 	}
+	/* MAC */
 	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-	memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
 	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
 	if (status)
 		return status;
-	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
-			(union ib_gid *)&grh.sgid[0]);
-	if (status)
-		return status;
+	ah->sgid_index = attr->grh.sgid_index;
+	memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
 
 	grh.tclass_flow = cpu_to_be32((6 << 28) |
 			(attr->grh.traffic_class << 24) |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	/* 0x1b is next header value in GRH */
 	grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
 			(0x1b << 8) | attr->grh.hop_limit);
-
-	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+	/* Eth HDR */
 	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
 	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
 	if (vlan_enabled)
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 	struct ocrdma_ah *ah;
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+	union ib_gid sgid;
+	u8 zmac[ETH_ALEN];
 
 	if (!(attr->ah_flags & IB_AH_GRH))
 		return ERR_PTR(-EINVAL);
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 	status = ocrdma_alloc_av(dev, ah);
 	if (status)
 		goto av_err;
-	status = set_av_attr(dev, ah, attr, pd->id);
+
+	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+	if (status) {
+		pr_err("%s(): Failed to query sgid, status = %d\n",
+		       __func__, status);
+		goto av_conf_err;
+	}
+
+	memset(&zmac, 0, ETH_ALEN);
+	if (pd->uctx &&
+	    memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+		status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+						    attr->dmac, &attr->vlan_id);
+		if (status) {
+			pr_err("%s(): Failed to resolve dmac from gid."
+			       "status = %d\n", __func__, status);
+			goto av_conf_err;
+		}
+	}
+
+	status = set_av_attr(dev, ah, attr, &sgid, pd->id);
 	if (status)
 		goto av_conf_err;
 
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
 	struct ocrdma_av *av = ah->av;
 	struct ocrdma_grh *grh;
 	attr->ah_flags |= IB_AH_GRH;
-	if (ah->av->valid & Bit(1)) {
+	if (ah->av->valid & OCRDMA_AV_VALID) {
 		grh = (struct ocrdma_grh *)((u8 *)ah->av +
 				sizeof(struct ocrdma_eth_vlan));
 		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index acb434d16903..8f5f2577f288 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
 	attr->max_srq_sge = dev->attr.max_srq_sge;
 	attr->max_srq_wr = dev->attr.max_rqe;
 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
-	attr->max_fast_reg_page_list_len = 0;
+	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
 	attr->max_pkeys = 1;
 	return 0;
 }
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	if (cq->first_arm) {
 		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
 		cq->first_arm = false;
-		goto skip_defer;
 	}
-	cq->deferred_arm = true;
 
-skip_defer:
+	cq->deferred_arm = true;
 	cq->deferred_sol = sol_needed;
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 799a0c3bffc4..6abd3ed3cd51 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
 	struct qib_qp_iter *iter;
 	loff_t n = *pos;
 
+	rcu_read_lock();
 	iter = qib_qp_iter_init(s->private);
 	if (!iter)
 		return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
 {
-	/* nothing for now */
+	rcu_read_unlock();
 }
 
 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 7fcc150d603c..6ddc0264aad2 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
 	struct qib_qp *pqp = iter->qp;
 	struct qib_qp *qp;
 
-	rcu_read_lock();
 	for (; n < dev->qp_table_size; n++) {
 		if (pqp)
 			qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
 			qp = rcu_dereference(dev->qp_table[n]);
 		pqp = qp;
 		if (qp) {
-			if (iter->qp)
-				atomic_dec(&iter->qp->refcount);
-			atomic_inc(&qp->refcount);
-			rcu_read_unlock();
 			iter->qp = qp;
 			iter->n = n;
 			return 0;
 		}
 	}
-	rcu_read_unlock();
-	if (iter->qp)
-		atomic_dec(&iter->qp->refcount);
 	return ret;
 }
 
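
Both qib hunks move RCU protection out to the seq_file callbacks: instead of
taking rcu_read_lock() around each iteration step and pinning the current QP
with a refcount, the whole debugfs walk now runs inside one read-side critical
section opened in _qp_stats_seq_start() and closed in _qp_stats_seq_stop().
A reduced sketch of the resulting shape (qp_iter_first()/qp_iter_next() are
illustrative helpers, not the driver's symbols):

#include <linux/seq_file.h>
#include <linux/rcupdate.h>

void *qp_iter_first(void *priv, loff_t pos);
void *qp_iter_next(void *iter);

static void *qp_stats_seq_start(struct seq_file *s, loff_t *pos)
{
	rcu_read_lock();			/* held until ...seq_stop() */
	return qp_iter_first(s->private, *pos);
}

static void *qp_stats_seq_next(struct seq_file *s, void *iter, loff_t *pos)
{
	(*pos)++;
	return qp_iter_next(iter);	/* rcu_dereference() is legal here */
}

static void qp_stats_seq_stop(struct seq_file *s, void *iter)
{
	rcu_read_unlock();			/* pairs with ...seq_start() */
}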
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2bc1d2b96298..74f90b2619f6 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
  * Call with current->mm->mmap_sem held.
  */
 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-				struct page **p, struct vm_area_struct **vma)
+				struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __qib_get_user_pages(start_page, num_pages, p);
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3edce617c31b..d7562beb5423 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -131,6 +131,12 @@ struct ipoib_cb {
 	u8	hwaddr[INFINIBAND_ALEN];
 };
 
+static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
+	return (struct ipoib_cb *)skb->cb;
+}
+
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
 	struct ib_sa_mcmember_rec mcmember;
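
The new ipoib_skb_cb() helper replaces open-coded casts of skb->cb with one
that carries a compile-time guarantee: BUILD_BUG_ON() breaks the build if
struct ipoib_cb ever outgrows the 48-byte skb->cb scratch area. The same idea
in self-contained form (example_cb and SKB_CB_SIZE are illustrative):

#define SKB_CB_SIZE 48	/* sizeof(((struct sk_buff *)0)->cb) in mainline */

struct example_cb {
	unsigned long hwaddr_hint;
	unsigned char hwaddr[20];
};

/* C11 spelling of the kernel's BUILD_BUG_ON(): compile error if too big */
_Static_assert(sizeof(struct example_cb) <= SKB_CB_SIZE,
	       "example_cb must fit inside skb->cb");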
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1310acf6bf92..13e6e0431592 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh;
-	struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+	struct ipoib_cb *cb = ipoib_skb_cb(skb);
 	struct ipoib_header *header;
 	unsigned long flags;
 
@@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 			     const void *daddr, const void *saddr, unsigned len)
 {
 	struct ipoib_header *header;
-	struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+	struct ipoib_cb *cb = ipoib_skb_cb(skb);
 
 	header = (struct ipoib_header *) skb_push(skb, sizeof *header);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d4e005720d01..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
 			  port_attr.state);
 		return;
 	}
+	priv->local_lid = port_attr.lid;
 
 	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
 		ipoib_warn(priv, "ib_query_gid() failed\n");
 	else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-	{
-		struct ib_port_attr attr;
-
-		if (!ib_query_port(priv->ca, priv->port, &attr))
-			priv->local_lid = attr.lid;
-		else
-			ipoib_warn(priv, "ib_query_port failed\n");
-	}
-
 	if (!priv->broadcast) {
 		struct ipoib_mcast *broadcast;
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 61ee91d88380..93ce62fe1594 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 			int is_leading)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_session *session;
 	struct iser_conn *ib_conn;
 	struct iscsi_endpoint *ep;
 	int error;
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	}
 	ib_conn = ep->dd_data;
 
-	session = conn->session;
-	if (iser_alloc_rx_descriptors(ib_conn, session))
-		return -ENOMEM;
+	mutex_lock(&ib_conn->state_mutex);
+	if (ib_conn->state != ISER_CONN_UP) {
+		error = -EINVAL;
+		iser_err("iser_conn %p state is %d, teardown started\n",
+			 ib_conn, ib_conn->state);
+		goto out;
+	}
+
+	error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+	if (error)
+		goto out;
 
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	conn->dd_data = ib_conn;
 	ib_conn->iscsi_conn = conn;
 
-	return 0;
+out:
+	mutex_unlock(&ib_conn->state_mutex);
+	return error;
 }
 
 static int
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index c877dad381cb..9f0e0e34d6ca 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,7 +69,7 @@
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"1.4"
+#define DRV_VER		"1.4.1"
 
 #define iser_dbg(fmt, arg...)			\
 	do {					\
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3ef167f97d6f..3bfec4bbda52 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
 	struct iser_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr = &device->dev_attr;
-	int ret, i, j;
+	int ret, i;
 
 	ret = ib_query_device(device->ib_device, dev_attr);
 	if (ret) {
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device)
 					  iser_cq_event_callback,
 					  (void *)&cq_desc[i],
 					  ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->rx_cq[i]))
+		if (IS_ERR(device->rx_cq[i])) {
+			device->rx_cq[i] = NULL;
 			goto cq_err;
+		}
 
 		device->tx_cq[i] = ib_create_cq(device->ib_device,
 						NULL, iser_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_TX_CQ_LEN, i);
 
-		if (IS_ERR(device->tx_cq[i]))
+		if (IS_ERR(device->tx_cq[i])) {
+			device->tx_cq[i] = NULL;
 			goto cq_err;
+		}
 
 		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
 			goto cq_err;
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	for (j = 0; j < device->cqs_used; j++)
-		tasklet_kill(&device->cq_tasklet[j]);
+	for (i = 0; i < device->cqs_used; i++)
+		tasklet_kill(&device->cq_tasklet[i]);
 cq_err:
-	for (j = 0; j < i; j++) {
-		if (device->tx_cq[j])
-			ib_destroy_cq(device->tx_cq[j]);
-		if (device->rx_cq[j])
-			ib_destroy_cq(device->rx_cq[j]);
+	for (i = 0; i < device->cqs_used; i++) {
+		if (device->tx_cq[i])
+			ib_destroy_cq(device->tx_cq[i]);
+		if (device->rx_cq[i])
+			ib_destroy_cq(device->rx_cq[i]);
 	}
 	ib_dealloc_pd(device->pd);
 pd_err:
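
The iser_verbs.c hunks apply a common cleanup idiom: ib_create_cq() returns an
ERR_PTR() value on failure (not NULL), so the slot is reset to NULL before
jumping to the unwind label, letting a single loop walk every slot and destroy
only what was actually created. A minimal sketch of the idiom (struct thing
and create_thing()/destroy_thing() are illustrative):

#include <linux/err.h>
#include <linux/errno.h>

#define MAX_SLOTS 4

struct thing;
struct thing *create_thing(int idx);	/* returns ERR_PTR() on failure */
void destroy_thing(struct thing *t);

static int setup(struct thing *slots[MAX_SLOTS])
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++) {
		slots[i] = create_thing(i);
		if (IS_ERR(slots[i])) {
			slots[i] = NULL;	/* make the unwind loop safe */
			goto err;
		}
	}
	return 0;
err:
	for (i = 0; i < MAX_SLOTS; i++)
		if (slots[i])
			destroy_thing(slots[i]);
	return -ENOMEM;
}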
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d4c7928a0f36..da8ff124762a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -586,17 +586,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	init_completion(&isert_conn->conn_wait);
 	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
-	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
 	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
-	isert_conn->responder_resources = event->param.conn.responder_resources;
-	isert_conn->initiator_depth = event->param.conn.initiator_depth;
-	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
-		 isert_conn->responder_resources, isert_conn->initiator_depth);
 
 	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
 					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
@@ -643,6 +638,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		goto out_rsp_dma_map;
 	}
 
+	/* Set max inflight RDMA READ requests */
+	isert_conn->initiator_depth = min_t(u8,
+				event->param.conn.initiator_depth,
+				device->dev_attr.max_qp_init_rd_atom);
+	pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+
 	isert_conn->conn_device = device;
 	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
 	if (IS_ERR(isert_conn->conn_pd)) {
@@ -746,7 +747,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-	return;
+	struct isert_conn *isert_conn = cma_id->context;
+
+	kref_get(&isert_conn->conn_kref);
 }
 
 static void
@@ -798,7 +801,6 @@ isert_disconnect_work(struct work_struct *work)
 
 wake_up:
 	complete(&isert_conn->conn_wait);
-	isert_put_conn(isert_conn);
 }
 
 static void
@@ -3067,7 +3069,6 @@ isert_rdma_accept(struct isert_conn *isert_conn)
 	int ret;
 
 	memset(&cp, 0, sizeof(struct rdma_conn_param));
-	cp.responder_resources = isert_conn->responder_resources;
 	cp.initiator_depth = isert_conn->initiator_depth;
 	cp.retry_count = 7;
 	cp.rnr_retry_count = 7;
@@ -3215,7 +3216,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 	pr_debug("isert_wait_conn: Starting \n");
 
 	mutex_lock(&isert_conn->conn_mutex);
-	if (isert_conn->conn_cm_id) {
+	if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
 		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
 		rdma_disconnect(isert_conn->conn_cm_id);
 	}
@@ -3234,6 +3235,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 	wait_for_completion(&isert_conn->conn_wait_comp_err);
 
 	wait_for_completion(&isert_conn->conn_wait);
+	isert_put_conn(isert_conn);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
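
The isert hunks rebalance the connection kref: the extra reference is now
taken only when the CM actually reports the connection established
(isert_connected_handler()), and the matching put moves from the disconnect
work into isert_wait_conn(), so the same logout path that waits for the
completions also drops the reference. A reduced sketch of the balance
(struct conn and the handler names are illustrative):

#include <linux/kref.h>
#include <linux/slab.h>

struct conn {
	struct kref kref;
};

static void conn_release(struct kref *kref)
{
	kfree(container_of(kref, struct conn, kref));
}

/* connect request: kref_init() leaves the count at 1 */
/* connected event: take the reference the logout path will drop */
static void on_connected(struct conn *c)
{
	kref_get(&c->kref);
}

/* wait/logout path: drop the connected-time reference */
static void on_wait_done(struct conn *c)
{
	kref_put(&c->kref, conn_release);
}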