Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c  | 16
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c |  5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c   |  6
3 files changed, 18 insertions, 9 deletions
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c27141fef1ab..9c2ae7efd00f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 {
         struct ib_ah *new_ah;
         struct ib_ah_attr ah_attr;
+        unsigned long flags;
 
         if (!dev->send_agent[port_num - 1][0])
                 return;
@@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
         if (IS_ERR(new_ah))
                 return;
 
-        spin_lock(&dev->sm_lock);
+        spin_lock_irqsave(&dev->sm_lock, flags);
         if (dev->sm_ah[port_num - 1])
                 ib_destroy_ah(dev->sm_ah[port_num - 1]);
         dev->sm_ah[port_num - 1] = new_ah;
-        spin_unlock(&dev->sm_lock);
+        spin_unlock_irqrestore(&dev->sm_lock, flags);
 }
 
 /*
@@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 static void node_desc_override(struct ib_device *dev,
                                struct ib_mad *mad)
 {
+        unsigned long flags;
+
         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
              mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
             mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
             mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
-                spin_lock(&to_mdev(dev)->sm_lock);
+                spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
                 memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
-                spin_unlock(&to_mdev(dev)->sm_lock);
+                spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
         }
 }
 
@@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
         struct ib_mad_send_buf *send_buf;
         struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
         int ret;
+        unsigned long flags;
 
         if (agent) {
                 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
                  * wrong following the IB spec strictly, but we know
                  * it's OK for our devices).
                  */
-                spin_lock(&dev->sm_lock);
+                spin_lock_irqsave(&dev->sm_lock, flags);
                 memcpy(send_buf->mad, mad, sizeof *mad);
                 if ((send_buf->ah = dev->sm_ah[port_num - 1]))
                         ret = ib_post_send_mad(send_buf, NULL);
                 else
                         ret = -EINVAL;
-                spin_unlock(&dev->sm_lock);
+                spin_unlock_irqrestore(&dev->sm_lock, flags);
 
                 if (ret)
                         ib_free_send_mad(send_buf);
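
The mad.c hunks switch sm_lock from plain spin_lock() to the spin_lock_irqsave()/spin_unlock_irqrestore() variants. This is the usual fix when a lock may also be taken from a context where interrupts are disabled (for example a completion or event handler): if an interrupt-side user runs on the same CPU while a process-context path holds the lock with interrupts enabled, the CPU deadlocks. Below is a minimal, self-contained sketch of that pattern; my_lock, my_state and the function names are hypothetical stand-ins for sm_lock and the state it protects, not code from this patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);        /* stand-in for dev->sm_lock */
static int my_state;                    /* stand-in for the state sm_lock protects */

/* Process-context path: save and disable local interrupts around the lock. */
static void update_state(int val)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);
        my_state = val;
        spin_unlock_irqrestore(&my_lock, flags);
}

/*
 * Path that may run with interrupts off (e.g. a completion handler).  If
 * update_state() used plain spin_lock() and this ran on the same CPU while
 * the lock was held there, the handler would spin on the lock forever.
 */
static void read_state_in_irq(int *out)
{
        spin_lock(&my_lock);
        *out = my_state;
        spin_unlock(&my_lock);
}
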
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fe2088cfa6ee..cc05579ebce7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                  struct ib_device_modify *props)
 {
         struct mlx4_cmd_mailbox *mailbox;
+        unsigned long flags;
 
         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                 return -EOPNOTSUPP;
@@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
         if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                 return 0;
 
-        spin_lock(&to_mdev(ibdev)->sm_lock);
+        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
         memcpy(ibdev->node_desc, props->node_desc, 64);
-        spin_unlock(&to_mdev(ibdev)->sm_lock);
+        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
 
         /*
          * If possible, pass node desc to FW, so it can generate
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a6d8ea060ea8..f585eddef4b7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         struct mlx4_wqe_mlx_seg *mlx = wqe;
         struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
         struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+        struct net_device *ndev;
         union ib_gid sgid;
         u16 pkey;
         int send_size;
@@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
                 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
                 /* FIXME: cache smac value? */
-                smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+                ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
+                if (!ndev)
+                        return -ENODEV;
+                smac = ndev->dev_addr;
                 memcpy(sqp->ud_header.eth.smac_h, smac, 6);
                 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                         mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
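
The qp.c hunk stops build_mlx_header() from dereferencing iboe.netdevs[port - 1] unconditionally and instead returns -ENODEV when no net_device is currently attached for that port. A small userspace-style sketch of the same look-up/check/use pattern follows; the demo_* struct and function names are invented for illustration and are not part of the driver.

#include <errno.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

struct demo_netdev {
        unsigned char dev_addr[DEMO_ETH_ALEN];  /* MAC address of the device */
};

struct demo_port_table {
        struct demo_netdev *netdevs[2];         /* indexed by port - 1; slots may be NULL */
};

/*
 * Copy the source MAC for @port into @smac.  Mirrors the patched code: fetch
 * the pointer once, and bail out before touching it if the slot is empty.
 */
static int demo_copy_port_smac(struct demo_port_table *t, int port,
                               unsigned char *smac)
{
        struct demo_netdev *ndev = t->netdevs[port - 1];

        if (!ndev)
                return -ENODEV;

        memcpy(smac, ndev->dev_addr, DEMO_ETH_ALEN);
        return 0;
}
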