author     Bjorn Helgaas <bhelgaas@google.com>   2012-09-13 17:54:57 -0400
committer  Bjorn Helgaas <bhelgaas@google.com>   2012-09-13 17:54:57 -0400
commit     9a5d5bd8480068c5829e3d997ee21dab9b3ed05f
tree       f8bea83deb720d4fa3a9ada8d4845406c2d7c8f0 /drivers/infiniband
parent     271fd03a3013b106ccc178d54219c1be0c9759b7
parent     55d512e245bc7699a8800e23df1a24195dd08217
Merge commit 'v3.6-rc5' into pci/gavin-window-alignment
* commit 'v3.6-rc5': (1098 commits)
Linux 3.6-rc5
HID: tpkbd: work even if the new Lenovo Keyboard driver is not configured
Remove user-triggerable BUG from mpol_to_str
xen/pciback: Fix proper FLR steps.
uml: fix compile error in deliver_alarm()
dj: memory scribble in logi_dj
Fix order of arguments to compat_put_time[spec|val]
xen: Use correct masking in xen_swiotlb_alloc_coherent.
xen: fix logical error in tlb flushing
xen/p2m: Fix one-off error in checking the P2M tree directory.
powerpc: Don't use __put_user() in patch_instruction
powerpc: Make sure IPI handlers see data written by IPI senders
powerpc: Restore correct DSCR in context switch
powerpc: Fix DSCR inheritance in copy_thread()
powerpc: Keep thread.dscr and thread.dscr_inherit in sync
powerpc: Update DSCR on all CPUs when writing sysfs dscr_default
powerpc/powernv: Always go into nap mode when CPU is offline
powerpc: Give hypervisor decrementer interrupts their own handler
powerpc/vphn: Fix arch_update_cpu_topology() return value
ARM: gemini: fix the gemini build
...
Conflicts:
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/rapidio/devices/tsi721.c
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/core/ucma.c             |  2
 drivers/infiniband/hw/amso1100/c2_rnic.c   |  2
 drivers/infiniband/hw/cxgb3/iwch_cm.c      |  2
 drivers/infiniband/hw/mlx4/mad.c           | 16
 drivers/infiniband/hw/mlx4/main.c          |  5
 drivers/infiniband/hw/mlx4/qp.c            |  6
 drivers/infiniband/hw/ocrdma/ocrdma_main.c | 16
 drivers/infiniband/hw/qib/qib_iba7322.c    |  4
 drivers/infiniband/hw/qib/qib_sd7220.c     |  2
 drivers/infiniband/ulp/ipoib/ipoib_cm.c    |  3
 drivers/infiniband/ulp/ipoib/ipoib_main.c  |  2
 drivers/infiniband/ulp/srp/ib_srp.c        | 87
 drivers/infiniband/ulp/srpt/ib_srpt.c      |  2
13 files changed, 101 insertions, 48 deletions
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 6bf850422895..055ed59838dc 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	if (!uevent)
 		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
 
+	mutex_lock(&ctx->file->mut);
 	uevent->cm_id = cm_id;
 	ucma_set_event_context(ctx, event, uevent);
 	uevent->resp.event = event->event;
@@ -277,7 +278,6 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 		ucma_copy_conn_event(&uevent->resp.param.conn,
 				     &event->param.conn);
 
-	mutex_lock(&ctx->file->mut);
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
 			ret = -ENOMEM;
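
The ucma.c hunks above move mutex_lock(&ctx->file->mut) ahead of the code that fills in the event, so the event is initialized with the file mutex already held instead of being locked only afterwards. A minimal userspace sketch of that "initialize and publish under one lock" pattern, with pthreads standing in for the kernel mutex and all names invented for the illustration:

```c
#include <pthread.h>
#include <stdio.h>

struct demo_event {
	int id;
	int payload;
	struct demo_event *next;
};

static struct demo_event *queue_head;
static pthread_mutex_t queue_mut = PTHREAD_MUTEX_INITIALIZER;

static void post_event(struct demo_event *ev, int id, int payload)
{
	pthread_mutex_lock(&queue_mut);
	ev->id = id;               /* initialize the event...            */
	ev->payload = payload;
	ev->next = queue_head;     /* ...and publish it under the same   */
	queue_head = ev;           /* lock, so readers never see a       */
	pthread_mutex_unlock(&queue_mut);  /* half-filled entry          */
}

int main(void)
{
	struct demo_event ev;

	post_event(&ev, 1, 42);
	printf("queued event %d with payload %d\n",
	       queue_head->id, queue_head->payload);
	return 0;
}
```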
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 8c81992fa6db..e4a73158fc7f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
 
 /*
  * Called by c2_probe to initialize the RNIC. This principally
- * involves initalizing the various limits and resouce pools that
+ * involves initializing the various limits and resource pools that
  * comprise the RNIC instance.
  */
 int __devinit c2_rnic_init(struct c2_dev *c2dev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 77b6b182778a..aaf88ef9409c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
  * T3A does 3 things when a TERM is received:
  * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
  * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
  *
  * For (1), we save the message in the qp for later consumer consumption.
  * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c27141fef1ab..9c2ae7efd00f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 {
 	struct ib_ah *new_ah;
 	struct ib_ah_attr ah_attr;
+	unsigned long flags;
 
 	if (!dev->send_agent[port_num - 1][0])
 		return;
@@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 	if (IS_ERR(new_ah))
 		return;
 
-	spin_lock(&dev->sm_lock);
+	spin_lock_irqsave(&dev->sm_lock, flags);
 	if (dev->sm_ah[port_num - 1])
 		ib_destroy_ah(dev->sm_ah[port_num - 1]);
 	dev->sm_ah[port_num - 1] = new_ah;
-	spin_unlock(&dev->sm_lock);
+	spin_unlock_irqrestore(&dev->sm_lock, flags);
 }
 
 /*
@@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 static void node_desc_override(struct ib_device *dev,
 			       struct ib_mad *mad)
 {
+	unsigned long flags;
+
 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
 	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
-		spin_lock(&to_mdev(dev)->sm_lock);
+		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
 		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
-		spin_unlock(&to_mdev(dev)->sm_lock);
+		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
 	}
 }
 
@@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	struct ib_mad_send_buf *send_buf;
 	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
 	int ret;
+	unsigned long flags;
 
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 		 * wrong following the IB spec strictly, but we know
 		 * it's OK for our devices).
 		 */
-		spin_lock(&dev->sm_lock);
+		spin_lock_irqsave(&dev->sm_lock, flags);
 		memcpy(send_buf->mad, mad, sizeof *mad);
 		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
 			ret = ib_post_send_mad(send_buf, NULL);
 		else
 			ret = -EINVAL;
-		spin_unlock(&dev->sm_lock);
+		spin_unlock_irqrestore(&dev->sm_lock, flags);
 
 		if (ret)
 			ib_free_send_mad(send_buf);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fe2088cfa6ee..cc05579ebce7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 				 struct ib_device_modify *props)
 {
 	struct mlx4_cmd_mailbox *mailbox;
+	unsigned long flags;
 
 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 		return -EOPNOTSUPP;
@@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
 		return 0;
 
-	spin_lock(&to_mdev(ibdev)->sm_lock);
+	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
 	memcpy(ibdev->node_desc, props->node_desc, 64);
-	spin_unlock(&to_mdev(ibdev)->sm_lock);
+	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
 
 	/*
 	 * If possible, pass node desc to FW, so it can generate
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a6d8ea060ea8..f585eddef4b7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
 	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct net_device *ndev;
 	union ib_gid sgid;
 	u16 pkey;
 	int send_size;
@@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
 		/* FIXME: cache smac value? */
-		smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
+		if (!ndev)
+			return -ENODEV;
+		smac = ndev->dev_addr;
 		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5a044526e4f4..c4e0131f1b57 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
 	ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
 	struct net_device *netdev, *tmp;
@@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
 				  unsigned long event, void *ptr)
 {
 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
-	struct net_device *event_netdev = ifa->idev->dev;
-	struct net_device *netdev = NULL;
+	struct net_device *netdev = ifa->idev->dev;
 	struct ib_event gid_event;
 	struct ocrdma_dev *dev;
 	bool found = false;
@@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
 	bool is_vlan = false;
 	u16 vid = 0;
 
-	netdev = vlan_dev_real_dev(event_netdev);
-	if (netdev != event_netdev) {
-		is_vlan = true;
-		vid = vlan_dev_vlan_id(event_netdev);
+	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
+	if (is_vlan) {
+		vid = vlan_dev_vlan_id(netdev);
+		netdev = vlan_dev_real_dev(netdev);
 	}
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
 		if (dev->nic_info.netdev == netdev) {
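
The ocrdma hunks above replace the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) test with IS_ENABLED(CONFIG_FOO), which is 1 whether the option is built in or built as a module. Below is a standalone, slightly simplified reconstruction of that macro for illustration (the real definition lives in include/linux/kconfig.h; CONFIG_VLAN_8021Q_MODULE is defined here only to drive the demo):

```c
#include <stdio.h>

#define CONFIG_VLAN_8021Q_MODULE 1	/* pretend the option is "=m" */

/* If CONFIG_FOO is defined to 1, the placeholder trick below makes the
 * "0," prefix appear and the second argument (1) is selected; otherwise
 * the whole thing collapses to the fallback 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option)  __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
	printf("IS_BUILTIN(CONFIG_VLAN_8021Q) = %d\n", IS_BUILTIN(CONFIG_VLAN_8021Q));
	printf("IS_MODULE(CONFIG_VLAN_8021Q)  = %d\n", IS_MODULE(CONFIG_VLAN_8021Q));
	printf("IS_ENABLED(CONFIG_VLAN_8021Q) = %d\n", IS_ENABLED(CONFIG_VLAN_8021Q));
	return 0;
}
```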
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 0d7280af99bc..3f6b21e9dc11 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 			dd->piobcnt4k * dd->align4k;
 		dd->piovl15base = ioremap_nocache(vl15off,
 						  NUM_VL15_BUFS * dd->align4k);
-		if (!dd->piovl15base)
+		if (!dd->piovl15base) {
+			ret = -ENOMEM;
 			goto bail;
+		}
 	}
 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
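
The qib_iba7322.c hunk is a goto-cleanup fix: the ioremap_nocache() failure branch jumped to bail without setting an error code, so the function returned whatever value ret already held. A standalone sketch of the pattern, assuming arbitrary allocations as stand-ins and using -ENOMEM from <errno.h>:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int setup_buffers(size_t len, char **out)
{
	char *scratch = NULL;
	char *mapped = NULL;
	int ret = 0;

	scratch = malloc(len);
	if (!scratch) {
		ret = -ENOMEM;
		goto bail;
	}

	mapped = malloc(len);
	if (!mapped) {
		ret = -ENOMEM;	/* the assignment the original branch lacked */
		goto bail;
	}

	memset(mapped, 0, len);
	*out = mapped;
	free(scratch);
	return 0;

bail:
	free(scratch);
	free(mapped);
	return ret;
}

int main(void)
{
	char *buf = NULL;
	int ret = setup_buffers(64, &buf);

	printf("setup_buffers() returned %d\n", ret);
	free(buf);
	return 0;
}
```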
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index a322d5171a2c..50a8a0d4fe67 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
 		/* Read CTRL reg for each channel to check TRIMDONE */
 		if (baduns & (1 << chn)) {
 			qib_dev_err(dd,
-				"Reseting TRIMDONE on chn %d (%s)\n",
+				"Resetting TRIMDONE on chn %d (%s)\n",
 				chn, where);
 			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_CTRL2(chn), 0x10, 0x10);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 95ecf4eadf5f..24683fda8e21 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1271,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
+	unsigned long flags;
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
 		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 97920b77a5d0..3e2085a3ee47 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1052,7 +1052,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
 	for (n = rcu_dereference_protected(*np,
 					   lockdep_is_held(&ntbl->rwlock));
 	     n != NULL;
-	     n = rcu_dereference_protected(neigh->hnext,
+	     n = rcu_dereference_protected(*np,
 					   lockdep_is_held(&ntbl->rwlock))) {
 		if (n == neigh) {
 			/* found */
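
The ipoib_main.c one-liner fixes the loop-advance expression in a pointer-to-pointer walk of a neighbour hash chain: the step must re-read *np, the link currently being examined, not the hnext of the node being freed. A self-contained sketch of the same walk on a plain singly linked list (types and names invented for the demo):

```c
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Unlink the first node whose value matches, returning it (or NULL). */
static struct node *list_remove(struct node **head, int val)
{
	struct node **np = head;
	struct node *n;

	for (n = *np; n != NULL; n = *np) {   /* re-read *np on every step */
		if (n->val == val) {
			*np = n->next;        /* splice the node out */
			n->next = NULL;
			return n;
		}
		np = &n->next;                /* advance the link pointer */
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;
	struct node *removed = list_remove(&head, 2);

	printf("removed %d; list is now %d -> %d\n",
	       removed ? removed->val : -1, head->val, head->next->val);
	return 0;
}
```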
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index bcbf22ee0aa7..1b5b0c730054 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			     scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target,
-			   struct srp_request *req, s32 req_lim_delta)
+/**
+ * srp_claim_req - Take ownership of the scmnd associated with a request.
+ * @target: SRP target port.
+ * @req: SRP request.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ *         ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
+				       struct srp_request *req,
+				       struct scsi_cmnd *scmnd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	if (!scmnd) {
+		scmnd = req->scmnd;
+		req->scmnd = NULL;
+	} else if (req->scmnd == scmnd) {
+		req->scmnd = NULL;
+	} else {
+		scmnd = NULL;
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
+	return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and add request to the free request list.
+ */
+static void srp_free_req(struct srp_target_port *target,
+			 struct srp_request *req, struct scsi_cmnd *scmnd,
+			 s32 req_lim_delta)
 {
 	unsigned long flags;
 
-	srp_unmap_data(req->scmnd, target, req);
+	srp_unmap_data(scmnd, target, req);
+
 	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_lim_delta;
-	req->scmnd = NULL;
 	list_add_tail(&req->list, &target->free_reqs);
 	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
-	req->scmnd->result = DID_RESET << 16;
-	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req, 0);
+	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+	if (scmnd) {
+		scmnd->result = DID_RESET << 16;
+		scmnd->scsi_done(scmnd);
+		srp_free_req(target, req, scmnd, 0);
+	}
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
@@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 			complete(&target->tsk_mgmt_done);
 	} else {
 		req = &target->req_ring[rsp->tag];
-		scmnd = req->scmnd;
-		if (!scmnd)
+		scmnd = srp_claim_req(target, req, NULL);
+		if (!scmnd) {
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %016llx\n",
 				     (unsigned long long) rsp->tag);
+
+			spin_lock_irqsave(&target->lock, flags);
+			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+			spin_unlock_irqrestore(&target->lock, flags);
+
+			return;
+		}
 		scmnd->result = rsp->status;
 
 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		srp_free_req(target, req, scmnd,
+			     be32_to_cpu(rsp->req_lim_delta));
+
 		scmnd->host_scribble = NULL;
 		scmnd->scsi_done(scmnd);
 	}
@@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
-	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (!req || target->qp_in_error)
+	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
-			      SRP_TSK_ABORT_TASK))
-		return FAILED;
-
-	if (req->scmnd) {
-		if (!target->tsk_mgmt_status) {
-			srp_remove_req(target, req, 0);
-			scmnd->result = DID_ABORT << 16;
-		} else
-			ret = FAILED;
-	}
+	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			  SRP_TSK_ABORT_TASK);
+	srp_free_req(target, req, scmnd, 0);
+	scmnd->result = DID_ABORT << 16;
 
-	return ret;
+	return SUCCESS;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
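
The ib_srp.c changes route every completion path through srp_claim_req(), so exactly one context (normal response, abort, or reset) clears req->scmnd under target->lock and thereby becomes responsible for finishing the command. A userspace sketch of that claim-under-lock ownership hand-off, with pthreads standing in for the kernel spinlock and all types invented for the demo:

```c
#include <pthread.h>
#include <stdio.h>

struct demo_cmd { int tag; };

struct demo_req {
	struct demo_cmd *cmd;          /* owned by the request until claimed */
	pthread_mutex_t lock;
};

/* Claim the command: if expected is NULL take whatever is there,
 * otherwise only take it if it is still the command we expect. */
static struct demo_cmd *demo_claim(struct demo_req *req, struct demo_cmd *expected)
{
	struct demo_cmd *cmd = NULL;

	pthread_mutex_lock(&req->lock);
	if (!expected || req->cmd == expected) {
		cmd = req->cmd;
		req->cmd = NULL;       /* transfer ownership to the caller */
	}
	pthread_mutex_unlock(&req->lock);
	return cmd;
}

int main(void)
{
	struct demo_cmd c = { .tag = 42 };
	struct demo_req req = { .cmd = &c };

	pthread_mutex_init(&req.lock, NULL);

	/* First claimant (e.g. the normal completion path) wins... */
	printf("first claim: %s\n",
	       demo_claim(&req, NULL) ? "got the command" : "lost the race");

	/* ...a later claimant (e.g. the abort handler) sees it is gone. */
	printf("second claim: %s\n",
	       demo_claim(&req, &c) ? "got the command" : "lost the race");
	return 0;
}
```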
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 7a0ce8d42887..9e1449f8c6a2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
  *
  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
  * the data that has been transferred via IB RDMA had to be postponed until the
- * check_stop_free() callback. None of this is nessecary anymore and needs to
+ * check_stop_free() callback. None of this is necessary anymore and needs to
  * be cleaned up.
  */
 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,