author	Sean Hefty <sean.hefty@intel.com>	2007-06-18 14:09:36 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-07-11 00:47:29 -0400
commit	24be6e81c78314c91a47200272eb4bc31284bd7b (patch)
tree	bbdc6d8789ae5e2be3dad7930b3eacbbb1dcddec /drivers/infiniband
parent	2aec5c602c6a44e2a3a173339a9ab94549658e4b (diff)
IB/cm: Use spin_lock_irq() instead of spin_lock_irqsave() when possible
The ib_cm is a little overzealous about using spin_lock_irqsave, when
spin_lock_irq would do.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
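For readers skimming the patch, here is a minimal sketch (not part of this patch; demo_lock and the two helper functions are hypothetical names) of the difference being exploited: the irqsave variant saves and restores the caller's interrupt state, while the plain _irq variant unconditionally disables and re-enables interrupts. The plain form is cheaper, but it is only correct where IRQs are known to be enabled on entry, such as these CM handlers, which run in process context.

/* Minimal sketch, not from the patch; demo_lock and both functions
 * are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe in any context: the current IRQ state is saved in 'flags'
 * and exactly that state is restored on unlock.
 */
static void demo_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Cheaper, but only correct when IRQs are known to be enabled on
 * entry: spin_unlock_irq() unconditionally re-enables them, which
 * would be a bug inside an interrupt handler or nested under
 * another irqsave section.
 */
static void demo_irq(void)
{
	spin_lock_irq(&demo_lock);
	/* ... critical section ... */
	spin_unlock_irq(&demo_lock);
}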
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/cm.c	171
1 file changed, 75 insertions(+), 96 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 40c004a2697e..16181d655854 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -318,12 +318,10 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 
 static void cm_free_id(__be32 local_id)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cm.lock, flags);
+	spin_lock_irq(&cm.lock);
 	idr_remove(&cm.local_id_table,
 		   (__force int) (local_id ^ cm.random_id_operand));
-	spin_unlock_irqrestore(&cm.lock, flags);
+	spin_unlock_irq(&cm.lock);
 }
 
 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
@@ -345,11 +343,10 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
 static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cm.lock, flags);
+	spin_lock_irq(&cm.lock);
 	cm_id_priv = cm_get_id(local_id, remote_id);
-	spin_unlock_irqrestore(&cm.lock, flags);
+	spin_unlock_irq(&cm.lock);
 
 	return cm_id_priv;
 }
@@ -713,31 +710,30 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
-	unsigned long flags;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 retest:
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id->state) {
 	case IB_CM_LISTEN:
 		cm_id->state = IB_CM_IDLE;
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		spin_lock_irqsave(&cm.lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
+		spin_lock_irq(&cm.lock);
 		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 		break;
 	case IB_CM_SIDR_REQ_SENT:
 		cm_id->state = IB_CM_IDLE;
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		break;
 	case IB_CM_SIDR_REQ_RCVD:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
 		break;
 	case IB_CM_REQ_SENT:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
 			       &cm_id_priv->id.device->node_guid,
 			       sizeof cm_id_priv->id.device->node_guid,
@@ -747,9 +743,9 @@ retest:
 		if (err == -ENOMEM) {
 			/* Do not reject to allow future retries. */
 			cm_reset_to_idle(cm_id_priv);
-			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+			spin_unlock_irq(&cm_id_priv->lock);
 		} else {
-			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+			spin_unlock_irq(&cm_id_priv->lock);
 			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 				       NULL, 0, NULL, 0);
 		}
@@ -762,25 +758,25 @@ retest:
 	case IB_CM_MRA_REQ_SENT:
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 			       NULL, 0, NULL, 0);
 		break;
 	case IB_CM_ESTABLISHED:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ib_send_cm_dreq(cm_id, NULL, 0);
 		goto retest;
 	case IB_CM_DREQ_SENT:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		cm_enter_timewait(cm_id_priv);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		break;
 	case IB_CM_DREQ_RCVD:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ib_send_cm_drep(cm_id, NULL, 0);
 		break;
 	default:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		break;
 	}
 
@@ -1169,7 +1165,6 @@ static void cm_format_req_event(struct cm_work *work,
 static void cm_process_work(struct cm_id_private *cm_id_priv,
 			    struct cm_work *work)
 {
-	unsigned long flags;
 	int ret;
 
 	/* We will typically only have the current event to report. */
@@ -1177,9 +1172,9 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
 	cm_free_work(work);
 
 	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
-		spin_lock_irqsave(&cm_id_priv->lock, flags);
+		spin_lock_irq(&cm_id_priv->lock);
 		work = cm_dequeue_work(cm_id_priv);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		BUG_ON(!work);
 		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
 						&work->cm_event);
@@ -1250,7 +1245,6 @@ static void cm_dup_req_handler(struct cm_work *work,
 			       struct cm_id_private *cm_id_priv)
 {
 	struct ib_mad_send_buf *msg = NULL;
-	unsigned long flags;
 	int ret;
 
 	/* Quick state check to discard duplicate REQs. */
@@ -1261,7 +1255,7 @@ static void cm_dup_req_handler(struct cm_work *work,
 	if (ret)
 		return;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
 	case IB_CM_MRA_REQ_SENT:
 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
@@ -1276,14 +1270,14 @@ static void cm_dup_req_handler(struct cm_work *work,
 	default:
 		goto unlock;
 	}
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
 		goto free;
 	return;
 
-unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock:	spin_unlock_irq(&cm_id_priv->lock);
 free:	cm_free_msg(msg);
 }
 
@@ -1293,17 +1287,16 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
 	struct cm_timewait_info *timewait_info;
 	struct cm_req_msg *req_msg;
-	unsigned long flags;
 
 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
 
 	/* Check for possible duplicate REQ. */
-	spin_lock_irqsave(&cm.lock, flags);
+	spin_lock_irq(&cm.lock);
 	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
 	if (timewait_info) {
 		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
 					   timewait_info->work.remote_id);
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 		if (cur_cm_id_priv) {
 			cm_dup_req_handler(work, cur_cm_id_priv);
 			cm_deref_id(cur_cm_id_priv);
@@ -1315,7 +1308,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
 	if (timewait_info) {
 		cm_cleanup_timewait(cm_id_priv->timewait_info);
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 		cm_issue_rej(work->port, work->mad_recv_wc,
 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
 			     NULL, 0);
@@ -1328,7 +1321,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 				   req_msg->private_data);
 	if (!listen_cm_id_priv) {
 		cm_cleanup_timewait(cm_id_priv->timewait_info);
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 		cm_issue_rej(work->port, work->mad_recv_wc,
 			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
 			     NULL, 0);
@@ -1338,7 +1331,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	atomic_inc(&cm_id_priv->refcount);
 	cm_id_priv->id.state = IB_CM_REQ_RCVD;
 	atomic_inc(&cm_id_priv->work_count);
-	spin_unlock_irqrestore(&cm.lock, flags);
+	spin_unlock_irq(&cm.lock);
 out:
 	return listen_cm_id_priv;
 }
@@ -1591,7 +1584,6 @@ static void cm_dup_rep_handler(struct cm_work *work)
 	struct cm_id_private *cm_id_priv;
 	struct cm_rep_msg *rep_msg;
 	struct ib_mad_send_buf *msg = NULL;
-	unsigned long flags;
 	int ret;
 
 	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
@@ -1604,7 +1596,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
 	if (ret)
 		goto deref;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
 		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
 			      cm_id_priv->private_data,
@@ -1616,14 +1608,14 @@ static void cm_dup_rep_handler(struct cm_work *work)
 			      cm_id_priv->private_data_len);
 	else
 		goto unlock;
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
 		goto free;
 	goto deref;
 
-unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock:	spin_unlock_irq(&cm_id_priv->lock);
 free:	cm_free_msg(msg);
 deref:	cm_deref_id(cm_id_priv);
 }
@@ -1632,7 +1624,6 @@ static int cm_rep_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_rep_msg *rep_msg;
-	unsigned long flags;
 	int ret;
 
 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1644,13 +1635,13 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_format_rep_event(work);
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
 		break;
 	default:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
 		goto error;
 	}
@@ -1663,7 +1654,7 @@ static int cm_rep_handler(struct cm_work *work)
 	/* Check for duplicate REP. */
 	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
 		spin_unlock(&cm.lock);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
 		goto error;
 	}
@@ -1673,7 +1664,7 @@ static int cm_rep_handler(struct cm_work *work)
 			 &cm.remote_id_table);
 		cm_id_priv->timewait_info->inserted_remote_id = 0;
 		spin_unlock(&cm.lock);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		cm_issue_rej(work->port, work->mad_recv_wc,
 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
 			     NULL, 0);
@@ -1696,7 +1687,7 @@ static int cm_rep_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -1712,7 +1703,6 @@ error:
 static int cm_establish_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
-	unsigned long flags;
 	int ret;
 
 	/* See comment in cm_establish about lookup. */
@@ -1720,9 +1710,9 @@ static int cm_establish_handler(struct cm_work *work)
 	if (!cm_id_priv)
 		return -EINVAL;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto out;
 	}
 
@@ -1730,7 +1720,7 @@ static int cm_establish_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -1746,7 +1736,6 @@ static int cm_rtu_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_rtu_msg *rtu_msg;
-	unsigned long flags;
 	int ret;
 
 	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1757,10 +1746,10 @@ static int cm_rtu_handler(struct cm_work *work)
 
 	work->cm_event.private_data = &rtu_msg->private_data;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto out;
 	}
 	cm_id_priv->id.state = IB_CM_ESTABLISHED;
@@ -1769,7 +1758,7 @@ static int cm_rtu_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -1932,7 +1921,6 @@ static int cm_dreq_handler(struct cm_work *work)
 	struct cm_id_private *cm_id_priv;
 	struct cm_dreq_msg *dreq_msg;
 	struct ib_mad_send_buf *msg = NULL;
-	unsigned long flags;
 	int ret;
 
 	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1945,7 +1933,7 @@ static int cm_dreq_handler(struct cm_work *work)
 
 	work->cm_event.private_data = &dreq_msg->private_data;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
 		goto unlock;
 
@@ -1964,7 +1952,7 @@ static int cm_dreq_handler(struct cm_work *work)
 		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
 			       cm_id_priv->private_data,
 			       cm_id_priv->private_data_len);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 
 		if (ib_post_send_mad(msg, NULL))
 			cm_free_msg(msg);
@@ -1977,7 +1965,7 @@ static int cm_dreq_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -1985,7 +1973,7 @@ static int cm_dreq_handler(struct cm_work *work)
 	cm_deref_id(cm_id_priv);
 	return 0;
 
-unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock:	spin_unlock_irq(&cm_id_priv->lock);
 deref:	cm_deref_id(cm_id_priv);
 	return -EINVAL;
 }
@@ -1994,7 +1982,6 @@ static int cm_drep_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_drep_msg *drep_msg;
-	unsigned long flags;
 	int ret;
 
 	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2005,10 +1992,10 @@ static int cm_drep_handler(struct cm_work *work)
 
 	work->cm_event.private_data = &drep_msg->private_data;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
 	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto out;
 	}
 	cm_enter_timewait(cm_id_priv);
@@ -2017,7 +2004,7 @@ static int cm_drep_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -2107,17 +2094,16 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
 {
 	struct cm_timewait_info *timewait_info;
 	struct cm_id_private *cm_id_priv;
-	unsigned long flags;
 	__be32 remote_id;
 
 	remote_id = rej_msg->local_comm_id;
 
 	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
-		spin_lock_irqsave(&cm.lock, flags);
+		spin_lock_irq(&cm.lock);
 		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
 						  remote_id);
 		if (!timewait_info) {
-			spin_unlock_irqrestore(&cm.lock, flags);
+			spin_unlock_irq(&cm.lock);
 			return NULL;
 		}
 		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
@@ -2129,7 +2115,7 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
 			else
 				cm_id_priv = NULL;
 		}
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
 		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
 	else
@@ -2142,7 +2128,6 @@ static int cm_rej_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_rej_msg *rej_msg;
-	unsigned long flags;
 	int ret;
 
 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2152,7 +2137,7 @@ static int cm_rej_handler(struct cm_work *work)
 
 	cm_format_rej_event(work);
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
@@ -2176,7 +2161,7 @@ static int cm_rej_handler(struct cm_work *work)
 		cm_enter_timewait(cm_id_priv);
 		break;
 	default:
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -2184,7 +2169,7 @@ static int cm_rej_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -2295,7 +2280,6 @@ static int cm_mra_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_mra_msg *mra_msg;
-	unsigned long flags;
 	int timeout, ret;
 
 	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2309,7 +2293,7 @@ static int cm_mra_handler(struct cm_work *work)
 	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
 		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
 	case IB_CM_REQ_SENT:
 		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
@@ -2342,7 +2326,7 @@ static int cm_mra_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -2350,7 +2334,7 @@ static int cm_mra_handler(struct cm_work *work)
 	cm_deref_id(cm_id_priv);
 	return 0;
 out:
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 	cm_deref_id(cm_id_priv);
 	return -EINVAL;
 }
@@ -2465,7 +2449,6 @@ static int cm_lap_handler(struct cm_work *work)
 	struct cm_lap_msg *lap_msg;
 	struct ib_cm_lap_event_param *param;
 	struct ib_mad_send_buf *msg = NULL;
-	unsigned long flags;
 	int ret;
 
 	/* todo: verify LAP request and send reject APR if invalid. */
@@ -2480,7 +2463,7 @@ static int cm_lap_handler(struct cm_work *work)
 	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
 	work->cm_event.private_data = &lap_msg->private_data;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
 		goto unlock;
 
@@ -2497,7 +2480,7 @@ static int cm_lap_handler(struct cm_work *work)
 		       cm_id_priv->service_timeout,
 		       cm_id_priv->private_data,
 		       cm_id_priv->private_data_len);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ib_post_send_mad(msg, NULL))
 		cm_free_msg(msg);
@@ -2515,7 +2498,7 @@ static int cm_lap_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -2523,7 +2506,7 @@ static int cm_lap_handler(struct cm_work *work)
 	cm_deref_id(cm_id_priv);
 	return 0;
 
-unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock:	spin_unlock_irq(&cm_id_priv->lock);
 deref:	cm_deref_id(cm_id_priv);
 	return -EINVAL;
 }
@@ -2598,7 +2581,6 @@ static int cm_apr_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_apr_msg *apr_msg;
-	unsigned long flags;
 	int ret;
 
 	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2612,11 +2594,11 @@ static int cm_apr_handler(struct cm_work *work)
 	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
 	work->cm_event.private_data = &apr_msg->private_data;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
 	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
 	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto out;
 	}
 	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
@@ -2626,7 +2608,7 @@ static int cm_apr_handler(struct cm_work *work)
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -2761,7 +2743,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
 	struct cm_sidr_req_msg *sidr_req_msg;
 	struct ib_wc *wc;
-	unsigned long flags;
 
 	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
 	if (IS_ERR(cm_id))
@@ -2782,10 +2763,10 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->tid = sidr_req_msg->hdr.tid;
 	atomic_inc(&cm_id_priv->work_count);
 
-	spin_lock_irqsave(&cm.lock, flags);
+	spin_lock_irq(&cm.lock);
 	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
 	if (cur_cm_id_priv) {
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 		goto out; /* Duplicate message. */
 	}
 	cur_cm_id_priv = cm_find_listen(cm_id->device,
@@ -2793,12 +2774,12 @@ static int cm_sidr_req_handler(struct cm_work *work)
 					sidr_req_msg->private_data);
 	if (!cur_cm_id_priv) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
-		spin_unlock_irqrestore(&cm.lock, flags);
+		spin_unlock_irq(&cm.lock);
 		/* todo: reply with no match */
 		goto out; /* No match. */
 	}
 	atomic_inc(&cur_cm_id_priv->refcount);
-	spin_unlock_irqrestore(&cm.lock, flags);
+	spin_unlock_irq(&cm.lock);
 
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
@@ -2899,7 +2880,6 @@ static int cm_sidr_rep_handler(struct cm_work *work)
 {
 	struct cm_sidr_rep_msg *sidr_rep_msg;
 	struct cm_id_private *cm_id_priv;
-	unsigned long flags;
 
 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
 			work->mad_recv_wc->recv_buf.mad;
@@ -2907,14 +2887,14 @@ static int cm_sidr_rep_handler(struct cm_work *work)
 	if (!cm_id_priv)
 		return -EINVAL; /* Unmatched reply. */
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto out;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	cm_format_sidr_rep_event(work);
 	cm_process_work(cm_id_priv, work);
@@ -2930,14 +2910,13 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
 	struct cm_id_private *cm_id_priv;
 	struct ib_cm_event cm_event;
 	enum ib_cm_state state;
-	unsigned long flags;
 	int ret;
 
 	memset(&cm_event, 0, sizeof cm_event);
 	cm_id_priv = msg->context[0];
 
 	/* Discard old sends or ones without a response. */
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	state = (enum ib_cm_state) (unsigned long) msg->context[1];
 	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
 		goto discard;
@@ -2964,7 +2943,7 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
 	default:
 		goto discard;
 	}
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 	cm_event.param.send_status = wc_status;
 
 	/* No other events can occur on the cm_id at this point. */
@@ -2974,7 +2953,7 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
 	ib_destroy_cm_id(&cm_id_priv->id);
 	return;
 discard:
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 	cm_free_msg(msg);
 }
 