author     Michael S. Tsirkin <mst@mellanox.co.il>    2006-07-14 03:23:52 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-07-15 00:53:50 -0400
commit     04c335430f6d9f9088c852bc05a3a0c8baa921c4
tree       61be60558ca4c1204594f242f281fee59f7851c2   /drivers
parent     0964d9161826ca5cab5a03192490ec676c8abf8c
[PATCH] IB/cm: drop REQ when out of memory
If a user of the IB CM returns -ENOMEM from their connection callback, simply
drop the incoming REQ - do not attempt to send a reject. This should allow
the sender to retry the request.
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
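
As a rough illustration of the consumer-side behavior the commit message describes (not part of this patch), the sketch below shows a hypothetical CM event handler; the names my_conn and my_cm_handler are invented for the example. Only the return value matters here: with this change, returning -ENOMEM from the IB_CM_REQ_RECEIVED callback makes the CM core drop the incoming REQ silently instead of sending a REJ, so the remote peer's retries can succeed once memory pressure subsides.

#include <linux/errno.h>
#include <linux/slab.h>
#include <rdma/ib_cm.h>

/* Illustrative per-connection state for this example only. */
struct my_conn {
	struct ib_cm_id *cm_id;
};

static int my_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct my_conn *conn;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		conn = kzalloc(sizeof(*conn), GFP_KERNEL);
		if (!conn)
			/*
			 * A non-zero return tells the CM core to destroy
			 * this cm_id; with this patch, -ENOMEM in the
			 * REQ-received state drops the REQ without a REJ.
			 */
			return -ENOMEM;
		conn->cm_id = cm_id;
		cm_id->context = conn;
		/* ... would normally continue toward ib_send_cm_rep() ... */
		return 0;
	default:
		return 0;
	}
}

Previously any non-zero return from the callback ended up in ib_destroy_cm_id(), which for the REQ-received state always sent ib_send_cm_rej(..., IB_CM_REJ_CONSUMER_DEFINED, ...); after this patch cm_process_work() forwards the callback's return code to the new cm_destroy_id(), which on -ENOMEM resets the cm_id to idle and skips the REJ.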
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/infiniband/core/cm.c | 21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 3f6705f3083a..f85c97f7500a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -701,7 +701,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
 	}
 }
 
-void ib_destroy_cm_id(struct ib_cm_id *cm_id)
+static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 {
 	struct cm_id_private *cm_id_priv;
 	struct cm_work *work;
@@ -735,12 +735,22 @@ retest:
 			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
 			       NULL, 0);
 		break;
+	case IB_CM_REQ_RCVD:
+		if (err == -ENOMEM) {
+			/* Do not reject to allow future retries. */
+			cm_reset_to_idle(cm_id_priv);
+			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		} else {
+			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+				       NULL, 0, NULL, 0);
+		}
+		break;
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		/* Fall through */
-	case IB_CM_REQ_RCVD:
 	case IB_CM_MRA_REQ_SENT:
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
@@ -775,6 +785,11 @@ retest:
 	kfree(cm_id_priv->private_data);
 	kfree(cm_id_priv);
 }
+
+void ib_destroy_cm_id(struct ib_cm_id *cm_id)
+{
+	cm_destroy_id(cm_id, 0);
+}
 EXPORT_SYMBOL(ib_destroy_cm_id);
 
 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
@@ -1163,7 +1178,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
 	}
 	cm_deref_id(cm_id_priv);
 	if (ret)
-		ib_destroy_cm_id(&cm_id_priv->id);
+		cm_destroy_id(&cm_id_priv->id, ret);
 }
 
 static void cm_format_mra(struct cm_mra_msg *mra_msg,