author     Roland Dreier <rolandd@cisco.com>   2007-04-25 00:30:37 -0400
committer  Roland Dreier <rolandd@cisco.com>   2007-04-25 00:30:37 -0400
commit     37aebbde7023d75bf09fbadb6796276d0a65a068 (patch)
tree       070a552df6bca142b3aa7e56329c635c8fabee22 /drivers
parent     de493d47d8b4738827d8914a4dc94058c58f4249 (diff)
IPoIB/cm: spin_lock_irqsave() -> spin_lock_irq() replacements
There are quite a few places in ipoib_cm.c where we know IRQs are
enabled because we do something that sleeps in the same function, so
we can convert several occurrences of spin_lock_irqsave() to a plain
spin_lock_irq(). This cleans up the source a little and makes the
code smaller too:
add/remove: 0/0 grow/shrink: 1/5 up/down: 3/-51 (-48)
function                         old     new   delta
ipoib_cm_tx_reap                 403     406      +3
ipoib_cm_stale_task              146     145      -1
ipoib_cm_dev_stop                173     172      -1
ipoib_cm_tx_handler              964     956      -8
ipoib_cm_rx_handler              956     937     -19
ipoib_cm_skb_reap                212     190     -22
Signed-off-by: Roland Dreier <rolandd@cisco.com>
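To illustrate the conversion, here is a minimal sketch of the two locking idioms; the lock and functions (demo_lock, demo_update_*) are hypothetical, not code from this patch. spin_lock_irqsave() saves the caller's IRQ state before disabling interrupts, so it is safe from any context; spin_lock_irq()/spin_unlock_irq() disable and unconditionally re-enable IRQs, which is only correct when the caller already knows IRQs are on, for instance because the same function sleeps.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock, for illustration only */

/* Safe in any context: preserves whatever IRQ state the caller had. */
static void demo_update_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch data also accessed from IRQ context ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}

/*
 * Valid only in process context with IRQs known to be enabled
 * (e.g. the function also sleeps): no flags to save or restore,
 * and slightly smaller generated code.
 */
static void demo_update_process_context(void)
{
        spin_lock_irq(&demo_lock);
        /* ... touch data also accessed from IRQ context ... */
        spin_unlock_irq(&demo_lock);
}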
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  56
1 file changed, 24 insertions(+), 32 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7a4af7a3e04f..da7e10230cf8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -228,7 +228,6 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
         struct net_device *dev = cm_id->context;
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_cm_rx *p;
-        unsigned long flags;
         unsigned psn;
         int ret;
 
@@ -257,9 +256,9 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
         cm_id->context = p;
         p->jiffies = jiffies;
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         list_add(&p->list, &priv->cm.passive_ids);
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
         queue_delayed_work(ipoib_workqueue,
                            &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
         return 0;
@@ -277,7 +276,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 {
         struct ipoib_cm_rx *p;
         struct ipoib_dev_priv *priv;
-        unsigned long flags;
         int ret;
 
         switch (event->event) {
@@ -290,14 +288,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
         case IB_CM_REJ_RECEIVED:
                 p = cm_id->context;
                 priv = netdev_priv(p->dev);
-                spin_lock_irqsave(&priv->lock, flags);
+                spin_lock_irq(&priv->lock);
                 if (list_empty(&p->list))
                         ret = 0; /* Connection is going away already. */
                 else {
                         list_del_init(&p->list);
                         ret = -ECONNRESET;
                 }
-                spin_unlock_irqrestore(&priv->lock, flags);
+                spin_unlock_irq(&priv->lock);
                 if (ret) {
                         ib_destroy_qp(p->qp);
                         kfree(p);
@@ -612,23 +610,22 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_cm_rx *p;
-        unsigned long flags;
 
         if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                 return;
 
         ib_destroy_cm_id(priv->cm.id);
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         while (!list_empty(&priv->cm.passive_ids)) {
                 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                 list_del_init(&p->list);
-                spin_unlock_irqrestore(&priv->lock, flags);
+                spin_unlock_irq(&priv->lock);
                 ib_destroy_cm_id(p->id);
                 ib_destroy_qp(p->qp);
                 kfree(p);
-                spin_lock_irqsave(&priv->lock, flags);
+                spin_lock_irq(&priv->lock);
         }
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
 
         cancel_delayed_work(&priv->cm.stale_task);
 }
@@ -642,7 +639,6 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
         struct ib_qp_attr qp_attr;
         int qp_attr_mask, ret;
         struct sk_buff *skb;
-        unsigned long flags;
 
         p->mtu = be32_to_cpu(data->mtu);
 
@@ -680,12 +676,12 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
         skb_queue_head_init(&skqueue);
 
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
         if (p->neigh)
                 while ((skb = __skb_dequeue(&p->neigh->queue)))
                         __skb_queue_tail(&skqueue, skb);
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
 
         while ((skb = __skb_dequeue(&skqueue))) {
                 skb->dev = p->dev;
@@ -895,7 +891,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
         struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
         struct net_device *dev = priv->dev;
         struct ipoib_neigh *neigh;
-        unsigned long flags;
         int ret;
 
         switch (event->event) {
@@ -914,7 +909,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
         case IB_CM_REJ_RECEIVED:
         case IB_CM_TIMEWAIT_EXIT:
                 ipoib_dbg(priv, "CM error %d.\n", event->event);
-                spin_lock_irqsave(&priv->tx_lock, flags);
+                spin_lock_irq(&priv->tx_lock);
                 spin_lock(&priv->lock);
                 neigh = tx->neigh;
 
@@ -934,7 +929,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                 }
 
                 spin_unlock(&priv->lock);
-                spin_unlock_irqrestore(&priv->tx_lock, flags);
+                spin_unlock_irq(&priv->tx_lock);
                 break;
         default:
                 break;
@@ -1023,21 +1018,20 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                    cm.reap_task);
         struct ipoib_cm_tx *p;
-        unsigned long flags;
 
-        spin_lock_irqsave(&priv->tx_lock, flags);
+        spin_lock_irq(&priv->tx_lock);
         spin_lock(&priv->lock);
         while (!list_empty(&priv->cm.reap_list)) {
                 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                 list_del(&p->list);
                 spin_unlock(&priv->lock);
-                spin_unlock_irqrestore(&priv->tx_lock, flags);
+                spin_unlock_irq(&priv->tx_lock);
                 ipoib_cm_tx_destroy(p);
-                spin_lock_irqsave(&priv->tx_lock, flags);
+                spin_lock_irq(&priv->tx_lock);
                 spin_lock(&priv->lock);
         }
         spin_unlock(&priv->lock);
-        spin_unlock_irqrestore(&priv->tx_lock, flags);
+        spin_unlock_irq(&priv->tx_lock);
 }
 
 static void ipoib_cm_skb_reap(struct work_struct *work)
@@ -1046,15 +1040,14 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
                                                    cm.skb_task);
         struct net_device *dev = priv->dev;
         struct sk_buff *skb;
-        unsigned long flags;
 
         unsigned mtu = priv->mcast_mtu;
 
-        spin_lock_irqsave(&priv->tx_lock, flags);
+        spin_lock_irq(&priv->tx_lock);
         spin_lock(&priv->lock);
         while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                 spin_unlock(&priv->lock);
-                spin_unlock_irqrestore(&priv->tx_lock, flags);
+                spin_unlock_irq(&priv->tx_lock);
                 if (skb->protocol == htons(ETH_P_IP))
                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1062,11 +1055,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
 #endif
                 dev_kfree_skb_any(skb);
-                spin_lock_irqsave(&priv->tx_lock, flags);
+                spin_lock_irq(&priv->tx_lock);
                 spin_lock(&priv->lock);
         }
         spin_unlock(&priv->lock);
-        spin_unlock_irqrestore(&priv->tx_lock, flags);
+        spin_unlock_irq(&priv->tx_lock);
 }
 
 void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
@@ -1088,9 +1081,8 @@ static void ipoib_cm_stale_task(struct work_struct *work)
         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                    cm.stale_task.work);
         struct ipoib_cm_rx *p;
-        unsigned long flags;
 
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         while (!list_empty(&priv->cm.passive_ids)) {
                 /* List if sorted by LRU, start from tail,
                  * stop when we see a recently used entry */
@@ -1098,13 +1090,13 @@ static void ipoib_cm_stale_task(struct work_struct *work)
                 if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                         break;
                 list_del_init(&p->list);
-                spin_unlock_irqrestore(&priv->lock, flags);
+                spin_unlock_irq(&priv->lock);
                 ib_destroy_cm_id(p->id);
                 ib_destroy_qp(p->qp);
                 kfree(p);
-                spin_lock_irqsave(&priv->lock, flags);
+                spin_lock_irq(&priv->lock);
         }
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
 }
 
 