diff options
author | Erez Shitrit <erezsh@mellanox.com> | 2017-02-01 12:10:05 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-03-14 22:02:43 -0400 |
commit | 2e539fa49efda450229e3a13db5202b4d9ae2997 (patch) | |
tree | e12fd808608aed4136a84ef3931457e9b999a385 /drivers/infiniband | |
parent | 1626076b8e1cde49becc0e68d2779174e6a6f599 (diff) |
IB/IPoIB: Add destination address when re-queue packet
commit 2b0841766a898aba84630fb723989a77a9d3b4e6 upstream.
When sending packet to destination that was not resolved yet
via path query, the driver keeps the skb and tries to re-send it
again when the path is resolved.
But when re-sending via dev_queue_xmit the kernel doesn't call
dev_hard_header, so IPoIB needs to keep 20 bytes in the skb
and put the destination address inside them.
That way, dev_start_xmit will have the correct destination,
and the driver won't take the destination from skb->data, where
nothing exists, which would cause the packet to be dropped.
The test flow is:
1. Run the SM on a remote node.
2. Restart the driver.
3. Ping some destination.
4. Observe that the first ICMP request is dropped.
Fixes: fc791b633515 ("IB/ipoib: move back IB LL address into the hard header")
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Signed-off-by: Noa Osherovich <noaos@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Tested-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_main.c | 30 |
1 files changed, 17 insertions, 13 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index a5d9678f3b33..3ef7b8f049c4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -701,6 +701,14 @@ int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv) | |||
701 | return ret; | 701 | return ret; |
702 | } | 702 | } |
703 | 703 | ||
704 | static void push_pseudo_header(struct sk_buff *skb, const char *daddr) | ||
705 | { | ||
706 | struct ipoib_pseudo_header *phdr; | ||
707 | |||
708 | phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr)); | ||
709 | memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); | ||
710 | } | ||
711 | |||
704 | void ipoib_flush_paths(struct net_device *dev) | 712 | void ipoib_flush_paths(struct net_device *dev) |
705 | { | 713 | { |
706 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 714 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
@@ -925,8 +933,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, | |||
925 | } | 933 | } |
926 | if (skb_queue_len(&neigh->queue) < | 934 | if (skb_queue_len(&neigh->queue) < |
927 | IPOIB_MAX_PATH_REC_QUEUE) { | 935 | IPOIB_MAX_PATH_REC_QUEUE) { |
928 | /* put pseudoheader back on for next time */ | 936 | push_pseudo_header(skb, neigh->daddr); |
929 | skb_push(skb, IPOIB_PSEUDO_LEN); | ||
930 | __skb_queue_tail(&neigh->queue, skb); | 937 | __skb_queue_tail(&neigh->queue, skb); |
931 | } else { | 938 | } else { |
932 | ipoib_warn(priv, "queue length limit %d. Packet drop.\n", | 939 | ipoib_warn(priv, "queue length limit %d. Packet drop.\n", |
@@ -944,10 +951,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, | |||
944 | 951 | ||
945 | if (!path->query && path_rec_start(dev, path)) | 952 | if (!path->query && path_rec_start(dev, path)) |
946 | goto err_path; | 953 | goto err_path; |
947 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) | 954 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
955 | push_pseudo_header(skb, neigh->daddr); | ||
948 | __skb_queue_tail(&neigh->queue, skb); | 956 | __skb_queue_tail(&neigh->queue, skb); |
949 | else | 957 | } else { |
950 | goto err_drop; | 958 | goto err_drop; |
959 | } | ||
951 | } | 960 | } |
952 | 961 | ||
953 | spin_unlock_irqrestore(&priv->lock, flags); | 962 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -983,8 +992,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | |||
983 | } | 992 | } |
984 | if (path) { | 993 | if (path) { |
985 | if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 994 | if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
986 | /* put pseudoheader back on for next time */ | 995 | push_pseudo_header(skb, phdr->hwaddr); |
987 | skb_push(skb, IPOIB_PSEUDO_LEN); | ||
988 | __skb_queue_tail(&path->queue, skb); | 996 | __skb_queue_tail(&path->queue, skb); |
989 | } else { | 997 | } else { |
990 | ++dev->stats.tx_dropped; | 998 | ++dev->stats.tx_dropped; |
@@ -1016,8 +1024,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, | |||
1016 | return; | 1024 | return; |
1017 | } else if ((path->query || !path_rec_start(dev, path)) && | 1025 | } else if ((path->query || !path_rec_start(dev, path)) && |
1018 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 1026 | skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
1019 | /* put pseudoheader back on for next time */ | 1027 | push_pseudo_header(skb, phdr->hwaddr); |
1020 | skb_push(skb, IPOIB_PSEUDO_LEN); | ||
1021 | __skb_queue_tail(&path->queue, skb); | 1028 | __skb_queue_tail(&path->queue, skb); |
1022 | } else { | 1029 | } else { |
1023 | ++dev->stats.tx_dropped; | 1030 | ++dev->stats.tx_dropped; |
@@ -1098,8 +1105,7 @@ send_using_neigh: | |||
1098 | } | 1105 | } |
1099 | 1106 | ||
1100 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { | 1107 | if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { |
1101 | /* put pseudoheader back on for next time */ | 1108 | push_pseudo_header(skb, phdr->hwaddr); |
1102 | skb_push(skb, sizeof(*phdr)); | ||
1103 | spin_lock_irqsave(&priv->lock, flags); | 1109 | spin_lock_irqsave(&priv->lock, flags); |
1104 | __skb_queue_tail(&neigh->queue, skb); | 1110 | __skb_queue_tail(&neigh->queue, skb); |
1105 | spin_unlock_irqrestore(&priv->lock, flags); | 1111 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -1131,7 +1137,6 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
1131 | unsigned short type, | 1137 | unsigned short type, |
1132 | const void *daddr, const void *saddr, unsigned len) | 1138 | const void *daddr, const void *saddr, unsigned len) |
1133 | { | 1139 | { |
1134 | struct ipoib_pseudo_header *phdr; | ||
1135 | struct ipoib_header *header; | 1140 | struct ipoib_header *header; |
1136 | 1141 | ||
1137 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); | 1142 | header = (struct ipoib_header *) skb_push(skb, sizeof *header); |
@@ -1144,8 +1149,7 @@ static int ipoib_hard_header(struct sk_buff *skb, | |||
1144 | * destination address into skb hard header so we can figure out where | 1149 | * destination address into skb hard header so we can figure out where |
1145 | * to send the packet later. | 1150 | * to send the packet later. |
1146 | */ | 1151 | */ |
1147 | phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr)); | 1152 | push_pseudo_header(skb, daddr); |
1148 | memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); | ||
1149 | 1153 | ||
1150 | return IPOIB_HARD_LEN; | 1154 | return IPOIB_HARD_LEN; |
1151 | } | 1155 | } |