Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/af_inet.c      17
-rw-r--r--   net/ipv4/icmp.c         23
-rw-r--r--   net/ipv4/ip_gre.c       25
-rw-r--r--   net/ipv4/ping.c          4
-rw-r--r--   net/ipv4/tcp_output.c   18
5 files changed, 60 insertions, 27 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e225a4e5b572..68f6a94f7661 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -248,8 +248,12 @@ EXPORT_SYMBOL(inet_listen);
 u32 inet_ehash_secret __read_mostly;
 EXPORT_SYMBOL(inet_ehash_secret);
 
+u32 ipv6_hash_secret __read_mostly;
+EXPORT_SYMBOL(ipv6_hash_secret);
+
 /*
- * inet_ehash_secret must be set exactly once
+ * inet_ehash_secret must be set exactly once, and to a non nul value
+ * ipv6_hash_secret must be set exactly once.
  */
 void build_ehash_secret(void)
 {
@@ -259,7 +263,8 @@ void build_ehash_secret(void)
                 get_random_bytes(&rnd, sizeof(rnd));
         } while (rnd == 0);
 
-        cmpxchg(&inet_ehash_secret, 0, rnd);
+        if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
+                get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
@@ -1327,8 +1332,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                         if (skb->next != NULL)
                                 iph->frag_off |= htons(IP_MF);
                         offset += (skb->len - skb->mac_len - iph->ihl * 4);
-                } else
-                        iph->id = htons(id++);
+                } else {
+                        if (!(iph->frag_off & htons(IP_DF)))
+                                iph->id = htons(id++);
+                }
                 iph->tot_len = htons(skb->len - skb->mac_len);
                 iph->check = 0;
                 iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
@@ -1572,7 +1579,7 @@ static const struct net_offload udp_offload = {
 
 static const struct net_protocol icmp_protocol = {
         .handler = icmp_rcv,
-        .err_handler = ping_err,
+        .err_handler = icmp_err,
         .no_policy = 1,
         .netns_ok = 1,
 };
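
The build_ehash_secret() change above uses the return value of cmpxchg() as a gate: only the caller that flips inet_ehash_secret from zero goes on to seed ipv6_hash_secret, so both secrets are written exactly once even with concurrent callers. A minimal userspace sketch of that once-only pattern using C11 atomics; secret_a, secret_b and read_random() are illustrative stand-ins, not kernel symbols:

/*
 * Once-only seeding of two secrets, gated by a compare-and-swap on the
 * first one. The thread that wins the CAS is the only one that writes
 * the second secret; losers see a non-zero value and change nothing.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uint32_t secret_a;   /* plays the role of inet_ehash_secret */
static uint32_t secret_b;           /* plays the role of ipv6_hash_secret */

static uint32_t read_random(void)
{
        uint32_t r;

        do {
                r = (uint32_t)rand();   /* stand-in for get_random_bytes() */
        } while (r == 0);
        return r;
}

static void build_secrets(void)
{
        uint32_t rnd = read_random();
        uint32_t expected = 0;

        /* Only the caller that flips secret_a from 0 seeds secret_b. */
        if (atomic_compare_exchange_strong(&secret_a, &expected, rnd))
                secret_b = read_random();
}

int main(void)
{
        build_secrets();
        build_secrets();   /* second call is a no-op for both secrets */
        printf("a=%u b=%u\n", (unsigned)atomic_load(&secret_a), (unsigned)secret_b);
        return 0;
}

The same reasoning applies in the kernel path: callers that lose the cmpxchg() observe a non-zero inet_ehash_secret and leave both secrets untouched.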
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 17ff9fd7cdda..3ac5dff79627 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,6 +934,29 @@ error:
         goto drop;
 }
 
+void icmp_err(struct sk_buff *skb, u32 info)
+{
+        struct iphdr *iph = (struct iphdr *)skb->data;
+        struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+        int type = icmp_hdr(skb)->type;
+        int code = icmp_hdr(skb)->code;
+        struct net *net = dev_net(skb->dev);
+
+        /*
+         * Use ping_err to handle all icmp errors except those
+         * triggered by ICMP_ECHOREPLY which sent from kernel.
+         */
+        if (icmph->type != ICMP_ECHOREPLY) {
+                ping_err(skb, info);
+                return;
+        }
+
+        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
+        else if (type == ICMP_REDIRECT)
+                ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
+}
+
 /*
  * This table is the definition of how we handle ICMP.
  */
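
icmp_err() above locates the ICMP header quoted inside the error payload by stepping iph->ihl * 4 bytes past the embedded IP header, and only handles PMTU updates and redirects itself when that inner packet was a kernel-sent echo reply; everything else goes to ping_err() for the ping sockets. A rough userspace model of that header walk over a hand-built buffer (the printed actions are illustrative, not kernel behaviour):

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Stand-in for the quoted datagram inside an ICMP error:
         * inner IP header followed by the inner ICMP header. */
        _Alignas(uint32_t) uint8_t payload[sizeof(struct iphdr) + sizeof(struct icmphdr)];
        struct iphdr *iph = (struct iphdr *)payload;
        struct icmphdr *icmph;

        memset(payload, 0, sizeof(payload));
        iph->ihl = 5;                             /* 20-byte inner IP header */
        icmph = (struct icmphdr *)(payload + (iph->ihl << 2));
        icmph->type = ICMP_ECHOREPLY;             /* the packet that drew the error */

        if (icmph->type != ICMP_ECHOREPLY)
                printf("hand off to ping_err(): a ping socket sent this packet\n");
        else
                printf("kernel-sent echo reply: handle PMTU update / redirect directly\n");
        return 0;
}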
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5ef4da780ac1..d0ef0e674ec5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,7 +735,7 @@ drop:
         return 0;
 }
 
-static struct sk_buff *handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
 {
         int err;
 
@@ -745,8 +745,12 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb)
                         goto error;
                 skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
                 return skb;
-        }
-        if (skb->ip_summed != CHECKSUM_PARTIAL)
+        } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                   tunnel->parms.o_flags&GRE_CSUM) {
+                err = skb_checksum_help(skb);
+                if (unlikely(err))
+                        goto error;
+        } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                 skb->ip_summed = CHECKSUM_NONE;
 
         return skb;
@@ -776,7 +780,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
         int err;
         int pkt_len;
 
-        skb = handle_offloads(skb);
+        skb = handle_offloads(tunnel, skb);
         if (IS_ERR(skb)) {
                 dev->stats.tx_dropped++;
                 return NETDEV_TX_OK;
@@ -970,7 +974,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
         iph->daddr = fl4.daddr;
         iph->saddr = fl4.saddr;
         iph->ttl = ttl;
-        iph->id = 0;
+
+        tunnel_ip_select_ident(skb, old_iph, &rt->dst);
 
         if (ttl == 0) {
                 if (skb->protocol == htons(ETH_P_IP))
@@ -1101,14 +1106,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
         tunnel->hlen = addend;
         /* TCP offload with GRE SEQ is not supported. */
         if (!(tunnel->parms.o_flags & GRE_SEQ)) {
-                /* device supports enc gso offload*/
-                if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
-                        dev->features |= NETIF_F_TSO;
-                        dev->hw_features |= NETIF_F_TSO;
-                } else {
-                        dev->features |= NETIF_F_GSO_SOFTWARE;
-                        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-                }
+                dev->features |= NETIF_F_GSO_SOFTWARE;
+                dev->hw_features |= NETIF_F_GSO_SOFTWARE;
         }
 
         return mtu;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 55c4ee1bba06..2e91006d6076 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info)
         struct iphdr *iph = (struct iphdr *)skb->data;
         struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
         struct inet_sock *inet_sock;
-        int type = icmph->type;
-        int code = icmph->code;
+        int type = icmp_hdr(skb)->type;
+        int code = icmp_hdr(skb)->code;
         struct net *net = dev_net(skb->dev);
         struct sock *sk;
         int harderr;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fd0cea114b5d..e2b4461074da 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1351,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
         return 0;
 }
 
-/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+/* Calculate MSS not accounting any TCP options. */
+static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1381,13 +1381,17 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
         /* Then reserve room for full set of TCP options and 8 bytes of data */
         if (mss_now < 48)
                 mss_now = 48;
-
-        /* Now subtract TCP options size, not including SACKs */
-        mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
-
         return mss_now;
 }
 
+/* Calculate MSS. Not accounting for SACKs here. */
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+{
+        /* Subtract TCP options size, not including SACKs */
+        return __tcp_mtu_to_mss(sk, pmtu) -
+               (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+}
+
 /* Inverse of above */
 int tcp_mss_to_mtu(struct sock *sk, int mss)
 {
@@ -2930,7 +2934,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
          */
         if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
                 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
-        space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+        space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
                 MAX_TCP_OPTION_SPACE;
 
         syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
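
The split into __tcp_mtu_to_mss() above matters for tcp_send_syn_data(): that path already reserves MAX_TCP_OPTION_SPACE for options, so also subtracting the per-connection option bytes (as the old tcp_mtu_to_mss() did) shrinks the Fast Open payload budget twice. A worked example under assumed sizes (1500-byte PMTU, 20-byte IPv4 header, timestamps enabled); these are illustrative numbers, not values read from a live socket:

#include <stdio.h>

#define SIZEOF_TCPHDR        20
#define MAX_TCP_OPTION_SPACE 40

/* Roughly what __tcp_mtu_to_mss() computes: strip IP and base TCP headers. */
static int mtu_to_mss_no_opts(int pmtu, int ip_hdr_len)
{
        return pmtu - ip_hdr_len - SIZEOF_TCPHDR;
}

int main(void)
{
        int pmtu = 1500, ip_hdr_len = 20;
        int tcp_header_len = SIZEOF_TCPHDR + 12;   /* timestamps add 12 bytes */

        int mss_no_opts = mtu_to_mss_no_opts(pmtu, ip_hdr_len);
        int mss_with_opts = mss_no_opts - (tcp_header_len - SIZEOF_TCPHDR);

        /* SYN-data budget: option space is reserved separately, so the
         * option-free MSS must be used or options are charged twice. */
        int space_old = mss_with_opts - MAX_TCP_OPTION_SPACE;  /* before the patch */
        int space_new = mss_no_opts - MAX_TCP_OPTION_SPACE;    /* after the patch  */

        printf("mss=%d, syn-data space old=%d new=%d\n",
               mss_with_opts, space_old, space_new);
        return 0;
}

With these numbers the available SYN data space grows from 1408 to 1420 bytes, since the 12 option bytes are no longer counted on top of MAX_TCP_OPTION_SPACE.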