author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 14:44:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 14:44:11 -0500
commit     1cef9350cbee6e3bcf2ff646b8978fbec33f8b85
tree       5a750b707dcfb66033da612e8fbf238e3a2b184f /net/ipv4
parent     ecc88efbe7adceb3f4bfdbbb1efb669efcaab124
parent     eb970ff07c15f13eb474f643fd165ebe3e4e24b2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) ping_err() ICMP error handler looks at wrong ICMP header, from Li Wei.

 2) TCP socket hash function on ipv6 is too weak, from Eric Dumazet.

 3) netif_set_xps_queue() forgets to drop mutex on errors, fix from Alexander Duyck.

 4) sum_frag_mem_limit() can deadlock due to lack of BH disabling, fix from Eric Dumazet.

 5) TCP SYN data is miscalculated in tcp_send_syn_data(), because the amount of TCP option space was not taken into account properly in this code path.  Fix from Yuchung Cheng.

 6) MLX4 driver allocates device queues with the wrong size, from Kleber Sacilotto.

 7) sock_diag can access past the end of the sock_diag_handlers[] array, from Mathias Krause.

 8) vlan_set_encap_proto() makes incorrect assumptions about where skb->data points; rework the logic so that it works regardless of where skb->data happens to be.  From Jesse Gross.

 9) Fix gianfar build failure with NET_POLL enabled, from Paul Gortmaker.

10) Fix IPv4 ID setting and checksum calculations in GRE driver, from Pravin B Shelar.

11) bgmac driver does:

        int i;

        for (i = 0; ...; ...) {
                ...
                for (i = 0; ...; ...) {

    effectively corrupting the outer loop index; use a separate variable for the inner loops (see the sketch after the commit list below).  From Rafał Miłecki.

12) Fix suspend bugs in smsc95xx driver, from Ming Lei.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (35 commits)
  usbnet: smsc95xx: rename FEATURE_AUTOSUSPEND
  usbnet: smsc95xx: fix broken runtime suspend
  usbnet: smsc95xx: fix suspend failure
  bgmac: fix indexing of 2nd level loops
  b43: Fix lockdep splat on module unload
  Revert "ip_gre: propogate target device GSO capability to the tunnel device"
  IP_GRE: Fix GRE_CSUM case.
  VXLAN: Use tunnel_ip_select_ident() for tunnel IP-Identification.
  IP_GRE: Fix IP-Identification.
  net/pasemi: Fix missing coding style
  vmxnet3: fix ethtool ring buffer size setting
  vmxnet3: make local function static
  bnx2x: remove dead code and make local funcs static
  gianfar: fix compile fail for NET_POLL=y due to struct packing
  vlan: adjust vlan_set_encap_proto() for its callers
  sock_diag: Simplify sock_diag_handlers[] handling in __sock_diag_rcv_msg
  sock_diag: Fix out-of-bounds access to sock_diag_handlers[]
  vxlan: remove depends on CONFIG_EXPERIMENTAL
  mlx4_en: fix allocation of CPU affinity reverse-map
  mlx4_en: fix allocation of device tx_cq
  ...
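
The loop-index bug called out in item 11 above is easy to reproduce outside the driver. The stand-alone C sketch below is purely illustrative (the ring and slot counts and the empty loop bodies are invented; none of this is bgmac code): reusing "i" for the inner loop leaves it equal to the inner bound when the outer loop condition is re-evaluated, so the outer loop terminates early, and giving the inner loop its own index restores the intended iteration count.

    /* Illustrative sketch of the bug in item 11; NUM_RINGS, NUM_SLOTS and
     * the empty loop bodies are made up for the example. */
    #include <stdio.h>

    #define NUM_RINGS 4
    #define NUM_SLOTS 3

    int main(void)
    {
        int i, j, outer_runs;

        /* Buggy shape: the inner loop reuses (and corrupts) i. */
        outer_runs = 0;
        for (i = 0; i < NUM_RINGS; i++) {
            outer_runs++;
            for (i = 0; i < NUM_SLOTS; i++)
                ;   /* ... set up one slot ... */
        }
        printf("buggy: outer body ran %d times (expected %d)\n",
               outer_runs, NUM_RINGS);

        /* Fixed shape: the inner loop gets its own index. */
        outer_runs = 0;
        for (i = 0; i < NUM_RINGS; i++) {
            outer_runs++;
            for (j = 0; j < NUM_SLOTS; j++)
                ;   /* ... set up one slot ... */
        }
        printf("fixed: outer body ran %d times (expected %d)\n",
               outer_runs, NUM_RINGS);

        return 0;
    }
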
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c     | 17
-rw-r--r--  net/ipv4/icmp.c        | 23
-rw-r--r--  net/ipv4/ip_gre.c      | 25
-rw-r--r--  net/ipv4/ping.c        |  4
-rw-r--r--  net/ipv4/tcp_output.c  | 18
5 files changed, 60 insertions, 27 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e225a4e5b572..68f6a94f7661 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -248,8 +248,12 @@ EXPORT_SYMBOL(inet_listen);
 u32 inet_ehash_secret __read_mostly;
 EXPORT_SYMBOL(inet_ehash_secret);
 
+u32 ipv6_hash_secret __read_mostly;
+EXPORT_SYMBOL(ipv6_hash_secret);
+
 /*
- * inet_ehash_secret must be set exactly once
+ * inet_ehash_secret must be set exactly once, and to a non nul value
+ * ipv6_hash_secret must be set exactly once.
  */
 void build_ehash_secret(void)
 {
@@ -259,7 +263,8 @@ void build_ehash_secret(void)
                 get_random_bytes(&rnd, sizeof(rnd));
         } while (rnd == 0);
 
-        cmpxchg(&inet_ehash_secret, 0, rnd);
+        if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
+                get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
@@ -1327,8 +1332,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                 if (skb->next != NULL)
                         iph->frag_off |= htons(IP_MF);
                 offset += (skb->len - skb->mac_len - iph->ihl * 4);
-        } else
-                iph->id = htons(id++);
+        } else {
+                if (!(iph->frag_off & htons(IP_DF)))
+                        iph->id = htons(id++);
+        }
         iph->tot_len = htons(skb->len - skb->mac_len);
         iph->check = 0;
         iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
@@ -1572,7 +1579,7 @@ static const struct net_offload udp_offload = {
 
 static const struct net_protocol icmp_protocol = {
         .handler = icmp_rcv,
-        .err_handler = ping_err,
+        .err_handler = icmp_err,
         .no_policy = 1,
         .netns_ok = 1,
 };
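
A note on the build_ehash_secret() hunk above: cmpxchg() returns the previous value, so only the caller that actually moves inet_ehash_secret from 0 to the freshly generated value goes on to seed ipv6_hash_secret; callers that lose the race leave both secrets untouched, and each secret is therefore written exactly once. The user-space sketch below mirrors that pattern with C11 atomics standing in for the kernel's cmpxchg() and rand() standing in for get_random_bytes(); all names here are invented for illustration.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic uint32_t primary_secret;   /* plays the role of inet_ehash_secret */
    static uint32_t secondary_secret;         /* plays the role of ipv6_hash_secret  */

    static void build_secrets(void)
    {
        uint32_t rnd, expected = 0;

        do {
            rnd = (uint32_t)rand();       /* stand-in for get_random_bytes() */
        } while (rnd == 0);               /* 0 means "not yet initialized"   */

        /* Only the winner of the 0 -> rnd transition seeds the second
         * secret, so both end up written exactly once. */
        if (atomic_compare_exchange_strong(&primary_secret, &expected, rnd))
            secondary_secret = (uint32_t)rand();
    }

    int main(void)
    {
        build_secrets();
        build_secrets();   /* the second call changes nothing */
        printf("%08x %08x\n", (unsigned)primary_secret, secondary_secret);
        return 0;
    }
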
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 17ff9fd7cdda..3ac5dff79627 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,6 +934,29 @@ error:
         goto drop;
 }
 
+void icmp_err(struct sk_buff *skb, u32 info)
+{
+        struct iphdr *iph = (struct iphdr *)skb->data;
+        struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+        int type = icmp_hdr(skb)->type;
+        int code = icmp_hdr(skb)->code;
+        struct net *net = dev_net(skb->dev);
+
+        /*
+         * Use ping_err to handle all icmp errors except those
+         * triggered by ICMP_ECHOREPLY which sent from kernel.
+         */
+        if (icmph->type != ICMP_ECHOREPLY) {
+                ping_err(skb, info);
+                return;
+        }
+
+        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
+        else if (type == ICMP_REDIRECT)
+                ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
+}
+
 /*
  *      This table is the definition of how we handle ICMP.
  */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5ef4da780ac1..d0ef0e674ec5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,7 +735,7 @@ drop:
         return 0;
 }
 
-static struct sk_buff *handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
 {
         int err;
 
@@ -745,8 +745,12 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb)
                         goto error;
                 skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
                 return skb;
-        }
-        if (skb->ip_summed != CHECKSUM_PARTIAL)
+        } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                   tunnel->parms.o_flags&GRE_CSUM) {
+                err = skb_checksum_help(skb);
+                if (unlikely(err))
+                        goto error;
+        } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                 skb->ip_summed = CHECKSUM_NONE;
 
         return skb;
@@ -776,7 +780,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
         int err;
         int pkt_len;
 
-        skb = handle_offloads(skb);
+        skb = handle_offloads(tunnel, skb);
         if (IS_ERR(skb)) {
                 dev->stats.tx_dropped++;
                 return NETDEV_TX_OK;
@@ -970,7 +974,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
         iph->daddr = fl4.daddr;
         iph->saddr = fl4.saddr;
         iph->ttl = ttl;
-        iph->id = 0;
+
+        tunnel_ip_select_ident(skb, old_iph, &rt->dst);
 
         if (ttl == 0) {
                 if (skb->protocol == htons(ETH_P_IP))
@@ -1101,14 +1106,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
         tunnel->hlen = addend;
         /* TCP offload with GRE SEQ is not supported. */
         if (!(tunnel->parms.o_flags & GRE_SEQ)) {
-                /* device supports enc gso offload*/
-                if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
-                        dev->features |= NETIF_F_TSO;
-                        dev->hw_features |= NETIF_F_TSO;
-                } else {
-                        dev->features |= NETIF_F_GSO_SOFTWARE;
-                        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-                }
+                dev->features |= NETIF_F_GSO_SOFTWARE;
+                dev->hw_features |= NETIF_F_GSO_SOFTWARE;
         }
 
         return mtu;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 55c4ee1bba06..2e91006d6076 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info)
         struct iphdr *iph = (struct iphdr *)skb->data;
         struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
         struct inet_sock *inet_sock;
-        int type = icmph->type;
-        int code = icmph->code;
+        int type = icmp_hdr(skb)->type;
+        int code = icmp_hdr(skb)->code;
         struct net *net = dev_net(skb->dev);
         struct sock *sk;
         int harderr;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fd0cea114b5d..e2b4461074da 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1351,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
         return 0;
 }
 
-/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+/* Calculate MSS not accounting any TCP options. */
+static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
         const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1381,13 +1381,17 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
         /* Then reserve room for full set of TCP options and 8 bytes of data */
         if (mss_now < 48)
                 mss_now = 48;
-
-        /* Now subtract TCP options size, not including SACKs */
-        mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
-
         return mss_now;
 }
 
+/* Calculate MSS. Not accounting for SACKs here. */
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+{
+        /* Subtract TCP options size, not including SACKs */
+        return __tcp_mtu_to_mss(sk, pmtu) -
+               (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+}
+
 /* Inverse of above */
 int tcp_mss_to_mtu(struct sock *sk, int mss)
 {
@@ -2930,7 +2934,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
          */
         if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
                 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
-        space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+        space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
                 MAX_TCP_OPTION_SPACE;
 
         syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
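
For a rough sense of what the tcp_send_syn_data() change above buys: the old code called tcp_mtu_to_mss(), which had already subtracted the connection's standing option size, and then subtracted MAX_TCP_OPTION_SPACE on top of that, so the standing options were effectively charged twice; the new code starts from the option-free __tcp_mtu_to_mss() and reserves the full option space exactly once. A back-of-the-envelope check follows, assuming a plain IPv4 path with a 1500-byte PMTU and timestamps as the only standing option (those assumptions are mine, not part of the commit).

    /* Rough arithmetic for the SYN-data budget before and after the fix.
     * The constants are assumptions for a plain IPv4 path with timestamps
     * enabled, not values read from a live kernel. */
    #include <stdio.h>

    #define PMTU                   1500
    #define IP4_HDR                  20
    #define TCP_HDR                  20   /* sizeof(struct tcphdr) */
    #define TCPOLEN_TSTAMP_ALIGNED   12   /* standing option size with timestamps */
    #define MAX_TCP_OPTION_SPACE     40

    int main(void)
    {
        /* Roughly what __tcp_mtu_to_mss() yields here: MSS with no options. */
        int mss_no_opts = PMTU - IP4_HDR - TCP_HDR;

        /* Old path: options charged twice, once inside tcp_mtu_to_mss()
         * and once via MAX_TCP_OPTION_SPACE. */
        int old_space = (mss_no_opts - TCPOLEN_TSTAMP_ALIGNED) - MAX_TCP_OPTION_SPACE;

        /* New path: reserve the full option space exactly once. */
        int new_space = mss_no_opts - MAX_TCP_OPTION_SPACE;

        printf("old SYN payload budget: %d bytes\n", old_space);   /* 1408 */
        printf("new SYN payload budget: %d bytes\n", new_space);   /* 1420 */
        return 0;
    }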