Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c         |  4
-rw-r--r--  net/core/sock.c        |  2
-rw-r--r--  net/core/sock_diag.c   | 27
-rw-r--r--  net/ipv4/af_inet.c     | 17
-rw-r--r--  net/ipv4/icmp.c        | 23
-rw-r--r--  net/ipv4/ip_gre.c      | 25
-rw-r--r--  net/ipv4/ping.c        |  4
-rw-r--r--  net/ipv4/tcp_output.c  | 18
8 files changed, 75 insertions, 45 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 17bc535115d3..18d8b5acc343 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1882,8 +1882,10 @@ int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
 
 		if (!new_dev_maps)
 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
-		if (!new_dev_maps)
+		if (!new_dev_maps) {
+			mutex_unlock(&xps_map_mutex);
 			return -ENOMEM;
+		}
 
 		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
 				 NULL;
diff --git a/net/core/sock.c b/net/core/sock.c
index fe96c5d34299..b261a7977746 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -186,8 +186,10 @@ void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
+#if defined(CONFIG_MEMCG_KMEM)
 struct static_key memcg_socket_limit_enabled;
 EXPORT_SYMBOL(memcg_socket_limit_enabled);
+#endif
 
 /*
  * Make lock validator output more readable. (we pre-construct these
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 602cd637182e..a29e90cf36b7 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -97,21 +97,6 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister);
 
-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
-{
-	if (sock_diag_handlers[family] == NULL)
-		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-				NETLINK_SOCK_DIAG, family);
-
-	mutex_lock(&sock_diag_table_mutex);
-	return sock_diag_handlers[family];
-}
-
-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
-{
-	mutex_unlock(&sock_diag_table_mutex);
-}
-
 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	int err;
@@ -121,12 +106,20 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (nlmsg_len(nlh) < sizeof(*req))
 		return -EINVAL;
 
-	hndl = sock_diag_lock_handler(req->sdiag_family);
+	if (req->sdiag_family >= AF_MAX)
+		return -EINVAL;
+
+	if (sock_diag_handlers[req->sdiag_family] == NULL)
+		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+				NETLINK_SOCK_DIAG, req->sdiag_family);
+
+	mutex_lock(&sock_diag_table_mutex);
+	hndl = sock_diag_handlers[req->sdiag_family];
 	if (hndl == NULL)
 		err = -ENOENT;
 	else
 		err = hndl->dump(skb, nlh);
-	sock_diag_unlock_handler(hndl);
+	mutex_unlock(&sock_diag_table_mutex);
 
 	return err;
 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e225a4e5b572..68f6a94f7661 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -248,8 +248,12 @@ EXPORT_SYMBOL(inet_listen);
 u32 inet_ehash_secret __read_mostly;
 EXPORT_SYMBOL(inet_ehash_secret);
 
+u32 ipv6_hash_secret __read_mostly;
+EXPORT_SYMBOL(ipv6_hash_secret);
+
 /*
- * inet_ehash_secret must be set exactly once
+ * inet_ehash_secret must be set exactly once, and to a non nul value
+ * ipv6_hash_secret must be set exactly once.
  */
 void build_ehash_secret(void)
 {
@@ -259,7 +263,8 @@ void build_ehash_secret(void)
 		get_random_bytes(&rnd, sizeof(rnd));
 	} while (rnd == 0);
 
-	cmpxchg(&inet_ehash_secret, 0, rnd);
+	if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
+		get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
@@ -1327,8 +1332,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		if (skb->next != NULL)
 			iph->frag_off |= htons(IP_MF);
 		offset += (skb->len - skb->mac_len - iph->ihl * 4);
-	} else
-		iph->id = htons(id++);
+	} else {
+		if (!(iph->frag_off & htons(IP_DF)))
+			iph->id = htons(id++);
+	}
 	iph->tot_len = htons(skb->len - skb->mac_len);
 	iph->check = 0;
 	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
@@ -1572,7 +1579,7 @@ static const struct net_offload udp_offload = {
 
 static const struct net_protocol icmp_protocol = {
 	.handler = icmp_rcv,
-	.err_handler = ping_err,
+	.err_handler = icmp_err,
 	.no_policy = 1,
 	.netns_ok = 1,
 };
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 17ff9fd7cdda..3ac5dff79627 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,6 +934,29 @@ error:
 	goto drop;
 }
 
+void icmp_err(struct sk_buff *skb, u32 info)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+	int type = icmp_hdr(skb)->type;
+	int code = icmp_hdr(skb)->code;
+	struct net *net = dev_net(skb->dev);
+
+	/*
+	 * Use ping_err to handle all icmp errors except those
+	 * triggered by ICMP_ECHOREPLY which sent from kernel.
+	 */
+	if (icmph->type != ICMP_ECHOREPLY) {
+		ping_err(skb, info);
+		return;
+	}
+
+	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
+	else if (type == ICMP_REDIRECT)
+		ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
+}
+
 /*
  * This table is the definition of how we handle ICMP.
  */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5ef4da780ac1..d0ef0e674ec5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,7 +735,7 @@ drop:
 	return 0;
 }
 
-static struct sk_buff *handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
 {
 	int err;
 
@@ -745,8 +745,12 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb)
 			goto error;
 		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
 		return skb;
-	}
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		   tunnel->parms.o_flags&GRE_CSUM) {
+		err = skb_checksum_help(skb);
+		if (unlikely(err))
+			goto error;
+	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
 		skb->ip_summed = CHECKSUM_NONE;
 
 	return skb;
@@ -776,7 +780,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	int err;
 	int pkt_len;
 
-	skb = handle_offloads(skb);
+	skb = handle_offloads(tunnel, skb);
 	if (IS_ERR(skb)) {
 		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
@@ -970,7 +974,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->daddr = fl4.daddr;
 	iph->saddr = fl4.saddr;
 	iph->ttl = ttl;
-	iph->id = 0;
+
+	tunnel_ip_select_ident(skb, old_iph, &rt->dst);
 
 	if (ttl == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
@@ -1101,14 +1106,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 	tunnel->hlen = addend;
 	/* TCP offload with GRE SEQ is not supported. */
 	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
-		/* device supports enc gso offload*/
-		if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
-			dev->features |= NETIF_F_TSO;
-			dev->hw_features |= NETIF_F_TSO;
-		} else {
-			dev->features |= NETIF_F_GSO_SOFTWARE;
-			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-		}
+		dev->features |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 	}
 
 	return mtu;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 55c4ee1bba06..2e91006d6076 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info)
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
 	struct inet_sock *inet_sock;
-	int type = icmph->type;
-	int code = icmph->code;
+	int type = icmp_hdr(skb)->type;
+	int code = icmp_hdr(skb)->code;
 	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	int harderr;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fd0cea114b5d..e2b4461074da 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1351,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	return 0;
 }
 
-/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+/* Calculate MSS not accounting any TCP options. */
+static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1381,13 +1381,17 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
 	/* Then reserve room for full set of TCP options and 8 bytes of data */
 	if (mss_now < 48)
 		mss_now = 48;
-
-	/* Now subtract TCP options size, not including SACKs */
-	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
-
 	return mss_now;
 }
 
+/* Calculate MSS. Not accounting for SACKs here. */
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+{
+	/* Subtract TCP options size, not including SACKs */
+	return __tcp_mtu_to_mss(sk, pmtu) -
+	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+}
+
 /* Inverse of above */
 int tcp_mss_to_mtu(struct sock *sk, int mss)
 {
@@ -2930,7 +2934,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 	 */
 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
-	space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
 		MAX_TCP_OPTION_SPACE;
 
 	syn_data = skb_copy_expand(syn, skb_headroom(syn), space,