Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/arp.c                   |  55
-rw-r--r--  net/ipv4/devinet.c               |   1
-rw-r--r--  net/ipv4/fib_frontend.c          |   4
-rw-r--r--  net/ipv4/fib_semantics.c         |  76
-rw-r--r--  net/ipv4/igmp.c                  |   4
-rw-r--r--  net/ipv4/inet_connection_sock.c  |   2
-rw-r--r--  net/ipv4/ip_fragment.c           |   8
-rw-r--r--  net/ipv4/ip_gre.c                |   4
-rw-r--r--  net/ipv4/ip_sockglue.c           |  14
-rw-r--r--  net/ipv4/ipip.c                  |   7
-rw-r--r--  net/ipv4/route.c                 |   7
-rw-r--r--  net/ipv4/syncookies.c            |   3
-rw-r--r--  net/ipv4/tcp.c                   |  30
-rw-r--r--  net/ipv4/tcp_ipv4.c              |  25
-rw-r--r--  net/ipv4/tcp_output.c            |  22
-rw-r--r--  net/ipv4/tcp_timer.c             |   6
-rw-r--r--  net/ipv4/udp.c                   |   4
-rw-r--r--  net/ipv4/udplite.c               |   4
18 files changed, 180 insertions, 96 deletions
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf29..1940b4df7699 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
  * bonding can change the skb before
  * sending (e.g. insert 8021q tag).
  * Harald Welte      :       convert to make use of jenkins hash
+ * Jesper D. Brouer  :       Proxy ARP PVLAN RFC 3069 support.
  */

 #include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
 /*
  * Check if we can use proxy ARP for this path
  */
-
-static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
+static inline int arp_fwd_proxy(struct in_device *in_dev,
+                                struct net_device *dev, struct rtable *rt)
 {
         struct in_device *out_dev;
         int imi, omi = -1;

+        if (rt->u.dst.dev == dev)
+                return 0;
+
         if (!IN_DEV_PROXY_ARP(in_dev))
                 return 0;

@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
 }

 /*
+ * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
+ *
+ * RFC3069 supports proxy arp replies back to the same interface. This
+ * is done to support (ethernet) switch features, like RFC 3069, where
+ * the individual ports are not allowed to communicate with each
+ * other, BUT they are allowed to talk to the upstream router. As
+ * described in RFC 3069, it is possible to allow these hosts to
+ * communicate through the upstream router, by proxy_arp'ing.
+ *
+ * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
+ *
+ * This technology is known by different names:
+ *   In RFC 3069 it is called VLAN Aggregation.
+ *   Cisco and Allied Telesyn call it Private VLAN.
+ *   Hewlett-Packard call it Source-Port filtering or port-isolation.
+ *   Ericsson call it MAC-Forced Forwarding (RFC Draft).
+ *
+ */
+static inline int arp_fwd_pvlan(struct in_device *in_dev,
+                                struct net_device *dev, struct rtable *rt,
+                                __be32 sip, __be32 tip)
+{
+        /* Private VLAN is only concerned about the same ethernet segment */
+        if (rt->u.dst.dev != dev)
+                return 0;
+
+        /* Don't reply on self probes (often done by windowz boxes)*/
+        if (sip == tip)
+                return 0;
+
+        if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
+                return 1;
+        else
+                return 0;
+}
+
+/*
  * Interface to link layer: send routine and receive handler.
  */

@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
                 }
                 goto out;
         } else if (IN_DEV_FORWARD(in_dev)) {
-                if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
-                    (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+                if (addr_type == RTN_UNICAST &&
+                    (arp_fwd_proxy(in_dev, dev, rt) ||
+                     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
+                     pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
+                {
                         n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
                         if (n)
                                 neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
                    devices (strip is candidate)
                  */
                 if (n == NULL &&
-                    arp->ar_op == htons(ARPOP_REPLY) &&
+                    (arp->ar_op == htons(ARPOP_REPLY) ||
+                     (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
                     inet_addr_type(net, sip) == RTN_UNICAST)
                         n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
         }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b653..cd71a3908391 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1408,6 +1408,7 @@ static struct devinet_sysctl_table {
                 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
                 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
                 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
+                DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),

                 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
                 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 82dbf711d6d0..9b3e28ed5240 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -883,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
         netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
 }

-static int nl_fib_lookup_init(struct net *net)
+static int __net_init nl_fib_lookup_init(struct net *net)
 {
         struct sock *sk;
         sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1004,7 +1004,7 @@ fail:
         return err;
 }

-static void __net_exit ip_fib_net_exit(struct net *net)
+static void ip_fib_net_exit(struct net *net)
 {
         unsigned int i;

diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c2..96b21011a3e4 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
 #define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
 for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

-#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
-for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
+#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
+for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)

 #else /* CONFIG_IP_ROUTE_MULTIPATH */

@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
 #define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
 for (nhsel=0; nhsel < 1; nhsel++)

-#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \
+#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
 for (nhsel=0; nhsel < 1; nhsel++)

 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
                 return;
         }
         change_nexthops(fi) {
-                if (nh->nh_dev)
-                        dev_put(nh->nh_dev);
-                nh->nh_dev = NULL;
+                if (nexthop_nh->nh_dev)
+                        dev_put(nexthop_nh->nh_dev);
+                nexthop_nh->nh_dev = NULL;
         } endfor_nexthops(fi);
         fib_info_cnt--;
         release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
         if (fi->fib_prefsrc)
                 hlist_del(&fi->fib_lhash);
         change_nexthops(fi) {
-                if (!nh->nh_dev)
+                if (!nexthop_nh->nh_dev)
                         continue;
-                hlist_del(&nh->nh_hash);
+                hlist_del(&nexthop_nh->nh_hash);
         } endfor_nexthops(fi)
         fi->fib_dead = 1;
         fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                 if (!rtnh_ok(rtnh, remaining))
                         return -EINVAL;

-                nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
-                nh->nh_oif = rtnh->rtnh_ifindex;
-                nh->nh_weight = rtnh->rtnh_hops + 1;
+                nexthop_nh->nh_flags =
+                        (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
+                nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
+                nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

                 attrlen = rtnh_attrlen(rtnh);
                 if (attrlen > 0) {
                         struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

                         nla = nla_find(attrs, attrlen, RTA_GATEWAY);
-                        nh->nh_gw = nla ? nla_get_be32(nla) : 0;
+                        nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
 #ifdef CONFIG_NET_CLS_ROUTE
                         nla = nla_find(attrs, attrlen, RTA_FLOW);
-                        nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+                        nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
                 }

@@ -738,7 +739,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)

         fi->fib_nhs = nhs;
         change_nexthops(fi) {
-                nh->nh_parent = fi;
+                nexthop_nh->nh_parent = fi;
         } endfor_nexthops(fi)

         if (cfg->fc_mx) {
@@ -808,7 +809,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
                         goto failure;
         } else {
                 change_nexthops(fi) {
-                        if ((err = fib_check_nh(cfg, fi, nh)) != 0)
+                        if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
                                 goto failure;
                 } endfor_nexthops(fi)
         }
@@ -843,11 +844,11 @@ link_it:
                 struct hlist_head *head;
                 unsigned int hash;

-                if (!nh->nh_dev)
+                if (!nexthop_nh->nh_dev)
                         continue;
-                hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
+                hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
                 head = &fib_info_devhash[hash];
-                hlist_add_head(&nh->nh_hash, head);
+                hlist_add_head(&nexthop_nh->nh_hash, head);
         } endfor_nexthops(fi)
         spin_unlock_bh(&fib_info_lock);
         return fi;
@@ -1080,21 +1081,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
                 prev_fi = fi;
                 dead = 0;
                 change_nexthops(fi) {
-                        if (nh->nh_flags&RTNH_F_DEAD)
+                        if (nexthop_nh->nh_flags&RTNH_F_DEAD)
                                 dead++;
-                        else if (nh->nh_dev == dev &&
-                                        nh->nh_scope != scope) {
-                                nh->nh_flags |= RTNH_F_DEAD;
+                        else if (nexthop_nh->nh_dev == dev &&
+                                 nexthop_nh->nh_scope != scope) {
+                                nexthop_nh->nh_flags |= RTNH_F_DEAD;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                                 spin_lock_bh(&fib_multipath_lock);
-                                fi->fib_power -= nh->nh_power;
-                                nh->nh_power = 0;
+                                fi->fib_power -= nexthop_nh->nh_power;
+                                nexthop_nh->nh_power = 0;
                                 spin_unlock_bh(&fib_multipath_lock);
 #endif
                                 dead++;
                         }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-                        if (force > 1 && nh->nh_dev == dev) {
+                        if (force > 1 && nexthop_nh->nh_dev == dev) {
                                 dead = fi->fib_nhs;
                                 break;
                         }
@@ -1144,18 +1145,20 @@ int fib_sync_up(struct net_device *dev)
                 prev_fi = fi;
                 alive = 0;
                 change_nexthops(fi) {
-                        if (!(nh->nh_flags&RTNH_F_DEAD)) {
+                        if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
                                 alive++;
                                 continue;
                         }
-                        if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
+                        if (nexthop_nh->nh_dev == NULL ||
+                            !(nexthop_nh->nh_dev->flags&IFF_UP))
                                 continue;
-                        if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
+                        if (nexthop_nh->nh_dev != dev ||
+                            !__in_dev_get_rtnl(dev))
                                 continue;
                         alive++;
                         spin_lock_bh(&fib_multipath_lock);
-                        nh->nh_power = 0;
-                        nh->nh_flags &= ~RTNH_F_DEAD;
+                        nexthop_nh->nh_power = 0;
+                        nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
                         spin_unlock_bh(&fib_multipath_lock);
                 } endfor_nexthops(fi)

@@ -1182,9 +1185,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
         if (fi->fib_power <= 0) {
                 int power = 0;
                 change_nexthops(fi) {
-                        if (!(nh->nh_flags&RTNH_F_DEAD)) {
-                                power += nh->nh_weight;
-                                nh->nh_power = nh->nh_weight;
+                        if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
+                                power += nexthop_nh->nh_weight;
+                                nexthop_nh->nh_power = nexthop_nh->nh_weight;
                         }
                 } endfor_nexthops(fi);
                 fi->fib_power = power;
@@ -1204,9 +1207,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
         w = jiffies % fi->fib_power;

         change_nexthops(fi) {
-                if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
-                        if ((w -= nh->nh_power) <= 0) {
-                                nh->nh_power--;
+                if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
+                    nexthop_nh->nh_power) {
+                        if ((w -= nexthop_nh->nh_power) <= 0) {
+                                nexthop_nh->nh_power--;
                                 fi->fib_power--;
                                 res->nh_sel = nhsel;
                                 spin_unlock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c933..8f5468393f01 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2603,7 +2603,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
         .release        =       seq_release_net,
 };

-static int igmp_net_init(struct net *net)
+static int __net_init igmp_net_init(struct net *net)
 {
         struct proc_dir_entry *pde;

@@ -2621,7 +2621,7 @@ out_igmp:
         return -ENOMEM;
 }

-static void igmp_net_exit(struct net *net)
+static void __net_exit igmp_net_exit(struct net *net)
 {
         proc_net_remove(net, "mcfilter");
         proc_net_remove(net, "igmp");
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc3..8da6429269dd 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
                         syn_ack_recalc(req, thresh, max_retries,
                                        queue->rskq_defer_accept,
                                        &expire, &resend);
+                        if (req->rsk_ops->syn_ack_timeout)
+                                req->rsk_ops->syn_ack_timeout(parent, req);
                         if (!expire &&
                             (!resend ||
                              !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c31..891c72aea520 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -646,7 +646,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
         { }
 };

-static int ip4_frags_ns_ctl_register(struct net *net)
+static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 {
         struct ctl_table *table;
         struct ctl_table_header *hdr;
@@ -676,7 +676,7 @@ err_alloc:
         return -ENOMEM;
 }

-static void ip4_frags_ns_ctl_unregister(struct net *net)
+static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
 {
         struct ctl_table *table;

@@ -704,7 +704,7 @@ static inline void ip4_frags_ctl_register(void)
 }
 #endif

-static int ipv4_frags_init_net(struct net *net)
+static int __net_init ipv4_frags_init_net(struct net *net)
 {
         /*
          * Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +726,7 @@ static int ipv4_frags_init_net(struct net *net)
         return ip4_frags_ns_ctl_register(net);
 }

-static void ipv4_frags_exit_net(struct net *net)
+static void __net_exit ipv4_frags_exit_net(struct net *net)
 {
         ip4_frags_ns_ctl_unregister(net);
         inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac6..7631b20490f5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
         }
 }

-static int ipgre_init_net(struct net *net)
+static int __net_init ipgre_init_net(struct net *net)
 {
         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
         int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
         return err;
 }

-static void ipgre_exit_net(struct net *net)
+static void __net_exit ipgre_exit_net(struct net *net)
 {
         struct ipgre_net *ign;
         LIST_HEAD(list);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff03..644dc43a55de 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                              (1<<IP_TTL) | (1<<IP_HDRINCL) |
                              (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
                              (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
-                             (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
+                             (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+                             (1<<IP_MINTTL))) ||
             optname == IP_MULTICAST_TTL ||
             optname == IP_MULTICAST_ALL ||
             optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
                 inet->transparent = !!val;
                 break;

+        case IP_MINTTL:
+                if (optlen < 1)
+                        goto e_inval;
+                if (val < 0 || val > 255)
+                        goto e_inval;
+                inet->min_ttl = val;
+                break;
+
         default:
                 err = -ENOPROTOOPT;
                 break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
         case IP_TRANSPARENT:
                 val = inet->transparent;
                 break;
+        case IP_MINTTL:
+                val = inet->min_ttl;
+                break;
         default:
                 release_sock(sk);
                 return -ENOPROTOOPT;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed3379..95db732e542b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
         struct net_device *fb_tunnel_dev;
 };

-static void ipip_fb_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);

@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
         ipip_tunnel_bind_dev(dev);
 }

-static void ipip_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
 {
         struct ip_tunnel *tunnel = netdev_priv(dev);
         struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
         }
 }

-static int ipip_init_net(struct net *net)
+static int __net_init ipip_init_net(struct net *net)
 {
         struct ipip_net *ipn = net_generic(net, ipip_net_id);
         int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
         return err;
 }

-static void ipip_exit_net(struct net *net)
+static void __net_exit ipip_exit_net(struct net *net)
 {
         struct ipip_net *ipn = net_generic(net, ipip_net_id);
         LIST_HEAD(list);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d33384..b16dfadbe6d6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1990,8 +1990,13 @@ static int __mkroute_input(struct sk_buff *skb,
         if (skb->protocol != htons(ETH_P_IP)) {
                 /* Not IP (i.e. ARP). Do not create route, if it is
                  * invalid for proxy arp. DNAT routes are always valid.
+                 *
+                 * Proxy arp feature have been extended to allow, ARP
+                 * replies back to the same interface, to support
+                 * Private VLAN switch technologies. See arp.c.
                  */
-                if (out_dev == in_dev) {
+                if (out_dev == in_dev &&
+                    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
                         err = -EINVAL;
                         goto cleanup;
                 }
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80ef2473..5c24db4a3c91 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,

         tcp_select_initial_window(tcp_full_space(sk), req->mss,
                                   &req->rcv_wnd, &req->window_clamp,
-                                  ireq->wscale_ok, &rcv_wscale);
+                                  ireq->wscale_ok, &rcv_wscale,
+                                  dst_metric(&rt->u.dst, RTAX_INITRWND));

         ireq->rcv_wscale  = rcv_wscale;

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2e..d5d69ea8f249 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
         tp->nonagle &= ~TCP_NAGLE_PUSH;
 }

-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
-                                struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 {
         if (flags & MSG_OOB)
                 tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
                             int nonagle)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
-
         if (tcp_send_head(sk)) {
-                struct sk_buff *skb = tcp_write_queue_tail(sk);
+                struct tcp_sock *tp = tcp_sk(sk);
+
                 if (!(flags & MSG_MORE) || forced_push(tp))
-                        tcp_mark_push(tp, skb);
-                tcp_mark_urg(tp, flags, skb);
+                        tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+                tcp_mark_urg(tp, flags);
                 __tcp_push_pending_frames(sk, mss_now,
                                           (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
         }
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
 #define TCP_PAGE(sk)    (sk->sk_sndmsg_page)
 #define TCP_OFF(sk)     (sk->sk_sndmsg_off)

-static inline int select_size(struct sock *sk)
+static inline int select_size(struct sock *sk, int sg)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         int tmp = tp->mss_cache;

-        if (sk->sk_route_caps & NETIF_F_SG) {
+        if (sg) {
                 if (sk_can_gso(sk))
                         tmp = 0;
                 else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
         struct sk_buff *skb;
         int iovlen, flags;
         int mss_now, size_goal;
-        int err, copied;
+        int sg, err, copied;
         long timeo;

         lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                 goto out_err;

+        sg = sk->sk_route_caps & NETIF_F_SG;
+
         while (--iovlen >= 0) {
                 int seglen = iov->iov_len;
                 unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
                                 if (!sk_stream_memory_free(sk))
                                         goto wait_for_sndbuf;

-                                skb = sk_stream_alloc_skb(sk, select_size(sk),
-                                                sk->sk_allocation);
+                                skb = sk_stream_alloc_skb(sk,
+                                                          select_size(sk, sg),
+                                                          sk->sk_allocation);
                                 if (!skb)
                                         goto wait_for_memory;

@@ -997,9 +999,7 @@ new_segment:
                                         /* We can extend the last page
                                          * fragment. */
                                         merge = 1;
-                                } else if (i == MAX_SKB_FRAGS ||
-                                           (!i &&
-                                           !(sk->sk_route_caps & NETIF_F_SG))) {
+                                } else if (i == MAX_SKB_FRAGS || !sg) {
                                         /* Need to add new fragment and cannot
                                          * do this because interface is non-SG,
                                          * or because all the page slots are
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078a..c3588b4fd979 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -742,9 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  *      This still operates on a request_sock only, not on a big
  *      socket.
  */
-static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                 struct request_sock *req,
                                 struct request_values *rvp)
 {
         const struct inet_request_sock *ireq = inet_rsk(req);
         int err = -1;
@@ -775,10 +775,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
         return err;
 }

-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                               struct request_values *rvp)
 {
-        return __tcp_v4_send_synack(sk, NULL, req, rvp);
+        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+        return tcp_v4_send_synack(sk, NULL, req, rvp);
 }

 /*
@@ -1192,10 +1193,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
         .family         =       PF_INET,
         .obj_size       =       sizeof(struct tcp_request_sock),
-        .rtx_syn_ack    =       tcp_v4_send_synack,
+        .rtx_syn_ack    =       tcp_v4_rtx_synack,
         .send_ack       =       tcp_v4_reqsk_send_ack,
         .destructor     =       tcp_v4_reqsk_destructor,
         .send_reset     =       tcp_v4_send_reset,
+        .syn_ack_timeout =      tcp_syn_ack_timeout,
 };

 #ifdef CONFIG_TCP_MD5SIG
@@ -1373,8 +1375,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         }
         tcp_rsk(req)->snt_isn = isn;

-        if (__tcp_v4_send_synack(sk, dst, req,
+        if (tcp_v4_send_synack(sk, dst, req,
                                  (struct request_values *)&tmp_ext) ||
             want_cookie)
                 goto drop_and_free;

@@ -1649,6 +1651,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
         if (!sk)
                 goto no_tcp_socket;

+        if (iph->ttl < inet_sk(sk)->min_ttl)
+                goto discard_and_relse;
+
 process:
         if (sk->sk_state == TCP_TIME_WAIT)
                 goto do_time_wait;
@@ -2425,12 +2430,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
         },
 };

-static int tcp4_proc_init_net(struct net *net)
+static int __net_init tcp4_proc_init_net(struct net *net)
 {
         return tcp_proc_register(net, &tcp4_seq_afinfo);
 }

-static void tcp4_proc_exit_net(struct net *net)
+static void __net_exit tcp4_proc_exit_net(struct net *net)
 {
         tcp_proc_unregister(net, &tcp4_seq_afinfo);
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce237640f..4a1605d3f909 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
  */
 void tcp_select_initial_window(int __space, __u32 mss,
                                __u32 *rcv_wnd, __u32 *window_clamp,
-                               int wscale_ok, __u8 *rcv_wscale)
+                               int wscale_ok, __u8 *rcv_wscale,
+                               __u32 init_rcv_wnd)
 {
         unsigned int space = (__space < 0 ? 0 : __space);

@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
                         init_cwnd = 2;
                 else if (mss > 1460)
                         init_cwnd = 3;
-                if (*rcv_wnd > init_cwnd * mss)
+                /* when initializing use the value from init_rcv_wnd
+                 * rather than the default from above
+                 */
+                if (init_rcv_wnd &&
+                    (*rcv_wnd > init_rcv_wnd * mss))
+                        *rcv_wnd = init_rcv_wnd * mss;
+                else if (*rcv_wnd > init_cwnd * mss)
                         *rcv_wnd = init_cwnd * mss;
         }

@@ -1794,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                                int nonagle)
 {
-        struct sk_buff *skb = tcp_send_head(sk);
-
-        if (!skb)
-                return;
-
         /* If we are closed, the bytes will have to remain here.
          * In time closedown will finish, we empty the write queue and
          * all will be happy.
@@ -2422,7 +2424,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                           &req->rcv_wnd,
                                           &req->window_clamp,
                                           ireq->wscale_ok,
-                                          &rcv_wscale);
+                                          &rcv_wscale,
+                                          dst_metric(dst, RTAX_INITRWND));
                 ireq->rcv_wscale = rcv_wscale;
         }

@@ -2549,7 +2552,8 @@ static void tcp_connect_init(struct sock *sk)
                                   &tp->rcv_wnd,
                                   &tp->window_clamp,
                                   sysctl_tcp_window_scaling,
-                                  &rcv_wscale);
+                                  &rcv_wscale,
+                                  dst_metric(dst, RTAX_INITRWND));

         tp->rx_opt.rcv_wscale = rcv_wscale;
         tp->rcv_ssthresh = tp->rcv_wnd;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20c2597..de7d1bf9114f 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -474,6 +474,12 @@ static void tcp_synack_timer(struct sock *sk)
                        TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 }

+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+{
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
+}
+EXPORT_SYMBOL(tcp_syn_ack_timeout);
+
 void tcp_set_keepalive(struct sock *sk, int val)
 {
         if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e04..4f7d2122d818 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2027,12 +2027,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
         },
 };

-static int udp4_proc_init_net(struct net *net)
+static int __net_init udp4_proc_init_net(struct net *net)
 {
         return udp_proc_register(net, &udp4_seq_afinfo);
 }

-static void udp4_proc_exit_net(struct net *net)
+static void __net_exit udp4_proc_exit_net(struct net *net)
 {
         udp_proc_unregister(net, &udp4_seq_afinfo);
 }
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f79513f4a5..6610bf76369f 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
         },
 };

-static int udplite4_proc_init_net(struct net *net)
+static int __net_init udplite4_proc_init_net(struct net *net)
 {
         return udp_proc_register(net, &udplite4_seq_afinfo);
 }

-static void udplite4_proc_exit_net(struct net *net)
+static void __net_exit udplite4_proc_exit_net(struct net *net)
 {
         udp_proc_unregister(net, &udplite4_seq_afinfo);
 }