author		Patrick McHardy <kaber@trash.net>	2010-02-10 08:17:10 -0500
committer	Patrick McHardy <kaber@trash.net>	2010-02-10 08:17:10 -0500
commit		9ab99d5a43e9f283738fd9fd365539306d13eaac (patch)
tree		0214a63e3f4f7f4f187f0139e4a5d8abe453902b /net/ipv4
parent		76780373190d7e8ddfb6fed06aef068e2445c743 (diff)
parent		b1109bf085c8dd69537b7876ea83f914dd1fe46a (diff)

Merge branch 'master' of /repos/git/net-next-2.6

Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/arp.c                                           55
-rw-r--r--  net/ipv4/devinet.c                                        2
-rw-r--r--  net/ipv4/fib_frontend.c                                   6
-rw-r--r--  net/ipv4/fib_semantics.c                                 80
-rw-r--r--  net/ipv4/icmp.c                                           2
-rw-r--r--  net/ipv4/igmp.c                                          87
-rw-r--r--  net/ipv4/inet_connection_sock.c                           2
-rw-r--r--  net/ipv4/inet_diag.c                                      2
-rw-r--r--  net/ipv4/ip_fragment.c                                   39
-rw-r--r--  net/ipv4/ip_gre.c                                         4
-rw-r--r--  net/ipv4/ip_output.c                                      2
-rw-r--r--  net/ipv4/ip_sockglue.c                                   14
-rw-r--r--  net/ipv4/ipcomp.c                                         9
-rw-r--r--  net/ipv4/ipip.c                                           7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c                           4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c                            4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c            2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c     4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c                         22
-rw-r--r--  net/ipv4/proc.c                                           4
-rw-r--r--  net/ipv4/route.c                                          9
-rw-r--r--  net/ipv4/tcp_ipv4.c                                      25
-rw-r--r--  net/ipv4/tcp_probe.c                                     19
-rw-r--r--  net/ipv4/tcp_timer.c                                      6
-rw-r--r--  net/ipv4/udp.c                                            4
-rw-r--r--  net/ipv4/udplite.c                                        4
-rw-r--r--  net/ipv4/xfrm4_policy.c                                  14
27 files changed, 290 insertions, 142 deletions
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf29..1940b4df7699 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
  *		bonding can change the skb before
  *		sending (e.g. insert 8021q tag).
  *	Harald Welte	:	convert to make use of jenkins hash
+ *	Jesper D. Brouer:	Proxy ARP PVLAN RFC 3069 support.
  */
 
 #include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
 /*
  *	Check if we can use proxy ARP for this path
  */
-
-static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
+static inline int arp_fwd_proxy(struct in_device *in_dev,
+				struct net_device *dev, struct rtable *rt)
 {
 	struct in_device *out_dev;
 	int imi, omi = -1;
 
+	if (rt->u.dst.dev == dev)
+		return 0;
+
 	if (!IN_DEV_PROXY_ARP(in_dev))
 		return 0;
 
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
 }
 
 /*
+ * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
+ *
+ * RFC3069 supports proxy arp replies back to the same interface.  This
+ * is done to support (ethernet) switch features, like RFC 3069, where
+ * the individual ports are not allowed to communicate with each
+ * other, BUT they are allowed to talk to the upstream router.  As
+ * described in RFC 3069, it is possible to allow these hosts to
+ * communicate through the upstream router, by proxy_arp'ing.
+ *
+ * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
+ *
+ * This technology is known by different names:
+ *  In RFC 3069 it is called VLAN Aggregation.
+ *  Cisco and Allied Telesyn call it Private VLAN.
+ *  Hewlett-Packard call it Source-Port filtering or port-isolation.
+ *  Ericsson call it MAC-Forced Forwarding (RFC Draft).
+ *
+ */
+static inline int arp_fwd_pvlan(struct in_device *in_dev,
+				struct net_device *dev, struct rtable *rt,
+				__be32 sip, __be32 tip)
+{
+	/* Private VLAN is only concerned about the same ethernet segment */
+	if (rt->u.dst.dev != dev)
+		return 0;
+
+	/* Don't reply on self probes (often done by windowz boxes)*/
+	if (sip == tip)
+		return 0;
+
+	if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
+		return 1;
+	else
+		return 0;
+}
+
+/*
  *	Interface to link layer: send routine and receive handler.
  */
 
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
 			}
 			goto out;
 		} else if (IN_DEV_FORWARD(in_dev)) {
-			if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
-			    (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+			if (addr_type == RTN_UNICAST  &&
+			     (arp_fwd_proxy(in_dev, dev, rt) ||
+			      arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
+			      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
+			{
 				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 				if (n)
 					neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
 		   devices (strip is candidate)
 		 */
 		if (n == NULL &&
-		    arp->ar_op == htons(ARPOP_REPLY) &&
+		    (arp->ar_op == htons(ARPOP_REPLY) ||
+		     (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
 		    inet_addr_type(net, sip) == RTN_UNICAST)
 			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
 	}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5cdbc102a418..cd71a3908391 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table {
 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
 					"accept_source_route"),
 		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
+		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
 		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
 		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
 		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
@@ -1407,6 +1408,7 @@ static struct devinet_sysctl_table {
 		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
 		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
 		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
+		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
 
 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
 		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
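The DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan") entry above is what makes the RFC 3069 behaviour added in arp.c tunable per device; devinet entries of this kind are conventionally exposed under /proc/sys/net/ipv4/conf/<dev>/. The sketch below is illustrative only and not part of the patch: it simply turns the knob on from user space, and the interface name "eth0" and the exact procfs path are assumptions.

/* Illustrative sketch (not from the patch): enable proxy_arp_pvlan for one
 * interface via procfs.  Assumes the conventional per-device sysctl location
 * and an interface named "eth0"; needs root and a kernel carrying this patch. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/proc/sys/net/ipv4/conf/eth0/proxy_arp_pvlan";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fputs("1\n", f) == EOF) {
		perror("fputs");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("proxy_arp_pvlan enabled on eth0\n");
	return EXIT_SUCCESS;
}

The same value can of course be written with sysctl(8); the C form is used here only to keep the example in the same language as the patched code.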
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3323168ee52d..9b3e28ed5240 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 		no_addr = in_dev->ifa_list == NULL;
 		rpf = IN_DEV_RPFILTER(in_dev);
 		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
+		if (mark && !IN_DEV_SRC_VMARK(in_dev))
+			fl.mark = 0;
 	}
 	rcu_read_unlock();
 
@@ -881,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
 	netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
 }
 
-static int nl_fib_lookup_init(struct net *net)
+static int __net_init nl_fib_lookup_init(struct net *net)
 {
 	struct sock *sk;
 	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1002,7 +1004,7 @@ fail:
 	return err;
 }
 
-static void __net_exit ip_fib_net_exit(struct net *net)
+static void ip_fib_net_exit(struct net *net)
 {
 	unsigned int i;
 
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c2..1af0ea0fb6a2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
 #define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
 for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
 
-#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
-for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
+#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
+for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
 
 #else /* CONFIG_IP_ROUTE_MULTIPATH */
 
@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
 #define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
 for (nhsel=0; nhsel < 1; nhsel++)
 
-#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \
+#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
 for (nhsel=0; nhsel < 1; nhsel++)
 
 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
 		return;
 	}
 	change_nexthops(fi) {
-		if (nh->nh_dev)
-			dev_put(nh->nh_dev);
-		nh->nh_dev = NULL;
+		if (nexthop_nh->nh_dev)
+			dev_put(nexthop_nh->nh_dev);
+		nexthop_nh->nh_dev = NULL;
 	} endfor_nexthops(fi);
 	fib_info_cnt--;
 	release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
 	if (fi->fib_prefsrc)
 		hlist_del(&fi->fib_lhash);
 	change_nexthops(fi) {
-		if (!nh->nh_dev)
+		if (!nexthop_nh->nh_dev)
 			continue;
-		hlist_del(&nh->nh_hash);
+		hlist_del(&nexthop_nh->nh_hash);
 	} endfor_nexthops(fi)
 	fi->fib_dead = 1;
 	fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 		if (!rtnh_ok(rtnh, remaining))
 			return -EINVAL;
 
-		nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
-		nh->nh_oif = rtnh->rtnh_ifindex;
-		nh->nh_weight = rtnh->rtnh_hops + 1;
+		nexthop_nh->nh_flags =
+			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
+		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
+		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
 
 		attrlen = rtnh_attrlen(rtnh);
 		if (attrlen > 0) {
 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
 
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
-			nh->nh_gw = nla ? nla_get_be32(nla) : 0;
+			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
 #ifdef CONFIG_NET_CLS_ROUTE
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
-			nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
 		}
 
@@ -527,10 +528,6 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
 	if (nh->nh_gw) {
 		struct fib_result res;
 
-#ifdef CONFIG_IP_ROUTE_PERVASIVE
-		if (nh->nh_flags&RTNH_F_PERVASIVE)
-			return 0;
-#endif
 		if (nh->nh_flags&RTNH_F_ONLINK) {
 			struct net_device *dev;
 
@@ -738,7 +735,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 
 	fi->fib_nhs = nhs;
 	change_nexthops(fi) {
-		nh->nh_parent = fi;
+		nexthop_nh->nh_parent = fi;
 	} endfor_nexthops(fi)
 
 	if (cfg->fc_mx) {
@@ -808,7 +805,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 			goto failure;
 	} else {
 		change_nexthops(fi) {
-			if ((err = fib_check_nh(cfg, fi, nh)) != 0)
+			if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
 				goto failure;
 		} endfor_nexthops(fi)
 	}
@@ -843,11 +840,11 @@ link_it:
 		struct hlist_head *head;
 		unsigned int hash;
 
-		if (!nh->nh_dev)
+		if (!nexthop_nh->nh_dev)
 			continue;
-		hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
+		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
 		head = &fib_info_devhash[hash];
-		hlist_add_head(&nh->nh_hash, head);
+		hlist_add_head(&nexthop_nh->nh_hash, head);
 	} endfor_nexthops(fi)
 	spin_unlock_bh(&fib_info_lock);
 	return fi;
@@ -1080,21 +1077,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
 		prev_fi = fi;
 		dead = 0;
 		change_nexthops(fi) {
-			if (nh->nh_flags&RTNH_F_DEAD)
+			if (nexthop_nh->nh_flags&RTNH_F_DEAD)
 				dead++;
-			else if (nh->nh_dev == dev &&
-					nh->nh_scope != scope) {
-				nh->nh_flags |= RTNH_F_DEAD;
+			else if (nexthop_nh->nh_dev == dev &&
+				 nexthop_nh->nh_scope != scope) {
+				nexthop_nh->nh_flags |= RTNH_F_DEAD;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 				spin_lock_bh(&fib_multipath_lock);
-				fi->fib_power -= nh->nh_power;
-				nh->nh_power = 0;
+				fi->fib_power -= nexthop_nh->nh_power;
+				nexthop_nh->nh_power = 0;
 				spin_unlock_bh(&fib_multipath_lock);
 #endif
 				dead++;
 			}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-			if (force > 1 && nh->nh_dev == dev) {
+			if (force > 1 && nexthop_nh->nh_dev == dev) {
 				dead = fi->fib_nhs;
 				break;
 			}
@@ -1144,18 +1141,20 @@ int fib_sync_up(struct net_device *dev)
 		prev_fi = fi;
 		alive = 0;
 		change_nexthops(fi) {
-			if (!(nh->nh_flags&RTNH_F_DEAD)) {
+			if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
 				alive++;
 				continue;
 			}
-			if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
+			if (nexthop_nh->nh_dev == NULL ||
+			    !(nexthop_nh->nh_dev->flags&IFF_UP))
 				continue;
-			if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
+			if (nexthop_nh->nh_dev != dev ||
+			    !__in_dev_get_rtnl(dev))
 				continue;
 			alive++;
 			spin_lock_bh(&fib_multipath_lock);
-			nh->nh_power = 0;
-			nh->nh_flags &= ~RTNH_F_DEAD;
+			nexthop_nh->nh_power = 0;
+			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
 			spin_unlock_bh(&fib_multipath_lock);
 		} endfor_nexthops(fi)
 
@@ -1182,9 +1181,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
 	if (fi->fib_power <= 0) {
 		int power = 0;
 		change_nexthops(fi) {
-			if (!(nh->nh_flags&RTNH_F_DEAD)) {
-				power += nh->nh_weight;
-				nh->nh_power = nh->nh_weight;
+			if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
+				power += nexthop_nh->nh_weight;
+				nexthop_nh->nh_power = nexthop_nh->nh_weight;
 			}
 		} endfor_nexthops(fi);
 		fi->fib_power = power;
@@ -1204,9 +1203,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
 	w = jiffies % fi->fib_power;
 
 	change_nexthops(fi) {
-		if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
-			if ((w -= nh->nh_power) <= 0) {
-				nh->nh_power--;
+		if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
+		    nexthop_nh->nh_power) {
+			if ((w -= nexthop_nh->nh_power) <= 0) {
+				nexthop_nh->nh_power--;
 				fi->fib_power--;
 				res->nh_sel = nhsel;
 				spin_unlock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60ce41b..4b4c2bcd15db 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
 /* An array of errno for error messages from dest unreach. */
 /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
 
-struct icmp_err icmp_err_convert[] = {
+const struct icmp_err icmp_err_convert[] = {
 	{
 		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
 		.fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c933..d28363998743 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1799,7 +1799,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 	iml->next = inet->mc_list;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
-	inet->mc_list = iml;
+	rcu_assign_pointer(inet->mc_list, iml);
 	ip_mc_inc_group(in_dev, addr);
 	err = 0;
 done:
@@ -1807,24 +1807,46 @@ done:
 	return err;
 }
 
+static void ip_sf_socklist_reclaim(struct rcu_head *rp)
+{
+	struct ip_sf_socklist *psf;
+
+	psf = container_of(rp, struct ip_sf_socklist, rcu);
+	/* sk_omem_alloc should have been decreased by the caller*/
+	kfree(psf);
+}
+
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 			   struct in_device *in_dev)
 {
+	struct ip_sf_socklist *psf = iml->sflist;
 	int err;
 
-	if (iml->sflist == NULL) {
+	if (psf == NULL) {
 		/* any-source empty exclude case */
 		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
 			iml->sfmode, 0, NULL, 0);
 	}
 	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
-		iml->sfmode, iml->sflist->sl_count,
-		iml->sflist->sl_addr, 0);
-	sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max));
-	iml->sflist = NULL;
+		iml->sfmode, psf->sl_count, psf->sl_addr, 0);
+	rcu_assign_pointer(iml->sflist, NULL);
+	/* decrease mem now to avoid the memleak warning */
+	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
+	call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
 	return err;
 }
 
+
+static void ip_mc_socklist_reclaim(struct rcu_head *rp)
+{
+	struct ip_mc_socklist *iml;
+
+	iml = container_of(rp, struct ip_mc_socklist, rcu);
+	/* sk_omem_alloc should have been decreased by the caller*/
+	kfree(iml);
+}
+
+
 /*
  *	Ask a socket to leave a group.
  */
@@ -1854,12 +1876,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 
-		*imlp = iml->next;
+		rcu_assign_pointer(*imlp, iml->next);
 
 		if (in_dev)
 			ip_mc_dec_group(in_dev, group);
 		rtnl_unlock();
-		sock_kfree_s(sk, iml, sizeof(*iml));
+		/* decrease mem now to avoid the memleak warning */
+		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
 		return 0;
 	}
 	if (!in_dev)
@@ -1974,9 +1998,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 		if (psl) {
 			for (i=0; i<psl->sl_count; i++)
 				newpsl->sl_addr[i] = psl->sl_addr[i];
-			sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
+			/* decrease mem now to avoid the memleak warning */
+			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+			call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
 		}
-		pmc->sflist = psl = newpsl;
+		rcu_assign_pointer(pmc->sflist, newpsl);
+		psl = newpsl;
 	}
 	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
 	for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2099,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 	if (psl) {
 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
 			psl->sl_count, psl->sl_addr, 0);
-		sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
+		/* decrease mem now to avoid the memleak warning */
+		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+		call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
 	} else
 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
 			0, NULL, 0);
-	pmc->sflist = newpsl;
+	rcu_assign_pointer(pmc->sflist, newpsl);
 	pmc->sfmode = msf->imsf_fmode;
 	err = 0;
 done:
@@ -2209,30 +2238,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
 	struct ip_mc_socklist *pmc;
 	struct ip_sf_socklist *psl;
 	int i;
+	int ret;
 
+	ret = 1;
 	if (!ipv4_is_multicast(loc_addr))
-		return 1;
+		goto out;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
 		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
 		    pmc->multi.imr_ifindex == dif)
 			break;
 	}
+	ret = inet->mc_all;
 	if (!pmc)
-		return inet->mc_all;
+		goto unlock;
 	psl = pmc->sflist;
+	ret = (pmc->sfmode == MCAST_EXCLUDE);
 	if (!psl)
-		return pmc->sfmode == MCAST_EXCLUDE;
+		goto unlock;
 
 	for (i=0; i<psl->sl_count; i++) {
 		if (psl->sl_addr[i] == rmt_addr)
 			break;
 	}
+	ret = 0;
 	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
-		return 0;
+		goto unlock;
 	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
-		return 0;
-	return 1;
+		goto unlock;
+	ret = 1;
+unlock:
+	rcu_read_unlock();
+out:
+	return ret;
 }
 
 /*
@@ -2251,7 +2290,7 @@ void ip_mc_drop_socket(struct sock *sk)
 	rtnl_lock();
 	while ((iml = inet->mc_list) != NULL) {
 		struct in_device *in_dev;
-		inet->mc_list = iml->next;
+		rcu_assign_pointer(inet->mc_list, iml->next);
 
 		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2298,9 @@ void ip_mc_drop_socket(struct sock *sk)
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
 			in_dev_put(in_dev);
 		}
-		sock_kfree_s(sk, iml, sizeof(*iml));
+		/* decrease mem now to avoid the memleak warning */
+		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
 	}
 	rtnl_unlock();
 }
@@ -2603,7 +2644,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
 	.release	=	seq_release_net,
 };
 
-static int igmp_net_init(struct net *net)
+static int __net_init igmp_net_init(struct net *net)
 {
 	struct proc_dir_entry *pde;
 
@@ -2621,7 +2662,7 @@ out_igmp:
 	return -ENOMEM;
 }
 
-static void igmp_net_exit(struct net *net)
+static void __net_exit igmp_net_exit(struct net *net)
 {
 	proc_net_remove(net, "mcfilter");
 	proc_net_remove(net, "igmp");
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc3..8da6429269dd 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 				syn_ack_recalc(req, thresh, max_retries,
 					       queue->rskq_defer_accept,
 					       &expire, &resend);
+				if (req->rsk_ops->syn_ack_timeout)
+					req->rsk_ops->syn_ack_timeout(parent, req);
 				if (!expire &&
 				    (!resend ||
 				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index bdb78dd180ce..1aaa8110d84b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len,
 			yes = entry->sport >= op[1].no;
 			break;
 		case INET_DIAG_BC_S_LE:
-			yes = entry->dport <= op[1].no;
+			yes = entry->sport <= op[1].no;
 			break;
 		case INET_DIAG_BC_D_GE:
 			yes = entry->dport >= op[1].no;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c31..b59430bc041c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,8 @@
 #include <linux/netdevice.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <net/route.h>
+#include <net/dst.h>
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -205,11 +207,34 @@ static void ip_expire(unsigned long arg)
 	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
 		struct sk_buff *head = qp->q.fragments;
 
-		/* Send an ICMP "Fragment Reassembly Timeout" message. */
 		rcu_read_lock();
 		head->dev = dev_get_by_index_rcu(net, qp->iif);
-		if (head->dev)
-			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+		if (!head->dev)
+			goto out_rcu_unlock;
+
+		/*
+		 * Only search router table for the head fragment,
+		 * when defraging timeout at PRE_ROUTING HOOK.
+		 */
+		if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
+			const struct iphdr *iph = ip_hdr(head);
+			int err = ip_route_input(head, iph->daddr, iph->saddr,
+						 iph->tos, head->dev);
+			if (unlikely(err))
+				goto out_rcu_unlock;
+
+			/*
+			 * Only an end host needs to send an ICMP
+			 * "Fragment Reassembly Timeout" message, per RFC792.
+			 */
+			if (skb_rtable(head)->rt_type != RTN_LOCAL)
+				goto out_rcu_unlock;
+
+		}
+
+		/* Send an ICMP "Fragment Reassembly Timeout" message. */
+		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+out_rcu_unlock:
 		rcu_read_unlock();
 	}
 out:
@@ -646,7 +671,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
 	{ }
 };
 
-static int ip4_frags_ns_ctl_register(struct net *net)
+static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 {
 	struct ctl_table *table;
 	struct ctl_table_header *hdr;
@@ -676,7 +701,7 @@ err_alloc:
 	return -ENOMEM;
 }
 
-static void ip4_frags_ns_ctl_unregister(struct net *net)
+static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
 {
 	struct ctl_table *table;
 
@@ -704,7 +729,7 @@ static inline void ip4_frags_ctl_register(void)
 }
 #endif
 
-static int ipv4_frags_init_net(struct net *net)
+static int __net_init ipv4_frags_init_net(struct net *net)
 {
 	/*
 	 * Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +751,7 @@ static int ipv4_frags_init_net(struct net *net)
 	return ip4_frags_ns_ctl_register(net);
 }
 
-static void ipv4_frags_exit_net(struct net *net)
+static void __net_exit ipv4_frags_exit_net(struct net *net)
 {
 	ip4_frags_ns_ctl_unregister(net);
 	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac6..7631b20490f5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
 	}
 }
 
-static int ipgre_init_net(struct net *net)
+static int __net_init ipgre_init_net(struct net *net)
 {
 	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
 	int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
 	return err;
 }
 
-static void ipgre_exit_net(struct net *net)
+static void __net_exit ipgre_exit_net(struct net *net)
 {
 	struct ipgre_net *ign;
 	LIST_HEAD(list);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e34013a78ef4..3451799e3dbf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -254,7 +254,7 @@ int ip_mc_output(struct sk_buff *skb)
 	 */
 
 	if (rt->rt_flags&RTCF_MULTICAST) {
-		if ((!sk || inet_sk(sk)->mc_loop)
+		if (sk_mc_loop(sk)
 #ifdef CONFIG_IP_MROUTE
 		/* Small optimization: do not loopback not local frames,
 		   which returned after forwarding; they will be dropped
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff03..644dc43a55de 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 			     (1<<IP_TTL) | (1<<IP_HDRINCL) |
 			     (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
 			     (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
-			     (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
+			     (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+			     (1<<IP_MINTTL))) ||
 	    optname == IP_MULTICAST_TTL ||
 	    optname == IP_MULTICAST_ALL ||
 	    optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
 		inet->transparent = !!val;
 		break;
 
+	case IP_MINTTL:
+		if (optlen < 1)
+			goto e_inval;
+		if (val < 0 || val > 255)
+			goto e_inval;
+		inet->min_ttl = val;
+		break;
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 	case IP_TRANSPARENT:
 		val = inet->transparent;
 		break;
+	case IP_MINTTL:
+		val = inet->min_ttl;
+		break;
 	default:
 		release_sock(sk);
 		return -ENOPROTOOPT;
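IP_MINTTL, added above, is a per-socket lower bound on the TTL of accepted packets; the tcp_ipv4.c hunk later in this merge uses it to drop segments whose TTL is below min_ttl, the usual building block for RFC 5082 (GTSM)-style protection. A hedged user-space sketch follows — the numeric fallback for IP_MINTTL (21) mirrors the uapi header change that accompanies this series but is an assumption here, since that header lies outside the net/ipv4 diffstat.

/* Illustrative sketch (not from the patch): set and read back IP_MINTTL on a
 * TCP socket.  A kernel with this patch enforces the bound in tcp_v4_rcv();
 * the fallback define below is an assumption for older userspace headers. */
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int minttl = 255;	/* accept only directly connected peers (GTSM) */
	socklen_t len = sizeof(minttl);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl)) < 0)
		perror("setsockopt(IP_MINTTL)");
	if (getsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, &len) == 0)
		printf("min_ttl = %d\n", minttl);
	close(fd);
	return 0;
}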
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150ae..b55a0c3df82f 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
 
 static void ipcomp4_err(struct sk_buff *skb, u32 info)
 {
+	struct net *net = dev_net(skb->dev);
 	__be32 spi;
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 		return;
 
 	spi = htonl(ntohs(ipch->cpi));
-	x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr,
+	x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr,
 			      spi, IPPROTO_COMP, AF_INET);
 	if (!x)
 		return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 /* We always hold one tunnel user reference to indicate a tunnel */
 static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
 {
+	struct net *net = xs_net(x);
 	struct xfrm_state *t;
 
-	t = xfrm_state_alloc(&init_net);
+	t = xfrm_state_alloc(net);
 	if (t == NULL)
 		goto out;
 
@@ -82,10 +84,11 @@ error:
  */
 static int ipcomp_tunnel_attach(struct xfrm_state *x)
 {
+	struct net *net = xs_net(x);
 	int err = 0;
 	struct xfrm_state *t;
 
-	t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4,
+	t = xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr.a4,
 			      x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
 	if (!t) {
 		t = ipcomp_tunnel_create(x);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed3379..95db732e542b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
 	struct net_device *fb_tunnel_dev;
 };
 
-static void ipip_fb_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_init(struct net_device *dev);
 static void ipip_tunnel_setup(struct net_device *dev);
 
@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
 	ipip_tunnel_bind_dev(dev);
 }
 
-static void ipip_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 	}
 }
 
-static int ipip_init_net(struct net *net)
+static int __net_init ipip_init_net(struct net *net)
 {
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 	int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
 	return err;
 }
 
-static void ipip_exit_net(struct net *net)
+static void __net_exit ipip_exit_net(struct net *net)
 {
 	struct ipip_net *ipn = net_generic(net, ipip_net_id);
 	LIST_HEAD(list);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..90203e1b9187 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
 	if (t && !IS_ERR(t)) {
 		struct arpt_getinfo info;
 		const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+		struct xt_table_info tmp;
+
 		if (compat) {
-			struct xt_table_info tmp;
 			ret = compat_table_info(private, &tmp);
 			xt_compat_flush_offsets(NFPROTO_ARP);
 			private = &tmp;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 7fde8f6950d8..5bf7de1527a5 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1137,10 +1137,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
 	if (t && !IS_ERR(t)) {
 		struct ipt_getinfo info;
 		const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+		struct xt_table_info tmp;
+
 		if (compat) {
-			struct xt_table_info tmp;
 			ret = compat_table_info(private, &tmp);
 			xt_compat_flush_offsets(AF_INET);
 			private = &tmp;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..d1ea38a7c490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
 	},
 	{
 		.procname	= "ip_conntrack_buckets",
-		.data		= &nf_conntrack_htable_size,
+		.data		= &init_net.ct.htable_size,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < nf_conntrack_htable_size;
+	     st->bucket < net->ct.htable_size;
 	     st->bucket++) {
 		n = rcu_dereference(net->ct.hash[st->bucket].first);
 		if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 	head = rcu_dereference(head->next);
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= nf_conntrack_htable_size)
+			if (++st->bucket >= net->ct.htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..26066a2327ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
 
 static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
 						__read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;
 
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
 	hash = jhash_3words((__force u32)tuple->src.u3.ip,
 			    (__force u32)tuple->src.u.all,
 			    tuple->dst.protonum, 0);
-	return ((u64)hash * nf_nat_htable_size) >> 32;
+	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
-	unsigned int h = hash_by_src(tuple);
+	unsigned int h = hash_by_src(net, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
 	const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 	if (have_to_hash) {
 		unsigned int srchash;
 
-		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		spin_lock_bh(&nf_nat_lock);
 		/* nf_conntrack_alter_reply might re-allocate exntension aera */
 		nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 
 static int __net_init nf_nat_net_init(struct net *net)
 {
-	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
-						       &net->ipv4.nat_vmalloced, 0);
+	/* Leave them the same for the moment. */
+	net->ipv4.nat_htable_size = net->ct.htable_size;
+	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+						       &net->ipv4.nat_vmalloced, 0);
 	if (!net->ipv4.nat_bysource)
 		return -ENOMEM;
 	return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-			     nf_nat_htable_size);
+			     net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
 		return ret;
 	}
 
-	/* Leave them the same for the moment. */
-	nf_nat_htable_size = nf_conntrack_htable_size;
-
 	ret = register_pernet_subsys(&nf_nat_net_ops);
 	if (ret < 0)
 		goto cleanup_extend;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c48b7d..1b09a6dde7c0 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static struct {
-	char *name;
+static const struct {
+	const char *name;
 	int index;
 } icmpmibmap[] = {
 	{ "DestUnreachs", ICMP_DEST_UNREACH },
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e446496f564f..b16dfadbe6d6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -586,7 +586,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
 {
 	remove_proc_entry("rt_cache", net->proc_net_stat);
 	remove_proc_entry("rt_cache", net->proc_net);
+#ifdef CONFIG_NET_CLS_ROUTE
 	remove_proc_entry("rt_acct", net->proc_net);
+#endif
 }
 
 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
@@ -1988,8 +1990,13 @@ static int __mkroute_input(struct sk_buff *skb,
 	if (skb->protocol != htons(ETH_P_IP)) {
 		/* Not IP (i.e. ARP). Do not create route, if it is
 		 * invalid for proxy arp. DNAT routes are always valid.
+		 *
+		 * Proxy arp feature have been extended to allow, ARP
+		 * replies back to the same interface, to support
+		 * Private VLAN switch technologies. See arp.c.
 		 */
-		if (out_dev == in_dev) {
+		if (out_dev == in_dev &&
+		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
 			err = -EINVAL;
 			goto cleanup;
 		}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 65b8ebfd078a..c3588b4fd979 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -742,9 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
742 | * This still operates on a request_sock only, not on a big | 742 | * This still operates on a request_sock only, not on a big |
743 | * socket. | 743 | * socket. |
744 | */ | 744 | */ |
745 | static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, | 745 | static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, |
746 | struct request_sock *req, | 746 | struct request_sock *req, |
747 | struct request_values *rvp) | 747 | struct request_values *rvp) |
748 | { | 748 | { |
749 | const struct inet_request_sock *ireq = inet_rsk(req); | 749 | const struct inet_request_sock *ireq = inet_rsk(req); |
750 | int err = -1; | 750 | int err = -1; |
@@ -775,10 +775,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, | |||
775 | return err; | 775 | return err; |
776 | } | 776 | } |
777 | 777 | ||
778 | static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | 778 | static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, |
779 | struct request_values *rvp) | 779 | struct request_values *rvp) |
780 | { | 780 | { |
781 | return __tcp_v4_send_synack(sk, NULL, req, rvp); | 781 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); |
782 | return tcp_v4_send_synack(sk, NULL, req, rvp); | ||
782 | } | 783 | } |
783 | 784 | ||
784 | /* | 785 | /* |
@@ -1192,10 +1193,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) | |||
1192 | struct request_sock_ops tcp_request_sock_ops __read_mostly = { | 1193 | struct request_sock_ops tcp_request_sock_ops __read_mostly = { |
1193 | .family = PF_INET, | 1194 | .family = PF_INET, |
1194 | .obj_size = sizeof(struct tcp_request_sock), | 1195 | .obj_size = sizeof(struct tcp_request_sock), |
1195 | .rtx_syn_ack = tcp_v4_send_synack, | 1196 | .rtx_syn_ack = tcp_v4_rtx_synack, |
1196 | .send_ack = tcp_v4_reqsk_send_ack, | 1197 | .send_ack = tcp_v4_reqsk_send_ack, |
1197 | .destructor = tcp_v4_reqsk_destructor, | 1198 | .destructor = tcp_v4_reqsk_destructor, |
1198 | .send_reset = tcp_v4_send_reset, | 1199 | .send_reset = tcp_v4_send_reset, |
1200 | .syn_ack_timeout = tcp_syn_ack_timeout, | ||
1199 | }; | 1201 | }; |
1200 | 1202 | ||
1201 | #ifdef CONFIG_TCP_MD5SIG | 1203 | #ifdef CONFIG_TCP_MD5SIG |
@@ -1373,8 +1375,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1373 | } | 1375 | } |
1374 | tcp_rsk(req)->snt_isn = isn; | 1376 | tcp_rsk(req)->snt_isn = isn; |
1375 | 1377 | ||
1376 | if (__tcp_v4_send_synack(sk, dst, req, | 1378 | if (tcp_v4_send_synack(sk, dst, req, |
1377 | (struct request_values *)&tmp_ext) || | 1379 | (struct request_values *)&tmp_ext) || |
1378 | want_cookie) | 1380 | want_cookie) |
1379 | goto drop_and_free; | 1381 | goto drop_and_free; |
1380 | 1382 | ||
@@ -1649,6 +1651,9 @@ int tcp_v4_rcv(struct sk_buff *skb) | |||
1649 | if (!sk) | 1651 | if (!sk) |
1650 | goto no_tcp_socket; | 1652 | goto no_tcp_socket; |
1651 | 1653 | ||
1654 | if (iph->ttl < inet_sk(sk)->min_ttl) | ||
1655 | goto discard_and_relse; | ||
1656 | |||
1652 | process: | 1657 | process: |
1653 | if (sk->sk_state == TCP_TIME_WAIT) | 1658 | if (sk->sk_state == TCP_TIME_WAIT) |
1654 | goto do_time_wait; | 1659 | goto do_time_wait; |
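The new TTL test in tcp_v4_rcv() enforces the per-socket minimum TTL that the IP_MINTTL socket option (the ip_sockglue.c change in this merge) lets applications request. A minimal user-space sketch of a GTSM-style (RFC 5082) socket; the helper name is invented, error handling is trimmed, and the fallback #define assumes the Linux value of IP_MINTTL in case the libc headers predate it:

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21	/* value from linux/in.h; assumed if libc lacks it */
#endif

/* Hypothetical helper: create a TCP socket that drops segments arriving
 * with an IP TTL below 255, i.e. anything that has crossed a router. */
int make_gtsm_socket(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int minttl = 255;

	if (fd >= 0)
		setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl));
	return fd;
}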
@@ -2425,12 +2430,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = { | |||
2425 | }, | 2430 | }, |
2426 | }; | 2431 | }; |
2427 | 2432 | ||
2428 | static int tcp4_proc_init_net(struct net *net) | 2433 | static int __net_init tcp4_proc_init_net(struct net *net) |
2429 | { | 2434 | { |
2430 | return tcp_proc_register(net, &tcp4_seq_afinfo); | 2435 | return tcp_proc_register(net, &tcp4_seq_afinfo); |
2431 | } | 2436 | } |
2432 | 2437 | ||
2433 | static void tcp4_proc_exit_net(struct net *net) | 2438 | static void __net_exit tcp4_proc_exit_net(struct net *net) |
2434 | { | 2439 | { |
2435 | tcp_proc_unregister(net, &tcp4_seq_afinfo); | 2440 | tcp_proc_unregister(net, &tcp4_seq_afinfo); |
2436 | } | 2441 | } |
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index bb110c5ce1d2..9bc805df95d2 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -39,9 +39,9 @@ static int port __read_mostly = 0; | |||
39 | MODULE_PARM_DESC(port, "Port to match (0=all)"); | 39 | MODULE_PARM_DESC(port, "Port to match (0=all)"); |
40 | module_param(port, int, 0); | 40 | module_param(port, int, 0); |
41 | 41 | ||
42 | static int bufsize __read_mostly = 4096; | 42 | static unsigned int bufsize __read_mostly = 4096; |
43 | MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); | 43 | MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); |
44 | module_param(bufsize, int, 0); | 44 | module_param(bufsize, uint, 0); |
45 | 45 | ||
46 | static int full __read_mostly; | 46 | static int full __read_mostly; |
47 | MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); | 47 | MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); |
@@ -75,12 +75,12 @@ static struct { | |||
75 | 75 | ||
76 | static inline int tcp_probe_used(void) | 76 | static inline int tcp_probe_used(void) |
77 | { | 77 | { |
78 | return (tcp_probe.head - tcp_probe.tail) % bufsize; | 78 | return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); |
79 | } | 79 | } |
80 | 80 | ||
81 | static inline int tcp_probe_avail(void) | 81 | static inline int tcp_probe_avail(void) |
82 | { | 82 | { |
83 | return bufsize - tcp_probe_used(); | 83 | return bufsize - tcp_probe_used() - 1; |
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
@@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
116 | p->ssthresh = tcp_current_ssthresh(sk); | 116 | p->ssthresh = tcp_current_ssthresh(sk); |
117 | p->srtt = tp->srtt >> 3; | 117 | p->srtt = tp->srtt >> 3; |
118 | 118 | ||
119 | tcp_probe.head = (tcp_probe.head + 1) % bufsize; | 119 | tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); |
120 | } | 120 | } |
121 | tcp_probe.lastcwnd = tp->snd_cwnd; | 121 | tcp_probe.lastcwnd = tp->snd_cwnd; |
122 | spin_unlock(&tcp_probe.lock); | 122 | spin_unlock(&tcp_probe.lock); |
@@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file) | |||
149 | static int tcpprobe_sprint(char *tbuf, int n) | 149 | static int tcpprobe_sprint(char *tbuf, int n) |
150 | { | 150 | { |
151 | const struct tcp_log *p | 151 | const struct tcp_log *p |
152 | = tcp_probe.log + tcp_probe.tail % bufsize; | 152 | = tcp_probe.log + tcp_probe.tail; |
153 | struct timespec tv | 153 | struct timespec tv |
154 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); | 154 | = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); |
155 | 155 | ||
@@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf, | |||
192 | width = tcpprobe_sprint(tbuf, sizeof(tbuf)); | 192 | width = tcpprobe_sprint(tbuf, sizeof(tbuf)); |
193 | 193 | ||
194 | if (cnt + width < len) | 194 | if (cnt + width < len) |
195 | tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; | 195 | tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1); |
196 | 196 | ||
197 | spin_unlock_bh(&tcp_probe.lock); | 197 | spin_unlock_bh(&tcp_probe.lock); |
198 | 198 | ||
@@ -222,9 +222,10 @@ static __init int tcpprobe_init(void) | |||
222 | init_waitqueue_head(&tcp_probe.wait); | 222 | init_waitqueue_head(&tcp_probe.wait); |
223 | spin_lock_init(&tcp_probe.lock); | 223 | spin_lock_init(&tcp_probe.lock); |
224 | 224 | ||
225 | if (bufsize < 0) | 225 | if (bufsize == 0) |
226 | return -EINVAL; | 226 | return -EINVAL; |
227 | 227 | ||
228 | bufsize = roundup_pow_of_two(bufsize); | ||
228 | tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); | 229 | tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); |
229 | if (!tcp_probe.log) | 230 | if (!tcp_probe.log) |
230 | goto err0; | 231 | goto err0; |
@@ -236,7 +237,7 @@ static __init int tcpprobe_init(void) | |||
236 | if (ret) | 237 | if (ret) |
237 | goto err1; | 238 | goto err1; |
238 | 239 | ||
239 | pr_info("TCP probe registered (port=%d)\n", port); | 240 | pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); |
240 | return 0; | 241 | return 0; |
241 | err1: | 242 | err1: |
242 | proc_net_remove(&init_net, procname); | 243 | proc_net_remove(&init_net, procname); |
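The tcp_probe changes above swap the "% bufsize" ring arithmetic for "& (bufsize - 1)", which is only valid because tcpprobe_init() now rounds bufsize up to a power of two, and tcp_probe_avail() keeps one slot unused so a full ring can be told apart from an empty one. A small stand-alone illustration of that indexing scheme (toy code, not the module itself):

#include <stdio.h>

#define BUFSIZE 8	/* must be a power of two, as roundup_pow_of_two() ensures */

static unsigned int head, tail;

static unsigned int used(void)  { return (head - tail) & (BUFSIZE - 1); }
static unsigned int avail(void) { return BUFSIZE - used() - 1; }

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)	/* try to queue ten samples */
		if (avail())
			head = (head + 1) & (BUFSIZE - 1);

	/* Only BUFSIZE - 1 == 7 slots ever fill: prints used=7 avail=0 */
	printf("used=%u avail=%u\n", used(), avail());
	return 0;
}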
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 8816a20c2597..de7d1bf9114f 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -474,6 +474,12 @@ static void tcp_synack_timer(struct sock *sk) | |||
474 | TCP_TIMEOUT_INIT, TCP_RTO_MAX); | 474 | TCP_TIMEOUT_INIT, TCP_RTO_MAX); |
475 | } | 475 | } |
476 | 476 | ||
477 | void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req) | ||
478 | { | ||
479 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); | ||
480 | } | ||
481 | EXPORT_SYMBOL(tcp_syn_ack_timeout); | ||
482 | |||
477 | void tcp_set_keepalive(struct sock *sk, int val) | 483 | void tcp_set_keepalive(struct sock *sk, int val) |
478 | { | 484 | { |
479 | if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) | 485 | if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f0126fdd7e04..4f7d2122d818 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -2027,12 +2027,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = { | |||
2027 | }, | 2027 | }, |
2028 | }; | 2028 | }; |
2029 | 2029 | ||
2030 | static int udp4_proc_init_net(struct net *net) | 2030 | static int __net_init udp4_proc_init_net(struct net *net) |
2031 | { | 2031 | { |
2032 | return udp_proc_register(net, &udp4_seq_afinfo); | 2032 | return udp_proc_register(net, &udp4_seq_afinfo); |
2033 | } | 2033 | } |
2034 | 2034 | ||
2035 | static void udp4_proc_exit_net(struct net *net) | 2035 | static void __net_exit udp4_proc_exit_net(struct net *net) |
2036 | { | 2036 | { |
2037 | udp_proc_unregister(net, &udp4_seq_afinfo); | 2037 | udp_proc_unregister(net, &udp4_seq_afinfo); |
2038 | } | 2038 | } |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 66f79513f4a5..6610bf76369f 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = { | |||
81 | }, | 81 | }, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static int udplite4_proc_init_net(struct net *net) | 84 | static int __net_init udplite4_proc_init_net(struct net *net) |
85 | { | 85 | { |
86 | return udp_proc_register(net, &udplite4_seq_afinfo); | 86 | return udp_proc_register(net, &udplite4_seq_afinfo); |
87 | } | 87 | } |
88 | 88 | ||
89 | static void udplite4_proc_exit_net(struct net *net) | 89 | static void __net_exit udplite4_proc_exit_net(struct net *net) |
90 | { | 90 | { |
91 | udp_proc_unregister(net, &udplite4_seq_afinfo); | 91 | udp_proc_unregister(net, &udplite4_seq_afinfo); |
92 | } | 92 | } |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 8c08a28d8f83..67107d63c1cd 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <net/xfrm.h> | 15 | #include <net/xfrm.h> |
16 | #include <net/ip.h> | 16 | #include <net/ip.h> |
17 | 17 | ||
18 | static struct dst_ops xfrm4_dst_ops; | ||
19 | static struct xfrm_policy_afinfo xfrm4_policy_afinfo; | 18 | static struct xfrm_policy_afinfo xfrm4_policy_afinfo; |
20 | 19 | ||
21 | static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, | 20 | static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, |
@@ -190,8 +189,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
190 | 189 | ||
191 | static inline int xfrm4_garbage_collect(struct dst_ops *ops) | 190 | static inline int xfrm4_garbage_collect(struct dst_ops *ops) |
192 | { | 191 | { |
193 | xfrm4_policy_afinfo.garbage_collect(&init_net); | 192 | struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); |
194 | return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); | 193 | |
194 | xfrm4_policy_afinfo.garbage_collect(net); | ||
195 | return (atomic_read(&ops->entries) > ops->gc_thresh * 2); | ||
195 | } | 196 | } |
196 | 197 | ||
197 | static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) | 198 | static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) |
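The rewritten xfrm4_garbage_collect() recovers the owning struct net from its embedded dst_ops with container_of(), which is what makes the garbage-collection threshold per-namespace. A reduced user-space illustration of that idiom, with stand-in struct definitions rather than the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dst_ops_stub { int gc_thresh; };
struct net_stub     { int id; struct dst_ops_stub xfrm4_dst_ops; };

int main(void)
{
	struct net_stub net = { .id = 42, .xfrm4_dst_ops = { .gc_thresh = 256 } };
	struct dst_ops_stub *ops = &net.xfrm4_dst_ops;

	/* Walk back from the embedded member to the enclosing structure. */
	struct net_stub *owner = container_of(ops, struct net_stub, xfrm4_dst_ops);

	printf("net id = %d, gc_thresh = %d\n", owner->id, ops->gc_thresh);
	return 0;
}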
@@ -268,7 +269,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | |||
268 | static struct ctl_table xfrm4_policy_table[] = { | 269 | static struct ctl_table xfrm4_policy_table[] = { |
269 | { | 270 | { |
270 | .procname = "xfrm4_gc_thresh", | 271 | .procname = "xfrm4_gc_thresh", |
271 | .data = &xfrm4_dst_ops.gc_thresh, | 272 | .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh, |
272 | .maxlen = sizeof(int), | 273 | .maxlen = sizeof(int), |
273 | .mode = 0644, | 274 | .mode = 0644, |
274 | .proc_handler = proc_dointvec, | 275 | .proc_handler = proc_dointvec, |
@@ -295,8 +296,6 @@ static void __exit xfrm4_policy_fini(void) | |||
295 | 296 | ||
296 | void __init xfrm4_init(int rt_max_size) | 297 | void __init xfrm4_init(int rt_max_size) |
297 | { | 298 | { |
298 | xfrm4_state_init(); | ||
299 | xfrm4_policy_init(); | ||
300 | /* | 299 | /* |
301 | * Select a default value for the gc_thresh based on the main route | 300 | * Select a default value for the gc_thresh based on the main route |
302 | * table hash size. It seems to me the worst case scenario is when | 301 | * table hash size. It seems to me the worst case scenario is when |
@@ -308,6 +307,9 @@ void __init xfrm4_init(int rt_max_size) | |||
308 | * and start cleaning when we're 1/2 full | 307 |
309 | */ | 308 | */ |
310 | xfrm4_dst_ops.gc_thresh = rt_max_size/2; | 309 | xfrm4_dst_ops.gc_thresh = rt_max_size/2; |
310 | |||
311 | xfrm4_state_init(); | ||
312 | xfrm4_policy_init(); | ||
311 | #ifdef CONFIG_SYSCTL | 313 | #ifdef CONFIG_SYSCTL |
312 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, | 314 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, |
313 | xfrm4_policy_table); | 315 | xfrm4_policy_table); |