author    | NeilBrown <neilb@suse.de> | 2012-08-01 06:40:02 -0400
committer | NeilBrown <neilb@suse.de> | 2012-08-01 06:40:02 -0400
commit    | bb181e2e48f8c85db08c9cb015cbba9618dbf05c (patch)
tree      | 191bc24dd97bcb174535cc217af082f16da3b43d /net/ipv6
parent    | d57368afe63b3b7b45ce6c2b8c5276417935be2f (diff)
parent    | c039c332f23e794deb6d6f37b9f07ff3b27fb2cf (diff)
Merge commit 'c039c332f23e794deb6d6f37b9f07ff3b27fb2cf' into md
Pull in pre-requisites for adding raid10 support to dm-raid.
Diffstat (limited to 'net/ipv6')
-rw-r--r-- | net/ipv6/addrconf.c | 21
-rw-r--r-- | net/ipv6/ah6.c | 11
-rw-r--r-- | net/ipv6/esp6.c | 11
-rw-r--r-- | net/ipv6/exthdrs.c | 4
-rw-r--r-- | net/ipv6/icmp.c | 23
-rw-r--r-- | net/ipv6/inet6_connection_sock.c | 103
-rw-r--r-- | net/ipv6/ip6_fib.c | 5
-rw-r--r-- | net/ipv6/ip6_input.c | 9
-rw-r--r-- | net/ipv6/ip6_output.c | 40
-rw-r--r-- | net/ipv6/ip6_tunnel.c | 96
-rw-r--r-- | net/ipv6/ip6mr.c | 5
-rw-r--r-- | net/ipv6/ipcomp6.c | 11
-rw-r--r-- | net/ipv6/mcast.c | 3
-rw-r--r-- | net/ipv6/ndisc.c | 129
-rw-r--r-- | net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 131
-rw-r--r-- | net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 51
-rw-r--r-- | net/ipv6/protocol.c | 8
-rw-r--r-- | net/ipv6/raw.c | 11
-rw-r--r-- | net/ipv6/route.c | 538
-rw-r--r-- | net/ipv6/sit.c | 25
-rw-r--r-- | net/ipv6/syncookies.c | 5
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 204
-rw-r--r-- | net/ipv6/udp.c | 13
-rw-r--r-- | net/ipv6/xfrm6_policy.c | 26
24 files changed, 717 insertions, 766 deletions
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8f6411c97189..79181819a24f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -63,6 +63,7 @@
 #include <linux/delay.h>
 #include <linux/notifier.h>
 #include <linux/string.h>
+#include <linux/hash.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -579,15 +580,9 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
 	list_add_tail(&ifp->if_list, p);
 }
 
-static u32 ipv6_addr_hash(const struct in6_addr *addr)
+static u32 inet6_addr_hash(const struct in6_addr *addr)
 {
-	/*
-	 * We perform the hash function over the last 64 bits of the address
-	 * This will include the IEEE address token on links that support it.
-	 */
-	return jhash_2words((__force u32)addr->s6_addr32[2],
-			    (__force u32)addr->s6_addr32[3], 0)
-		& (IN6_ADDR_HSIZE - 1);
+	return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
 }
 
 /* On success it returns ifp with increased reference count */
@@ -662,7 +657,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
 	in6_ifa_hold(ifa);
 
 	/* Add to big hash table */
-	hash = ipv6_addr_hash(addr);
+	hash = inet6_addr_hash(addr);
 
 	hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
 	spin_unlock(&addrconf_hash_lock);
@@ -1270,7 +1265,7 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 {
 	struct inet6_ifaddr *ifp;
 	struct hlist_node *node;
-	unsigned int hash = ipv6_addr_hash(addr);
+	unsigned int hash = inet6_addr_hash(addr);
 
 	rcu_read_lock_bh();
 	hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
@@ -1293,7 +1288,7 @@ EXPORT_SYMBOL(ipv6_chk_addr);
 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 			       struct net_device *dev)
 {
-	unsigned int hash = ipv6_addr_hash(addr);
+	unsigned int hash = inet6_addr_hash(addr);
 	struct inet6_ifaddr *ifp;
 	struct hlist_node *node;
 
@@ -1336,7 +1331,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
 				     struct net_device *dev, int strict)
 {
 	struct inet6_ifaddr *ifp, *result = NULL;
-	unsigned int hash = ipv6_addr_hash(addr);
+	unsigned int hash = inet6_addr_hash(addr);
 	struct hlist_node *node;
 
 	rcu_read_lock_bh();
@@ -3223,7 +3218,7 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 	int ret = 0;
 	struct inet6_ifaddr *ifp = NULL;
 	struct hlist_node *n;
-	unsigned int hash = ipv6_addr_hash(addr);
+	unsigned int hash = inet6_addr_hash(addr);
 
 	rcu_read_lock_bh();
 	hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
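For context on the addrconf.c hunks above: the old helper jhashed only the low 64 bits of the address and masked the result to the table size, while the new one folds the whole address via ipv6_addr_hash() and reduces it with hash_32(). The standalone model below mimics the new bucket computation in userspace; the golden-ratio multiplier and the 4-bit IN6_ADDR_HSIZE_SHIFT are assumptions borrowed from the generic <linux/hash.h> scheme of this era, not something visible in this diff.

```c
/*
 * Userspace model of the new inet6_addr_hash() bucket computation.
 * The multiplier and the 16-bucket table size are illustrative
 * assumptions; only the hash_32(ipv6_addr_hash(addr), ...) shape
 * comes from the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define IN6_ADDR_HSIZE_SHIFT 4			/* assumed: 16 buckets */

static uint32_t fold_in6_addr(const uint32_t w[4])
{
	/* XOR-fold all 128 bits, in the spirit of ipv6_addr_hash() */
	return w[0] ^ w[1] ^ w[2] ^ w[3];
}

static uint32_t hash_32_model(uint32_t val, unsigned int bits)
{
	/* multiplicative hash: keep the top 'bits' bits of val * K */
	return (val * 0x9e370001u) >> (32 - bits);
}

int main(void)
{
	const uint32_t addr[4] = { 0x20010db8u, 0x0u, 0x0u, 0x1u };

	printf("bucket = %u\n",
	       hash_32_model(fold_in6_addr(addr), IN6_ADDR_HSIZE_SHIFT));
	return 0;
}
```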
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index f1a4a2c28ed3..7e6139508ee7 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -35,6 +35,7 @@
 #include <linux/pfkeyv2.h>
 #include <linux/string.h>
 #include <linux/scatterlist.h>
+#include <net/ip6_route.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
@@ -612,16 +613,18 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct xfrm_state *x;
 
 	if (type != ICMPV6_DEST_UNREACH &&
-	    type != ICMPV6_PKT_TOOBIG)
+	    type != ICMPV6_PKT_TOOBIG &&
+	    type != NDISC_REDIRECT)
 		return;
 
 	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
 	if (!x)
 		return;
 
-	NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n",
-		 ntohl(ah->spi), &iph->daddr);
-
+	if (type == NDISC_REDIRECT)
+		ip6_redirect(skb, net, 0, 0);
+	else
+		ip6_update_pmtu(skb, net, info, 0, 0);
 	xfrm_state_put(x);
 }
 
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index db1521fcda5b..6dc7fd353ef5 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -39,6 +39,7 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <net/ip6_route.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
@@ -433,15 +434,19 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct xfrm_state *x;
 
 	if (type != ICMPV6_DEST_UNREACH &&
-	    type != ICMPV6_PKT_TOOBIG)
+	    type != ICMPV6_PKT_TOOBIG &&
+	    type != NDISC_REDIRECT)
 		return;
 
 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
 			      esph->spi, IPPROTO_ESP, AF_INET6);
 	if (!x)
 		return;
-	pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
-		 ntohl(esph->spi), &iph->daddr);
+
+	if (type == NDISC_REDIRECT)
+		ip6_redirect(skb, net, 0, 0);
+	else
+		ip6_update_pmtu(skb, net, info, 0, 0);
 	xfrm_state_put(x);
 }
 
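ah6.c above, esp6.c here, and ipcomp6.c later in this series now share one dispatch shape in their ICMPv6 error handlers: NDISC_REDIRECT is accepted alongside DEST_UNREACH and PKT_TOOBIG, and the event is forwarded to ip6_redirect() or ip6_update_pmtu() instead of just printing a debug line. A small userspace model of that control flow follows; the two handle_* stubs stand in for the kernel calls and are illustrative only, while the numeric type values are the standard ICMPv6/ND ones.

```c
/* Control-flow model of the patched xfrm6 error handlers. */
#include <stdint.h>
#include <stdio.h>

#define ICMPV6_DEST_UNREACH	1
#define ICMPV6_PKT_TOOBIG	2
#define NDISC_REDIRECT		137

static void handle_redirect(void)     { puts("reroute via redirect"); }
static void handle_pmtu(uint32_t mtu) { printf("update path MTU to %u\n", mtu); }

static void proto_err(uint8_t type, uint32_t info)
{
	if (type != ICMPV6_DEST_UNREACH &&
	    type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return;		/* everything else is still ignored */

	if (type == NDISC_REDIRECT)
		handle_redirect();
	else
		handle_pmtu(info);
}

int main(void)
{
	proto_err(ICMPV6_PKT_TOOBIG, 1280);
	proto_err(NDISC_REDIRECT, 0);
	return 0;
}
```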
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 6447dc49429f..fa3d9c328092 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -791,14 +791,14 @@ static int ipv6_renew_option(void *ohdr,
 		if (ohdr) {
 			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
 			*hdr = (struct ipv6_opt_hdr *)*p;
-			*p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
+			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
 		}
 	} else {
 		if (newopt) {
 			if (copy_from_user(*p, newopt, newoptlen))
 				return -EFAULT;
 			*hdr = (struct ipv6_opt_hdr *)*p;
-			if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
+			if (ipv6_optlen(*hdr) > newoptlen)
 				return -EINVAL;
 			*p += CMSG_ALIGN(newoptlen);
 		}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 091a2971c7b7..24d69dbca4d6 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -188,14 +188,16 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
 	} else {
 		struct rt6_info *rt = (struct rt6_info *)dst;
 		int tmo = net->ipv6.sysctl.icmpv6_time;
+		struct inet_peer *peer;
 
 		/* Give more bandwidth to wider prefixes. */
 		if (rt->rt6i_dst.plen < 128)
 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
 
-		if (!rt->rt6i_peer)
-			rt6_bind_peer(rt, 1);
-		res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
+		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+		res = inet_peer_xrlim_allow(peer, tmo);
+		if (peer)
+			inet_putpeer(peer);
 	}
 	dst_release(dst);
 	return res;
@@ -596,13 +598,12 @@ out:
 	icmpv6_xmit_unlock(sk);
 }
 
-static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 {
 	const struct inet6_protocol *ipprot;
 	int inner_offset;
-	int hash;
-	u8 nexthdr;
 	__be16 frag_off;
+	u8 nexthdr;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		return;
@@ -629,10 +630,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 	   --ANK (980726)
 	 */
 
-	hash = nexthdr & (MAX_INET_PROTOS - 1);
-
 	rcu_read_lock();
-	ipprot = rcu_dereference(inet6_protos[hash]);
+	ipprot = rcu_dereference(inet6_protos[nexthdr]);
 	if (ipprot && ipprot->err_handler)
 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
 	rcu_read_unlock();
@@ -649,7 +648,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	const struct in6_addr *saddr, *daddr;
-	const struct ipv6hdr *orig_hdr;
 	struct icmp6hdr *hdr;
 	u8 type;
 
@@ -661,7 +659,7 @@
 				 XFRM_STATE_ICMP))
 		goto drop_no_count;
 
-	if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
+	if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
 		goto drop_no_count;
 
 	nh = skb_network_offset(skb);
@@ -722,9 +720,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 			goto discard_it;
 		hdr = icmp6_hdr(skb);
-		orig_hdr = (struct ipv6hdr *) (hdr + 1);
-		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
-				   ntohl(hdr->icmp6_mtu));
 
 		/*
 		 *	Drop through to notify
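The icmp.c hunks above (and the ip6_input.c ones below) drop the `nexthdr & (MAX_INET_PROTOS - 1)` step and index inet6_protos[] directly by the next-header value. That only works if the array covers every possible 8-bit protocol number, making the mask a no-op. The tiny check below demonstrates that equivalence; the value 256 for MAX_INET_PROTOS is an assumption about the companion protocol-array change, not something shown in this hunk.

```c
/* With a 256-entry protocol array, masking an 8-bit next-header value
 * changes nothing, which is why the hash step could be deleted. */
#include <assert.h>
#include <stdio.h>

#define MAX_INET_PROTOS 256	/* assumed size after the companion change */

int main(void)
{
	for (int nexthdr = 0; nexthdr < 256; nexthdr++)
		assert((nexthdr & (MAX_INET_PROTOS - 1)) == nexthdr);

	puts("mask is a no-op for all 8-bit protocol numbers");
	return 0;
}
```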
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e6cee5292a0b..0251a6005be8 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -55,26 +55,26 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
 
 struct dst_entry *inet6_csk_route_req(struct sock *sk,
+				      struct flowi6 *fl6,
 				      const struct request_sock *req)
 {
 	struct inet6_request_sock *treq = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
-	struct flowi6 fl6;
 
-	memset(&fl6, 0, sizeof(fl6));
-	fl6.flowi6_proto = IPPROTO_TCP;
-	fl6.daddr = treq->rmt_addr;
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
-	fl6.saddr = treq->loc_addr;
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_dport = inet_rsk(req)->rmt_port;
-	fl6.fl6_sport = inet_rsk(req)->loc_port;
-	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = IPPROTO_TCP;
+	fl6->daddr = treq->rmt_addr;
+	final_p = fl6_update_dst(fl6, np->opt, &final);
+	fl6->saddr = treq->loc_addr;
+	fl6->flowi6_oif = treq->iif;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet_rsk(req)->rmt_port;
+	fl6->fl6_sport = inet_rsk(req)->loc_port;
+	security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
 	if (IS_ERR(dst))
 		return NULL;
 
@@ -171,7 +171,8 @@ EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
 static inline
 void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
-			   struct in6_addr *daddr, struct in6_addr *saddr)
+			   const struct in6_addr *daddr,
+			   const struct in6_addr *saddr)
 {
 	__ip6_dst_store(sk, dst, daddr, saddr);
 
@@ -203,43 +204,52 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 	return dst;
 }
 
-int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+						struct flowi6 *fl6)
 {
-	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flowi6 fl6;
-	struct dst_entry *dst;
 	struct in6_addr *final_p, final;
-	int res;
+	struct dst_entry *dst;
 
-	memset(&fl6, 0, sizeof(fl6));
-	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = np->daddr;
-	fl6.saddr = np->saddr;
-	fl6.flowlabel = np->flow_label;
-	IP6_ECN_flow_xmit(sk, fl6.flowlabel);
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_sport = inet->inet_sport;
-	fl6.fl6_dport = inet->inet_dport;
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = np->daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowlabel = np->flow_label;
+	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->fl6_dport = inet->inet_dport;
+	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
+	final_p = fl6_update_dst(fl6, np->opt, &final);
 
 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+	if (!dst) {
+		dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
 
-	if (dst == NULL) {
-		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+		if (!IS_ERR(dst))
+			__inet6_csk_dst_store(sk, dst, NULL, NULL);
+	}
+	return dst;
+}
 
-	if (IS_ERR(dst)) {
-		sk->sk_err_soft = -PTR_ERR(dst);
-		sk->sk_route_caps = 0;
-		kfree_skb(skb);
-		return PTR_ERR(dst);
-	}
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+{
+	struct sock *sk = skb->sk;
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	struct dst_entry *dst;
+	int res;
 
-	__inet6_csk_dst_store(sk, dst, NULL, NULL);
+	dst = inet6_csk_route_socket(sk, &fl6);
+	if (IS_ERR(dst)) {
+		sk->sk_err_soft = -PTR_ERR(dst);
+		sk->sk_route_caps = 0;
+		kfree_skb(skb);
+		return PTR_ERR(dst);
 	}
 
 	rcu_read_lock();
@@ -253,3 +263,16 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 	return res;
 }
 EXPORT_SYMBOL_GPL(inet6_csk_xmit);
+
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+	struct flowi6 fl6;
+	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
+
+	if (IS_ERR(dst))
+		return NULL;
+	dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+	return inet6_csk_route_socket(sk, &fl6);
+}
+EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
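The refactor above funnels both the transmit path and the new inet6_csk_update_pmtu() through a single routing helper, inet6_csk_route_socket(), which reuses a cached dst when one is valid. The sketch below models that "one helper, two callers" split in plain C; the types and error handling are simplified stand-ins for the kernel's dst_entry/ERR_PTR machinery, not kernel definitions.

```c
/* Shape of the refactor: one route_socket() helper feeds both the
 * transmit path and the PMTU update path (simplified model). */
#include <stdio.h>
#include <stddef.h>

struct dst { int mtu; };

static struct dst cached = { .mtu = 1500 };

/* stand-in for inet6_csk_route_socket(): look up (or reuse) the route */
static struct dst *route_socket(void)
{
	return &cached;	/* a real lookup can also fail; callers must check */
}

static int csk_xmit(void)
{
	struct dst *dst = route_socket();

	if (!dst)
		return -1;	/* the kernel propagates PTR_ERR() here */
	printf("xmit over dst with mtu %d\n", dst->mtu);
	return 0;
}

static struct dst *csk_update_pmtu(int mtu)
{
	struct dst *dst = route_socket();

	if (!dst)
		return NULL;
	dst->mtu = mtu;		/* the kernel calls dst->ops->update_pmtu() */
	return route_socket();	/* re-resolve, as the diff does */
}

int main(void)
{
	csk_xmit();
	csk_update_pmtu(1400);
	csk_xmit();
	return 0;
}
```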
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 608327661960..13690d650c3e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -197,6 +197,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
 		table->tb6_id = id;
 		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
 		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+		inet_peer_base_init(&table->tb6_peers);
 	}
 
 	return table;
@@ -1633,6 +1634,7 @@ static int __net_init fib6_net_init(struct net *net)
 	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
 	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
 		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+	inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -1643,6 +1645,7 @@ static int __net_init fib6_net_init(struct net *net)
 	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
 	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
 		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+	inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
 #endif
 	fib6_tables_init(net);
 
@@ -1666,8 +1669,10 @@ static void fib6_net_exit(struct net *net)
 	del_timer_sync(&net->ipv6.ip6_fib_timer);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
 	kfree(net->ipv6.fib6_local_tbl);
 #endif
+	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
 	kfree(net->ipv6.fib6_main_tbl);
 	kfree(net->ipv6.fib_table_hash);
 	kfree(net->ipv6.rt6_stats);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 21a15dfe4a9e..5ab923e51af3 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,13 +168,12 @@ drop:
 
 static int ip6_input_finish(struct sk_buff *skb)
 {
+	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct inet6_protocol *ipprot;
+	struct inet6_dev *idev;
 	unsigned int nhoff;
 	int nexthdr;
 	bool raw;
-	u8 hash;
-	struct inet6_dev *idev;
-	struct net *net = dev_net(skb_dst(skb)->dev);
 
 	/*
 	 *	Parse extension headers
@@ -189,9 +188,7 @@ resubmit:
 	nexthdr = skb_network_header(skb)[nhoff];
 
 	raw = raw6_local_deliver(skb, nexthdr);
-
-	hash = nexthdr & (MAX_INET_PROTOS - 1);
-	if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
+	if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
 		int ret;
 
 		if (ipprot->flags & INET6_PROTO_FINAL) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index decc21d19c53..5b2d63ed793e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -83,24 +83,12 @@ int ip6_local_out(struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(ip6_local_out);
 
-/* dev_loopback_xmit for use with netfilter. */
-static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
-{
-	skb_reset_mac_header(newskb);
-	__skb_pull(newskb, skb_network_offset(newskb));
-	newskb->pkt_type = PACKET_LOOPBACK;
-	newskb->ip_summed = CHECKSUM_UNNECESSARY;
-	WARN_ON(!skb_dst(newskb));
-
-	netif_rx_ni(newskb);
-	return 0;
-}
-
 static int ip6_finish_output2(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
 	struct neighbour *neigh;
+	struct rt6_info *rt;
 
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;
@@ -121,7 +109,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
 			if (newskb)
 				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 					newskb, NULL, newskb->dev,
-					ip6_dev_loopback_xmit);
+					dev_loopback_xmit);
 
 		if (ipv6_hdr(skb)->hop_limit == 0) {
 			IP6_INC_STATS(dev_net(dev), idev,
@@ -136,9 +124,10 @@
 	}
 
 	rcu_read_lock();
-	neigh = dst_get_neighbour_noref(dst);
+	rt = (struct rt6_info *) dst;
+	neigh = rt->n;
 	if (neigh) {
-		int res = neigh_output(neigh, skb);
+		int res = dst_neigh_output(dst, neigh, skb);
 
 		rcu_read_unlock();
 		return res;
@@ -463,6 +452,7 @@ int ip6_forward(struct sk_buff *skb)
 	 */
 	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
 		struct in6_addr *target = NULL;
+		struct inet_peer *peer;
 		struct rt6_info *rt;
 
 		/*
@@ -476,14 +466,15 @@
 		else
 			target = &hdr->daddr;
 
-		if (!rt->rt6i_peer)
-			rt6_bind_peer(rt, 1);
+		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
 
 		/* Limit redirects both by destination (here)
 		   and by source (inside ndisc_send_redirect)
 		 */
-		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+		if (inet_peer_xrlim_allow(peer, 1*HZ))
 			ndisc_send_redirect(skb, target);
+		if (peer)
+			inet_putpeer(peer);
 	} else {
 		int addrtype = ipv6_addr_type(&hdr->saddr);
 
@@ -604,12 +595,13 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 
 	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		struct inet_peer *peer;
+		struct net *net;
 
-		if (!rt->rt6i_peer)
-			rt6_bind_peer(rt, 1);
-		peer = rt->rt6i_peer;
+		net = dev_net(rt->dst.dev);
+		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
 		if (peer) {
 			fhdr->identification = htonl(inet_getid(peer, 0));
+			inet_putpeer(peer);
 			return;
 		}
 	}
@@ -960,6 +952,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 	struct net *net = sock_net(sk);
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	struct neighbour *n;
+	struct rt6_info *rt;
 #endif
 	int err;
 
@@ -988,7 +981,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 	 * dst entry of the nexthop router
 	 */
 	rcu_read_lock();
-	n = dst_get_neighbour_noref(*dst);
+	rt = (struct rt6_info *) *dst;
+	n = rt->n;
 	if (n && !(n->nud_state & NUD_VALID)) {
 		struct inet6_ifaddr *ifp;
 		struct flowi6 fl_gw6;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c9015fad8d65..9a1d5fe6aef8 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -40,6 +40,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/slab.h>
+#include <linux/hash.h>
 
 #include <asm/uaccess.h>
 #include <linux/atomic.h>
@@ -70,11 +71,15 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
 #define IPV6_TCLASS_SHIFT 20
 
-#define HASH_SIZE 32
+#define HASH_SIZE_SHIFT 5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
 
-#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
-		     (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
-		    (HASH_SIZE - 1))
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+	return hash_32(hash, HASH_SIZE_SHIFT);
+}
 
 static int ip6_tnl_dev_init(struct net_device *dev);
 static void ip6_tnl_dev_setup(struct net_device *dev);
@@ -166,12 +171,11 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 static struct ip6_tnl *
 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
 {
-	unsigned int h0 = HASH(remote);
-	unsigned int h1 = HASH(local);
+	unsigned int hash = HASH(remote, local);
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
-	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
+	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
@@ -205,7 +209,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
 
 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
 		prio = 1;
-		h = HASH(remote) ^ HASH(local);
+		h = HASH(remote, local);
 	}
 	return &ip6n->tnls[prio][h];
 }
@@ -252,7 +256,7 @@ static void ip6_dev_free(struct net_device *dev)
 }
 
 /**
- * ip6_tnl_create() - create a new tunnel
+ * ip6_tnl_create - create a new tunnel
  *   @p: tunnel parameters
  *   @pt: pointer to new tunnel
  *
@@ -550,6 +554,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		rel_type = ICMP_DEST_UNREACH;
 		rel_code = ICMP_FRAG_NEEDED;
 		break;
+	case NDISC_REDIRECT:
+		rel_type = ICMP_REDIRECT;
+		rel_code = ICMP_REDIR_HOST;
 	default:
 		return 0;
 	}
@@ -606,8 +613,10 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		if (rel_info > dst_mtu(skb_dst(skb2)))
 			goto out;
 
-		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
+		skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
 	}
+	if (rel_type == ICMP_REDIRECT)
+		skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
 
 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
 
@@ -684,24 +693,50 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
 		IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
+static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+			     const struct in6_addr *laddr,
+			     const struct in6_addr *raddr)
+{
+	struct ip6_tnl_parm *p = &t->parms;
+	int ltype = ipv6_addr_type(laddr);
+	int rtype = ipv6_addr_type(raddr);
+	__u32 flags = 0;
+
+	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
+		flags = IP6_TNL_F_CAP_PER_PACKET;
+	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
+		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
+		if (ltype&IPV6_ADDR_UNICAST)
+			flags |= IP6_TNL_F_CAP_XMIT;
+		if (rtype&IPV6_ADDR_UNICAST)
+			flags |= IP6_TNL_F_CAP_RCV;
+	}
+	return flags;
+}
+
 /* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
+static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+				  const struct in6_addr *laddr,
+				  const struct in6_addr *raddr)
 {
 	struct ip6_tnl_parm *p = &t->parms;
 	int ret = 0;
 	struct net *net = dev_net(t->dev);
 
-	if (p->flags & IP6_TNL_F_CAP_RCV) {
+	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
+	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
+	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
 		struct net_device *ldev = NULL;
 
 		if (p->link)
 			ldev = dev_get_by_index_rcu(net, p->link);
 
-		if ((ipv6_addr_is_multicast(&p->laddr) ||
-		     likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
-		     likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
+		if ((ipv6_addr_is_multicast(laddr) ||
+		     likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
+		    likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
 			ret = 1;
-
 	}
 	return ret;
 }
@@ -740,7 +775,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 			goto discard;
 		}
 
-		if (!ip6_tnl_rcv_ctl(t)) {
+		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
 			t->dev->stats.rx_dropped++;
 			rcu_read_unlock();
 			goto discard;
@@ -921,7 +956,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 	if (skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 	if (skb->len > mtu) {
 		*pmtu = mtu;
 		err = -EMSGSIZE;
@@ -1114,25 +1149,6 @@ tx_err:
 	return NETDEV_TX_OK;
 }
 
-static void ip6_tnl_set_cap(struct ip6_tnl *t)
-{
-	struct ip6_tnl_parm *p = &t->parms;
-	int ltype = ipv6_addr_type(&p->laddr);
-	int rtype = ipv6_addr_type(&p->raddr);
-
-	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
-
-	if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
-	    rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
-	    !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
-	    (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
-		if (ltype&IPV6_ADDR_UNICAST)
-			p->flags |= IP6_TNL_F_CAP_XMIT;
-		if (rtype&IPV6_ADDR_UNICAST)
-			p->flags |= IP6_TNL_F_CAP_RCV;
-	}
-}
-
 static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
@@ -1153,7 +1169,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
 
-	ip6_tnl_set_cap(t);
+	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
 
 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
 		dev->flags |= IFF_POINTOPOINT;
@@ -1438,6 +1455,9 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
 
 	t->parms.proto = IPPROTO_IPV6;
 	dev_hold(dev);
+
+	ip6_tnl_link_config(t);
+
 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
 	return 0;
 }
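ip6_tunnel.c replaces the single-address HASH() macro, which masked each endpoint to the table size and XORed the two buckets, with a function that folds both endpoints first and only then reduces with hash_32(). The model below contrasts the two bucket computations; HASH_SIZE_SHIFT comes from the hunk, while the fold() helper and the golden-ratio multiplier are illustrative stand-ins for ipv6_addr_hash() and the generic hash_32().

```c
/* Old vs new tunnel bucket selection, modelled in userspace. */
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE_SHIFT 5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)

static uint32_t fold(const uint32_t a[4])
{
	return a[0] ^ a[1] ^ a[2] ^ a[3];
}

static uint32_t old_bucket(const uint32_t r[4], const uint32_t l[4])
{
	/* pre-change: mask each endpoint to 5 bits, then XOR the buckets */
	return (fold(r) & (HASH_SIZE - 1)) ^ (fold(l) & (HASH_SIZE - 1));
}

static uint32_t new_bucket(const uint32_t r[4], const uint32_t l[4])
{
	/* post-change: XOR full 32-bit folds, then one multiplicative hash */
	uint32_t h = fold(r) ^ fold(l);

	return (h * 0x9e370001u) >> (32 - HASH_SIZE_SHIFT);
}

int main(void)
{
	const uint32_t remote[4] = { 0x20010db8u, 0, 0, 0x2u };
	const uint32_t local[4]  = { 0x20010db8u, 0, 0, 0x1u };

	printf("old=%u new=%u\n", old_bucket(remote, local),
	       new_bucket(remote, local));
	return 0;
}
```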
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 461e47c8e956..4532973f0dd4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2104,8 +2104,9 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	if (c->mf6c_parent >= MAXMIFS)
 		return -ENOENT;
 
-	if (MIF_EXISTS(mrt, c->mf6c_parent))
-		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
+	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
+	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
+		return -EMSGSIZE;
 
 	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5cb75bfe45b1..7af5aee75d98 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -46,6 +46,7 @@
 #include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <net/ip6_route.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
@@ -63,7 +64,9 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 					(struct ip_comp_hdr *)(skb->data + offset);
 	struct xfrm_state *x;
 
-	if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
+	if (type != ICMPV6_DEST_UNREACH &&
+	    type != ICMPV6_PKT_TOOBIG &&
+	    type != NDISC_REDIRECT)
 		return;
 
 	spi = htonl(ntohs(ipcomph->cpi));
@@ -72,8 +75,10 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (!x)
 		return;
 
-	pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
-		 spi, &iph->daddr);
+	if (type == NDISC_REDIRECT)
+		ip6_redirect(skb, net, 0, 0);
+	else
+		ip6_update_pmtu(skb, net, info, 0, 0);
 	xfrm_state_put(x);
 }
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6d0f5dc8e3a6..92f8e48e4ba4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -211,6 +211,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 	struct ipv6_mc_socklist __rcu **lnk;
 	struct net *net = sock_net(sk);
 
+	if (!ipv6_addr_is_multicast(addr))
+		return -EINVAL;
+
 	spin_lock(&ipv6_sk_mc_lock);
 	for (lnk = &np->ipv6_mc_list;
 	     (mc_lst = rcu_dereference_protected(*lnk,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 54f62d3b8dd6..ff36194a71aa 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -143,40 +143,6 @@ struct neigh_table nd_tbl = {
 	.gc_thresh3 = 1024,
 };
 
-/* ND options */
-struct ndisc_options {
-	struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
-#ifdef CONFIG_IPV6_ROUTE_INFO
-	struct nd_opt_hdr *nd_opts_ri;
-	struct nd_opt_hdr *nd_opts_ri_end;
-#endif
-	struct nd_opt_hdr *nd_useropts;
-	struct nd_opt_hdr *nd_useropts_end;
-};
-
-#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
-#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
-#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
-#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
-#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
-#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
-
-#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
-
-/*
- * Return the padding between the option length and the start of the
- * link addr. Currently only IP-over-InfiniBand needs this, although
- * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
- * also need a pad of 2.
- */
-static int ndisc_addr_option_pad(unsigned short type)
-{
-	switch (type) {
-	case ARPHRD_INFINIBAND: return 2;
-	default: return 0;
-	}
-}
-
 static inline int ndisc_opt_addr_space(struct net_device *dev)
 {
 	return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type));
@@ -233,8 +199,8 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
 	return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
 }
 
-static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
-						 struct ndisc_options *ndopts)
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+					  struct ndisc_options *ndopts)
 {
 	struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt;
 
@@ -297,17 +263,6 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
 	return ndopts;
 }
 
-static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
-				      struct net_device *dev)
-{
-	u8 *lladdr = (u8 *)(p + 1);
-	int lladdrlen = p->nd_opt_len << 3;
-	int prepad = ndisc_addr_option_pad(dev->type);
-	if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
-		return NULL;
-	return lladdr + prepad;
-}
-
 int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
 {
 	switch (dev->type) {
@@ -1379,16 +1334,6 @@ out:
 
 static void ndisc_redirect_rcv(struct sk_buff *skb)
 {
-	struct inet6_dev *in6_dev;
-	struct icmp6hdr *icmph;
-	const struct in6_addr *dest;
-	const struct in6_addr *target;	/* new first hop to destination */
-	struct neighbour *neigh;
-	int on_link = 0;
-	struct ndisc_options ndopts;
-	int optlen;
-	u8 *lladdr = NULL;
-
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	switch (skb->ndisc_nodetype) {
 	case NDISC_NODETYPE_HOST:
@@ -1405,65 +1350,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 		return;
 	}
 
-	optlen = skb->tail - skb->transport_header;
-	optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
-
-	if (optlen < 0) {
-		ND_PRINTK(2, warn, "Redirect: packet too short\n");
-		return;
-	}
-
-	icmph = icmp6_hdr(skb);
-	target = (const struct in6_addr *) (icmph + 1);
-	dest = target + 1;
-
-	if (ipv6_addr_is_multicast(dest)) {
-		ND_PRINTK(2, warn,
-			  "Redirect: destination address is multicast\n");
-		return;
-	}
-
-	if (ipv6_addr_equal(dest, target)) {
-		on_link = 1;
-	} else if (ipv6_addr_type(target) !=
-		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
-		ND_PRINTK(2, warn,
-			  "Redirect: target address is not link-local unicast\n");
-		return;
-	}
-
-	in6_dev = __in6_dev_get(skb->dev);
-	if (!in6_dev)
-		return;
-	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
-		return;
-
-	/* RFC2461 8.1:
-	 *	The IP source address of the Redirect MUST be the same as the current
-	 *	first-hop router for the specified ICMP Destination Address.
-	 */
-
-	if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
-		ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
-		return;
-	}
-	if (ndopts.nd_opts_tgt_lladdr) {
-		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
-					     skb->dev);
-		if (!lladdr) {
-			ND_PRINTK(2, warn,
-				  "Redirect: invalid link-layer address length\n");
-			return;
-		}
-	}
-
-	neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
-	if (neigh) {
-		rt6_redirect(dest, &ipv6_hdr(skb)->daddr,
-			     &ipv6_hdr(skb)->saddr, neigh, lladdr,
-			     on_link);
-		neigh_release(neigh);
-	}
+	icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
 }
 
 void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
@@ -1472,6 +1359,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
 	struct net *net = dev_net(dev);
 	struct sock *sk = net->ipv6.ndisc_sk;
 	int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
+	struct inet_peer *peer;
 	struct sk_buff *buff;
 	struct icmp6hdr *icmph;
 	struct in6_addr saddr_buf;
@@ -1485,6 +1373,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
 	int rd_len;
 	int err;
 	u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
+	bool ret;
 
 	if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
 		ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
@@ -1518,9 +1407,11 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
 			  "Redirect: destination is not a neighbour\n");
 		goto release;
 	}
-	if (!rt->rt6i_peer)
-		rt6_bind_peer(rt, 1);
-	if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+	peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+	ret = inet_peer_xrlim_allow(peer, 1*HZ);
+	if (peer)
+		inet_putpeer(peer);
+	if (!ret)
 		goto release;
 
 	if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3224ef90a21a..4794f96cf2e0 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -143,11 +143,11 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
 	return NF_ACCEPT;
 }
 
-static unsigned int ipv6_confirm(unsigned int hooknum,
+static unsigned int ipv6_helper(unsigned int hooknum,
 				 struct sk_buff *skb,
 				 const struct net_device *in,
 				 const struct net_device *out,
 				 int (*okfn)(struct sk_buff *))
 {
 	struct nf_conn *ct;
 	const struct nf_conn_help *help;
@@ -161,15 +161,15 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
 	/* This is where we call the helper: as the packet goes out. */
 	ct = nf_ct_get(skb, &ctinfo);
 	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
-		goto out;
+		return NF_ACCEPT;
 
 	help = nfct_help(ct);
 	if (!help)
-		goto out;
+		return NF_ACCEPT;
 	/* rcu_read_lock()ed by nf_hook_slow */
 	helper = rcu_dereference(help->helper);
 	if (!helper)
-		goto out;
+		return NF_ACCEPT;
 
 	protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
 					 skb->len - extoff);
@@ -179,12 +179,19 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
 	}
 
 	ret = helper->help(skb, protoff, ct, ctinfo);
-	if (ret != NF_ACCEPT) {
+	if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
 		nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
 			      "nf_ct_%s: dropping packet", helper->name);
-		return ret;
 	}
-out:
+	return ret;
+}
+
+static unsigned int ipv6_confirm(unsigned int hooknum,
+				 struct sk_buff *skb,
+				 const struct net_device *in,
+				 const struct net_device *out,
+				 int (*okfn)(struct sk_buff *))
+{
 	/* We've seen it coming out the other side: confirm it */
 	return nf_conntrack_confirm(skb);
 }
@@ -254,6 +261,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
 		.priority = NF_IP6_PRI_CONNTRACK,
 	},
 	{
+		.hook = ipv6_helper,
+		.owner = THIS_MODULE,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = NF_IP6_PRI_CONNTRACK_HELPER,
+	},
+	{
 		.hook = ipv6_confirm,
 		.owner = THIS_MODULE,
 		.pf = NFPROTO_IPV6,
@@ -261,6 +275,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
 		.priority = NF_IP6_PRI_LAST,
 	},
 	{
+		.hook = ipv6_helper,
+		.owner = THIS_MODULE,
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_LOCAL_IN,
+		.priority = NF_IP6_PRI_CONNTRACK_HELPER,
+	},
+	{
 		.hook = ipv6_confirm,
 		.owner = THIS_MODULE,
 		.pf = NFPROTO_IPV6,
@@ -333,37 +354,75 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
 
-static int __init nf_conntrack_l3proto_ipv6_init(void)
+static int ipv6_net_init(struct net *net)
 {
 	int ret = 0;
 
-	need_conntrack();
-	nf_defrag_ipv6_enable();
-
-	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
+	ret = nf_conntrack_l4proto_register(net,
+					    &nf_conntrack_l4proto_tcp6);
 	if (ret < 0) {
-		pr_err("nf_conntrack_ipv6: can't register tcp.\n");
-		return ret;
+		printk(KERN_ERR "nf_conntrack_l4proto_tcp6: protocol register failed\n");
+		goto out;
 	}
-
-	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6);
+	ret = nf_conntrack_l4proto_register(net,
+					    &nf_conntrack_l4proto_udp6);
 	if (ret < 0) {
-		pr_err("nf_conntrack_ipv6: can't register udp.\n");
-		goto cleanup_tcp;
+		printk(KERN_ERR "nf_conntrack_l4proto_udp6: protocol register failed\n");
+		goto cleanup_tcp6;
 	}
-
-	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6);
+	ret = nf_conntrack_l4proto_register(net,
+					    &nf_conntrack_l4proto_icmpv6);
 	if (ret < 0) {
-		pr_err("nf_conntrack_ipv6: can't register icmpv6.\n");
-		goto cleanup_udp;
+		printk(KERN_ERR "nf_conntrack_l4proto_icmp6: protocol register failed\n");
+		goto cleanup_udp6;
 	}
-
-	ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
+	ret = nf_conntrack_l3proto_register(net,
+					    &nf_conntrack_l3proto_ipv6);
 	if (ret < 0) {
-		pr_err("nf_conntrack_ipv6: can't register ipv6\n");
+		printk(KERN_ERR "nf_conntrack_l3proto_ipv6: protocol register failed\n");
 		goto cleanup_icmpv6;
 	}
+	return 0;
+cleanup_icmpv6:
387 | nf_conntrack_l4proto_unregister(net, | ||
388 | &nf_conntrack_l4proto_icmpv6); | ||
389 | cleanup_udp6: | ||
390 | nf_conntrack_l4proto_unregister(net, | ||
391 | &nf_conntrack_l4proto_udp6); | ||
392 | cleanup_tcp6: | ||
393 | nf_conntrack_l4proto_unregister(net, | ||
394 | &nf_conntrack_l4proto_tcp6); | ||
395 | out: | ||
396 | return ret; | ||
397 | } | ||
366 | 398 | ||
399 | static void ipv6_net_exit(struct net *net) | ||
400 | { | ||
401 | nf_conntrack_l3proto_unregister(net, | ||
402 | &nf_conntrack_l3proto_ipv6); | ||
403 | nf_conntrack_l4proto_unregister(net, | ||
404 | &nf_conntrack_l4proto_icmpv6); | ||
405 | nf_conntrack_l4proto_unregister(net, | ||
406 | &nf_conntrack_l4proto_udp6); | ||
407 | nf_conntrack_l4proto_unregister(net, | ||
408 | &nf_conntrack_l4proto_tcp6); | ||
409 | } | ||
410 | |||
411 | static struct pernet_operations ipv6_net_ops = { | ||
412 | .init = ipv6_net_init, | ||
413 | .exit = ipv6_net_exit, | ||
414 | }; | ||
415 | |||
416 | static int __init nf_conntrack_l3proto_ipv6_init(void) | ||
417 | { | ||
418 | int ret = 0; | ||
419 | |||
420 | need_conntrack(); | ||
421 | nf_defrag_ipv6_enable(); | ||
422 | |||
423 | ret = register_pernet_subsys(&ipv6_net_ops); | ||
424 | if (ret < 0) | ||
425 | goto cleanup_pernet; | ||
367 | ret = nf_register_hooks(ipv6_conntrack_ops, | 426 | ret = nf_register_hooks(ipv6_conntrack_ops, |
368 | ARRAY_SIZE(ipv6_conntrack_ops)); | 427 | ARRAY_SIZE(ipv6_conntrack_ops)); |
369 | if (ret < 0) { | 428 | if (ret < 0) { |
@@ -374,13 +433,8 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
374 | return ret; | 433 | return ret; |
375 | 434 | ||
376 | cleanup_ipv6: | 435 | cleanup_ipv6: |
377 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); | 436 | unregister_pernet_subsys(&ipv6_net_ops); |
378 | cleanup_icmpv6: | 437 | cleanup_pernet: |
379 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6); | ||
380 | cleanup_udp: | ||
381 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6); | ||
382 | cleanup_tcp: | ||
383 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6); | ||
384 | return ret; | 438 | return ret; |
385 | } | 439 | } |
386 | 440 | ||
@@ -388,10 +442,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void) | |||
388 | { | 442 | { |
389 | synchronize_net(); | 443 | synchronize_net(); |
390 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); | 444 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); |
391 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); | 445 | unregister_pernet_subsys(&ipv6_net_ops); |
392 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6); | ||
393 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6); | ||
394 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6); | ||
395 | } | 446 | } |
396 | 447 | ||
397 | module_init(nf_conntrack_l3proto_ipv6_init); | 448 | module_init(nf_conntrack_l3proto_ipv6_init); |
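Both registration paths above collapse into the standard pernet pattern: everything per-namespace moves into an .init/.exit pair, and module init then only has to call register_pernet_subsys() once before registering its hooks. A stripped-down sketch of that shape, with placeholder example_* names rather than the symbols from this patch:

    /* Sketch of the pernet_operations pattern introduced above. */
    static int __net_init example_net_init(struct net *net)
    {
            /* register per-namespace protocol trackers, sysctls, ... */
            return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
            /* undo example_net_init() in reverse order */
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
    };

    static int __init example_module_init(void)
    {
            /* .init runs for init_net and for every namespace created later */
            return register_pernet_subsys(&example_net_ops);
    }

    static void __exit example_module_exit(void)
    {
            unregister_pernet_subsys(&example_net_ops);
    }

    module_init(example_module_init);
    module_exit(example_module_exit);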
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 3e81904fbbcd..2d54b2061d68 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -29,6 +29,11 @@ | |||
29 | 29 | ||
30 | static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ; | 30 | static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ; |
31 | 31 | ||
32 | static inline struct nf_icmp_net *icmpv6_pernet(struct net *net) | ||
33 | { | ||
34 | return &net->ct.nf_ct_proto.icmpv6; | ||
35 | } | ||
36 | |||
32 | static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, | 37 | static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, |
33 | unsigned int dataoff, | 38 | unsigned int dataoff, |
34 | struct nf_conntrack_tuple *tuple) | 39 | struct nf_conntrack_tuple *tuple) |
@@ -90,7 +95,7 @@ static int icmpv6_print_tuple(struct seq_file *s, | |||
90 | 95 | ||
91 | static unsigned int *icmpv6_get_timeouts(struct net *net) | 96 | static unsigned int *icmpv6_get_timeouts(struct net *net) |
92 | { | 97 | { |
93 | return &nf_ct_icmpv6_timeout; | 98 | return &icmpv6_pernet(net)->timeout; |
94 | } | 99 | } |
95 | 100 | ||
96 | /* Returns verdict for packet, or -1 for invalid. */ | 101 | /* Returns verdict for packet, or -1 for invalid. */ |
@@ -281,16 +286,18 @@ static int icmpv6_nlattr_tuple_size(void) | |||
281 | #include <linux/netfilter/nfnetlink.h> | 286 | #include <linux/netfilter/nfnetlink.h> |
282 | #include <linux/netfilter/nfnetlink_cttimeout.h> | 287 | #include <linux/netfilter/nfnetlink_cttimeout.h> |
283 | 288 | ||
284 | static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) | 289 | static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], |
290 | struct net *net, void *data) | ||
285 | { | 291 | { |
286 | unsigned int *timeout = data; | 292 | unsigned int *timeout = data; |
293 | struct nf_icmp_net *in = icmpv6_pernet(net); | ||
287 | 294 | ||
288 | if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { | 295 | if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { |
289 | *timeout = | 296 | *timeout = |
290 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; | 297 | ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; |
291 | } else { | 298 | } else { |
292 | /* Set default ICMPv6 timeout. */ | 299 | /* Set default ICMPv6 timeout. */ |
293 | *timeout = nf_ct_icmpv6_timeout; | 300 | *timeout = in->timeout; |
294 | } | 301 | } |
295 | return 0; | 302 | return 0; |
296 | } | 303 | } |
@@ -315,11 +322,9 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { | |||
315 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 322 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ |
316 | 323 | ||
317 | #ifdef CONFIG_SYSCTL | 324 | #ifdef CONFIG_SYSCTL |
318 | static struct ctl_table_header *icmpv6_sysctl_header; | ||
319 | static struct ctl_table icmpv6_sysctl_table[] = { | 325 | static struct ctl_table icmpv6_sysctl_table[] = { |
320 | { | 326 | { |
321 | .procname = "nf_conntrack_icmpv6_timeout", | 327 | .procname = "nf_conntrack_icmpv6_timeout", |
322 | .data = &nf_ct_icmpv6_timeout, | ||
323 | .maxlen = sizeof(unsigned int), | 328 | .maxlen = sizeof(unsigned int), |
324 | .mode = 0644, | 329 | .mode = 0644, |
325 | .proc_handler = proc_dointvec_jiffies, | 330 | .proc_handler = proc_dointvec_jiffies, |
@@ -328,6 +333,36 @@ static struct ctl_table icmpv6_sysctl_table[] = { | |||
328 | }; | 333 | }; |
329 | #endif /* CONFIG_SYSCTL */ | 334 | #endif /* CONFIG_SYSCTL */ |
330 | 335 | ||
336 | static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn, | ||
337 | struct nf_icmp_net *in) | ||
338 | { | ||
339 | #ifdef CONFIG_SYSCTL | ||
340 | pn->ctl_table = kmemdup(icmpv6_sysctl_table, | ||
341 | sizeof(icmpv6_sysctl_table), | ||
342 | GFP_KERNEL); | ||
343 | if (!pn->ctl_table) | ||
344 | return -ENOMEM; | ||
345 | |||
346 | pn->ctl_table[0].data = &in->timeout; | ||
347 | #endif | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static int icmpv6_init_net(struct net *net, u_int16_t proto) | ||
352 | { | ||
353 | struct nf_icmp_net *in = icmpv6_pernet(net); | ||
354 | struct nf_proto_net *pn = &in->pn; | ||
355 | |||
356 | in->timeout = nf_ct_icmpv6_timeout; | ||
357 | |||
358 | return icmpv6_kmemdup_sysctl_table(pn, in); | ||
359 | } | ||
360 | |||
361 | static struct nf_proto_net *icmpv6_get_net_proto(struct net *net) | ||
362 | { | ||
363 | return &net->ct.nf_ct_proto.icmpv6.pn; | ||
364 | } | ||
365 | |||
331 | struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = | 366 | struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = |
332 | { | 367 | { |
333 | .l3proto = PF_INET6, | 368 | .l3proto = PF_INET6, |
@@ -355,8 +390,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = | |||
355 | .nla_policy = icmpv6_timeout_nla_policy, | 390 | .nla_policy = icmpv6_timeout_nla_policy, |
356 | }, | 391 | }, |
357 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ | 392 | #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ |
358 | #ifdef CONFIG_SYSCTL | 393 | .init_net = icmpv6_init_net, |
359 | .ctl_table_header = &icmpv6_sysctl_header, | 394 | .get_net_proto = icmpv6_get_net_proto, |
360 | .ctl_table = icmpv6_sysctl_table, | ||
361 | #endif | ||
362 | }; | 395 | }; |
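The icmpv6 conversion above keeps one static sysctl template and duplicates it per namespace, re-pointing the copy's .data at the per-net timeout so each netns can tune its own value while the template stays shared and read-only. A hedged sketch of that kmemdup-and-repoint step with shortened names; only the allocation failure is handled, as in the original:

    /* Sketch: per-netns copy of a sysctl table, .data pointed at per-net storage. */
    static struct ctl_table example_sysctl_template[] = {
            {
                    .procname     = "example_timeout",
                    .maxlen       = sizeof(unsigned int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_jiffies,
            },
            { }
    };

    static int example_dup_sysctl(struct nf_proto_net *pn, unsigned int *timeout)
    {
    #ifdef CONFIG_SYSCTL
            pn->ctl_table = kmemdup(example_sysctl_template,
                                    sizeof(example_sysctl_template), GFP_KERNEL);
            if (!pn->ctl_table)
                    return -ENOMEM;
            /* the copy is private to this netns, so .data may be rewritten */
            pn->ctl_table[0].data = timeout;
    #endif
            return 0;
    }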
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index 9a7978fdc02a..053082dfc93e 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -29,9 +29,7 @@ const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; | |||
29 | 29 | ||
30 | int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) | 30 | int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) |
31 | { | 31 | { |
32 | int hash = protocol & (MAX_INET_PROTOS - 1); | 32 | return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol], |
33 | |||
34 | return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], | ||
35 | NULL, prot) ? 0 : -1; | 33 | NULL, prot) ? 0 : -1; |
36 | } | 34 | } |
37 | EXPORT_SYMBOL(inet6_add_protocol); | 35 | EXPORT_SYMBOL(inet6_add_protocol); |
@@ -42,9 +40,9 @@ EXPORT_SYMBOL(inet6_add_protocol); | |||
42 | 40 | ||
43 | int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) | 41 | int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) |
44 | { | 42 | { |
45 | int ret, hash = protocol & (MAX_INET_PROTOS - 1); | 43 | int ret; |
46 | 44 | ||
47 | ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], | 45 | ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol], |
48 | prot, NULL) == prot) ? 0 : -1; | 46 | prot, NULL) == prot) ? 0 : -1; |
49 | 47 | ||
50 | synchronize_net(); | 48 | synchronize_net(); |
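With MAX_INET_PROTOS now spanning the full 8-bit protocol space, the masking step disappears and the protocol number indexes inet6_protos[] directly; registration stays lock-free through cmpxchg(). For context, a caller-side sketch of how a handler is registered against this API; the protocol number, handlers and names are placeholders, not anything defined by this patch:

    /* Sketch: registering an IPv6 upper-layer protocol handler. */
    #define EXAMPLE_PROTO 253       /* placeholder: RFC 3692 experimental number */

    static int example_rcv(struct sk_buff *skb)
    {
            kfree_skb(skb);         /* placeholder receive path */
            return 0;
    }

    static void example_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                            u8 type, u8 code, int offset, __be32 info)
    {
            /* placeholder ICMPv6 error handler */
    }

    static const struct inet6_protocol example_protocol = {
            .handler     = example_rcv,
            .err_handler = example_err,
            .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
    };

    static int __init example_proto_init(void)
    {
            /* returns -1 if another handler already owns this slot */
            if (inet6_add_protocol(&example_protocol, EXAMPLE_PROTO) < 0)
                    return -EAGAIN;
            return 0;
    }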
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 93d69836fded..ef0579d5bca6 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -165,7 +165,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) | |||
165 | saddr = &ipv6_hdr(skb)->saddr; | 165 | saddr = &ipv6_hdr(skb)->saddr; |
166 | daddr = saddr + 1; | 166 | daddr = saddr + 1; |
167 | 167 | ||
168 | hash = nexthdr & (MAX_INET_PROTOS - 1); | 168 | hash = nexthdr & (RAW_HTABLE_SIZE - 1); |
169 | 169 | ||
170 | read_lock(&raw_v6_hashinfo.lock); | 170 | read_lock(&raw_v6_hashinfo.lock); |
171 | sk = sk_head(&raw_v6_hashinfo.ht[hash]); | 171 | sk = sk_head(&raw_v6_hashinfo.ht[hash]); |
@@ -229,7 +229,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr) | |||
229 | { | 229 | { |
230 | struct sock *raw_sk; | 230 | struct sock *raw_sk; |
231 | 231 | ||
232 | raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]); | 232 | raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]); |
233 | if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) | 233 | if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) |
234 | raw_sk = NULL; | 234 | raw_sk = NULL; |
235 | 235 | ||
@@ -328,9 +328,12 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb, | |||
328 | return; | 328 | return; |
329 | 329 | ||
330 | harderr = icmpv6_err_convert(type, code, &err); | 330 | harderr = icmpv6_err_convert(type, code, &err); |
331 | if (type == ICMPV6_PKT_TOOBIG) | 331 | if (type == ICMPV6_PKT_TOOBIG) { |
332 | ip6_sk_update_pmtu(skb, sk, info); | ||
332 | harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); | 333 | harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); |
333 | 334 | } | |
335 | if (type == NDISC_REDIRECT) | ||
336 | ip6_sk_redirect(skb, sk); | ||
334 | if (np->recverr) { | 337 | if (np->recverr) { |
335 | u8 *payload = skb->data; | 338 | u8 *payload = skb->data; |
336 | if (!inet->hdrincl) | 339 | if (!inet->hdrincl) |
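rawv6_err() above now routes ICMPV6_PKT_TOOBIG through ip6_sk_update_pmtu() and NDISC_REDIRECT through ip6_sk_redirect(), the two socket-level helpers added in the route.c portion of this diff. A condensed sketch of that dispatch as any protocol error handler might use it; the function name is illustrative:

    /* Sketch: socket error handler reacting to PMTU and redirect ICMPs. */
    static void example_v6_err(struct sock *sk, struct sk_buff *skb,
                               u8 type, __be32 info)
    {
            if (type == ICMPV6_PKT_TOOBIG)
                    /* shrink the cached route's PMTU for this socket's flow */
                    ip6_sk_update_pmtu(skb, sk, info);
            else if (type == NDISC_REDIRECT)
                    /* adopt the next hop advertised by the redirect */
                    ip6_sk_redirect(skb, sk);
    }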
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index becb048d18d4..cf02cb97bbdd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -78,7 +78,10 @@ static int ip6_dst_gc(struct dst_ops *ops); | |||
78 | static int ip6_pkt_discard(struct sk_buff *skb); | 78 | static int ip6_pkt_discard(struct sk_buff *skb); |
79 | static int ip6_pkt_discard_out(struct sk_buff *skb); | 79 | static int ip6_pkt_discard_out(struct sk_buff *skb); |
80 | static void ip6_link_failure(struct sk_buff *skb); | 80 | static void ip6_link_failure(struct sk_buff *skb); |
81 | static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); | 81 | static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
82 | struct sk_buff *skb, u32 mtu); | ||
83 | static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, | ||
84 | struct sk_buff *skb); | ||
82 | 85 | ||
83 | #ifdef CONFIG_IPV6_ROUTE_INFO | 86 | #ifdef CONFIG_IPV6_ROUTE_INFO |
84 | static struct rt6_info *rt6_add_route_info(struct net *net, | 87 | static struct rt6_info *rt6_add_route_info(struct net *net, |
@@ -99,10 +102,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) | |||
99 | if (!(rt->dst.flags & DST_HOST)) | 102 | if (!(rt->dst.flags & DST_HOST)) |
100 | return NULL; | 103 | return NULL; |
101 | 104 | ||
102 | if (!rt->rt6i_peer) | 105 | peer = rt6_get_peer_create(rt); |
103 | rt6_bind_peer(rt, 1); | ||
104 | |||
105 | peer = rt->rt6i_peer; | ||
106 | if (peer) { | 106 | if (peer) { |
107 | u32 *old_p = __DST_METRICS_PTR(old); | 107 | u32 *old_p = __DST_METRICS_PTR(old); |
108 | unsigned long prev, new; | 108 | unsigned long prev, new; |
@@ -123,21 +123,27 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) | |||
123 | return p; | 123 | return p; |
124 | } | 124 | } |
125 | 125 | ||
126 | static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr) | 126 | static inline const void *choose_neigh_daddr(struct rt6_info *rt, |
127 | struct sk_buff *skb, | ||
128 | const void *daddr) | ||
127 | { | 129 | { |
128 | struct in6_addr *p = &rt->rt6i_gateway; | 130 | struct in6_addr *p = &rt->rt6i_gateway; |
129 | 131 | ||
130 | if (!ipv6_addr_any(p)) | 132 | if (!ipv6_addr_any(p)) |
131 | return (const void *) p; | 133 | return (const void *) p; |
134 | else if (skb) | ||
135 | return &ipv6_hdr(skb)->daddr; | ||
132 | return daddr; | 136 | return daddr; |
133 | } | 137 | } |
134 | 138 | ||
135 | static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr) | 139 | static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, |
140 | struct sk_buff *skb, | ||
141 | const void *daddr) | ||
136 | { | 142 | { |
137 | struct rt6_info *rt = (struct rt6_info *) dst; | 143 | struct rt6_info *rt = (struct rt6_info *) dst; |
138 | struct neighbour *n; | 144 | struct neighbour *n; |
139 | 145 | ||
140 | daddr = choose_neigh_daddr(rt, daddr); | 146 | daddr = choose_neigh_daddr(rt, skb, daddr); |
141 | n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); | 147 | n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr); |
142 | if (n) | 148 | if (n) |
143 | return n; | 149 | return n; |
@@ -152,7 +158,7 @@ static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev) | |||
152 | if (IS_ERR(n)) | 158 | if (IS_ERR(n)) |
153 | return PTR_ERR(n); | 159 | return PTR_ERR(n); |
154 | } | 160 | } |
155 | dst_set_neighbour(&rt->dst, n); | 161 | rt->n = n; |
156 | 162 | ||
157 | return 0; | 163 | return 0; |
158 | } | 164 | } |
@@ -171,6 +177,7 @@ static struct dst_ops ip6_dst_ops_template = { | |||
171 | .negative_advice = ip6_negative_advice, | 177 | .negative_advice = ip6_negative_advice, |
172 | .link_failure = ip6_link_failure, | 178 | .link_failure = ip6_link_failure, |
173 | .update_pmtu = ip6_rt_update_pmtu, | 179 | .update_pmtu = ip6_rt_update_pmtu, |
180 | .redirect = rt6_do_redirect, | ||
174 | .local_out = __ip6_local_out, | 181 | .local_out = __ip6_local_out, |
175 | .neigh_lookup = ip6_neigh_lookup, | 182 | .neigh_lookup = ip6_neigh_lookup, |
176 | }; | 183 | }; |
@@ -182,7 +189,13 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) | |||
182 | return mtu ? : dst->dev->mtu; | 189 | return mtu ? : dst->dev->mtu; |
183 | } | 190 | } |
184 | 191 | ||
185 | static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) | 192 | static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, |
193 | struct sk_buff *skb, u32 mtu) | ||
194 | { | ||
195 | } | ||
196 | |||
197 | static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, | ||
198 | struct sk_buff *skb) | ||
186 | { | 199 | { |
187 | } | 200 | } |
188 | 201 | ||
@@ -200,6 +213,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
200 | .mtu = ip6_blackhole_mtu, | 213 | .mtu = ip6_blackhole_mtu, |
201 | .default_advmss = ip6_default_advmss, | 214 | .default_advmss = ip6_default_advmss, |
202 | .update_pmtu = ip6_rt_blackhole_update_pmtu, | 215 | .update_pmtu = ip6_rt_blackhole_update_pmtu, |
216 | .redirect = ip6_rt_blackhole_redirect, | ||
203 | .cow_metrics = ip6_rt_blackhole_cow_metrics, | 217 | .cow_metrics = ip6_rt_blackhole_cow_metrics, |
204 | .neigh_lookup = ip6_neigh_lookup, | 218 | .neigh_lookup = ip6_neigh_lookup, |
205 | }; | 219 | }; |
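The two blackhole stubs above illustrate the new dst_ops callback shapes this series converges on: update_pmtu() now receives the socket and the triggering skb alongside the MTU, and redirects get their own .redirect() operation instead of being funnelled through ad-hoc helpers. A minimal sketch of a dst_ops wired with no-op callbacks of the new signatures; the example_* names are placeholders:

    /* Sketch: dst_ops using the post-change callback signatures. */
    static void example_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                    struct sk_buff *skb, u32 mtu)
    {
            /* sk and skb carry the context that triggered the PMTU change */
    }

    static void example_redirect(struct dst_entry *dst, struct sock *sk,
                                 struct sk_buff *skb)
    {
            /* re-point dst at the gateway advertised in skb's redirect */
    }

    static struct dst_ops example_dst_ops = {
            .family      = AF_INET6,
            .update_pmtu = example_update_pmtu,
            .redirect    = example_redirect,
    };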
@@ -261,16 +275,20 @@ static struct rt6_info ip6_blk_hole_entry_template = { | |||
261 | #endif | 275 | #endif |
262 | 276 | ||
263 | /* allocate dst with ip6_dst_ops */ | 277 | /* allocate dst with ip6_dst_ops */ |
264 | static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, | 278 | static inline struct rt6_info *ip6_dst_alloc(struct net *net, |
265 | struct net_device *dev, | 279 | struct net_device *dev, |
266 | int flags) | 280 | int flags, |
281 | struct fib6_table *table) | ||
267 | { | 282 | { |
268 | struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); | 283 | struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, |
284 | 0, DST_OBSOLETE_NONE, flags); | ||
269 | 285 | ||
270 | if (rt) | 286 | if (rt) { |
271 | memset(&rt->rt6i_table, 0, | 287 | struct dst_entry *dst = &rt->dst; |
272 | sizeof(*rt) - sizeof(struct dst_entry)); | ||
273 | 288 | ||
289 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); | ||
290 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); | ||
291 | } | ||
274 | return rt; | 292 | return rt; |
275 | } | 293 | } |
276 | 294 | ||
@@ -278,7 +296,9 @@ static void ip6_dst_destroy(struct dst_entry *dst) | |||
278 | { | 296 | { |
279 | struct rt6_info *rt = (struct rt6_info *)dst; | 297 | struct rt6_info *rt = (struct rt6_info *)dst; |
280 | struct inet6_dev *idev = rt->rt6i_idev; | 298 | struct inet6_dev *idev = rt->rt6i_idev; |
281 | struct inet_peer *peer = rt->rt6i_peer; | 299 | |
300 | if (rt->n) | ||
301 | neigh_release(rt->n); | ||
282 | 302 | ||
283 | if (!(rt->dst.flags & DST_HOST)) | 303 | if (!(rt->dst.flags & DST_HOST)) |
284 | dst_destroy_metrics_generic(dst); | 304 | dst_destroy_metrics_generic(dst); |
@@ -291,8 +311,8 @@ static void ip6_dst_destroy(struct dst_entry *dst) | |||
291 | if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) | 311 | if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) |
292 | dst_release(dst->from); | 312 | dst_release(dst->from); |
293 | 313 | ||
294 | if (peer) { | 314 | if (rt6_has_peer(rt)) { |
295 | rt->rt6i_peer = NULL; | 315 | struct inet_peer *peer = rt6_peer_ptr(rt); |
296 | inet_putpeer(peer); | 316 | inet_putpeer(peer); |
297 | } | 317 | } |
298 | } | 318 | } |
@@ -306,13 +326,20 @@ static u32 rt6_peer_genid(void) | |||
306 | 326 | ||
307 | void rt6_bind_peer(struct rt6_info *rt, int create) | 327 | void rt6_bind_peer(struct rt6_info *rt, int create) |
308 | { | 328 | { |
329 | struct inet_peer_base *base; | ||
309 | struct inet_peer *peer; | 330 | struct inet_peer *peer; |
310 | 331 | ||
311 | peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create); | 332 | base = inetpeer_base_ptr(rt->_rt6i_peer); |
312 | if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL) | 333 | if (!base) |
313 | inet_putpeer(peer); | 334 | return; |
314 | else | 335 | |
315 | rt->rt6i_peer_genid = rt6_peer_genid(); | 336 | peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create); |
337 | if (peer) { | ||
338 | if (!rt6_set_peer(rt, peer)) | ||
339 | inet_putpeer(peer); | ||
340 | else | ||
341 | rt->rt6i_peer_genid = rt6_peer_genid(); | ||
342 | } | ||
316 | } | 343 | } |
317 | 344 | ||
318 | static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, | 345 | static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, |
@@ -323,12 +350,19 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
323 | struct net_device *loopback_dev = | 350 | struct net_device *loopback_dev = |
324 | dev_net(dev)->loopback_dev; | 351 | dev_net(dev)->loopback_dev; |
325 | 352 | ||
326 | if (dev != loopback_dev && idev && idev->dev == dev) { | 353 | if (dev != loopback_dev) { |
327 | struct inet6_dev *loopback_idev = | 354 | if (idev && idev->dev == dev) { |
328 | in6_dev_get(loopback_dev); | 355 | struct inet6_dev *loopback_idev = |
329 | if (loopback_idev) { | 356 | in6_dev_get(loopback_dev); |
330 | rt->rt6i_idev = loopback_idev; | 357 | if (loopback_idev) { |
331 | in6_dev_put(idev); | 358 | rt->rt6i_idev = loopback_idev; |
359 | in6_dev_put(idev); | ||
360 | } | ||
361 | } | ||
362 | if (rt->n && rt->n->dev == dev) { | ||
363 | rt->n->dev = loopback_dev; | ||
364 | dev_hold(loopback_dev); | ||
365 | dev_put(dev); | ||
332 | } | 366 | } |
333 | } | 367 | } |
334 | } | 368 | } |
@@ -418,7 +452,7 @@ static void rt6_probe(struct rt6_info *rt) | |||
418 | * to no more than one per minute. | 452 | * to no more than one per minute. |
419 | */ | 453 | */ |
420 | rcu_read_lock(); | 454 | rcu_read_lock(); |
421 | neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL; | 455 | neigh = rt ? rt->n : NULL; |
422 | if (!neigh || (neigh->nud_state & NUD_VALID)) | 456 | if (!neigh || (neigh->nud_state & NUD_VALID)) |
423 | goto out; | 457 | goto out; |
424 | read_lock_bh(&neigh->lock); | 458 | read_lock_bh(&neigh->lock); |
@@ -465,7 +499,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt) | |||
465 | int m; | 499 | int m; |
466 | 500 | ||
467 | rcu_read_lock(); | 501 | rcu_read_lock(); |
468 | neigh = dst_get_neighbour_noref(&rt->dst); | 502 | neigh = rt->n; |
469 | if (rt->rt6i_flags & RTF_NONEXTHOP || | 503 | if (rt->rt6i_flags & RTF_NONEXTHOP || |
470 | !(rt->rt6i_flags & RTF_GATEWAY)) | 504 | !(rt->rt6i_flags & RTF_GATEWAY)) |
471 | m = 1; | 505 | m = 1; |
@@ -812,7 +846,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, | |||
812 | 846 | ||
813 | if (rt) { | 847 | if (rt) { |
814 | rt->rt6i_flags |= RTF_CACHE; | 848 | rt->rt6i_flags |= RTF_CACHE; |
815 | dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst))); | 849 | rt->n = neigh_clone(ort->n); |
816 | } | 850 | } |
817 | return rt; | 851 | return rt; |
818 | } | 852 | } |
@@ -846,7 +880,7 @@ restart: | |||
846 | dst_hold(&rt->dst); | 880 | dst_hold(&rt->dst); |
847 | read_unlock_bh(&table->tb6_lock); | 881 | read_unlock_bh(&table->tb6_lock); |
848 | 882 | ||
849 | if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) | 883 | if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) |
850 | nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); | 884 | nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); |
851 | else if (!(rt->dst.flags & DST_HOST)) | 885 | else if (!(rt->dst.flags & DST_HOST)) |
852 | nrt = rt6_alloc_clone(rt, &fl6->daddr); | 886 | nrt = rt6_alloc_clone(rt, &fl6->daddr); |
@@ -931,6 +965,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, | |||
931 | { | 965 | { |
932 | int flags = 0; | 966 | int flags = 0; |
933 | 967 | ||
968 | fl6->flowi6_iif = net->loopback_dev->ifindex; | ||
969 | |||
934 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) | 970 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) |
935 | flags |= RT6_LOOKUP_F_IFACE; | 971 | flags |= RT6_LOOKUP_F_IFACE; |
936 | 972 | ||
@@ -949,12 +985,13 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori | |||
949 | struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig; | 985 | struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig; |
950 | struct dst_entry *new = NULL; | 986 | struct dst_entry *new = NULL; |
951 | 987 | ||
952 | rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0); | 988 | rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0); |
953 | if (rt) { | 989 | if (rt) { |
954 | memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); | ||
955 | |||
956 | new = &rt->dst; | 990 | new = &rt->dst; |
957 | 991 | ||
992 | memset(new + 1, 0, sizeof(*rt) - sizeof(*new)); | ||
993 | rt6_init_peer(rt, net->ipv6.peers); | ||
994 | |||
958 | new->__use = 1; | 995 | new->__use = 1; |
959 | new->input = dst_discard; | 996 | new->input = dst_discard; |
960 | new->output = dst_discard; | 997 | new->output = dst_discard; |
@@ -996,7 +1033,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
996 | 1033 | ||
997 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { | 1034 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { |
998 | if (rt->rt6i_peer_genid != rt6_peer_genid()) { | 1035 | if (rt->rt6i_peer_genid != rt6_peer_genid()) { |
999 | if (!rt->rt6i_peer) | 1036 | if (!rt6_has_peer(rt)) |
1000 | rt6_bind_peer(rt, 0); | 1037 | rt6_bind_peer(rt, 0); |
1001 | rt->rt6i_peer_genid = rt6_peer_genid(); | 1038 | rt->rt6i_peer_genid = rt6_peer_genid(); |
1002 | } | 1039 | } |
@@ -1038,11 +1075,15 @@ static void ip6_link_failure(struct sk_buff *skb) | |||
1038 | } | 1075 | } |
1039 | } | 1076 | } |
1040 | 1077 | ||
1041 | static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu) | 1078 | static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
1079 | struct sk_buff *skb, u32 mtu) | ||
1042 | { | 1080 | { |
1043 | struct rt6_info *rt6 = (struct rt6_info*)dst; | 1081 | struct rt6_info *rt6 = (struct rt6_info*)dst; |
1044 | 1082 | ||
1083 | dst_confirm(dst); | ||
1045 | if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) { | 1084 | if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) { |
1085 | struct net *net = dev_net(dst->dev); | ||
1086 | |||
1046 | rt6->rt6i_flags |= RTF_MODIFIED; | 1087 | rt6->rt6i_flags |= RTF_MODIFIED; |
1047 | if (mtu < IPV6_MIN_MTU) { | 1088 | if (mtu < IPV6_MIN_MTU) { |
1048 | u32 features = dst_metric(dst, RTAX_FEATURES); | 1089 | u32 features = dst_metric(dst, RTAX_FEATURES); |
@@ -1051,9 +1092,66 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
1051 | dst_metric_set(dst, RTAX_FEATURES, features); | 1092 | dst_metric_set(dst, RTAX_FEATURES, features); |
1052 | } | 1093 | } |
1053 | dst_metric_set(dst, RTAX_MTU, mtu); | 1094 | dst_metric_set(dst, RTAX_MTU, mtu); |
1095 | rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires); | ||
1054 | } | 1096 | } |
1055 | } | 1097 | } |
1056 | 1098 | ||
1099 | void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, | ||
1100 | int oif, u32 mark) | ||
1101 | { | ||
1102 | const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; | ||
1103 | struct dst_entry *dst; | ||
1104 | struct flowi6 fl6; | ||
1105 | |||
1106 | memset(&fl6, 0, sizeof(fl6)); | ||
1107 | fl6.flowi6_oif = oif; | ||
1108 | fl6.flowi6_mark = mark; | ||
1109 | fl6.flowi6_flags = 0; | ||
1110 | fl6.daddr = iph->daddr; | ||
1111 | fl6.saddr = iph->saddr; | ||
1112 | fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK; | ||
1113 | |||
1114 | dst = ip6_route_output(net, NULL, &fl6); | ||
1115 | if (!dst->error) | ||
1116 | ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu)); | ||
1117 | dst_release(dst); | ||
1118 | } | ||
1119 | EXPORT_SYMBOL_GPL(ip6_update_pmtu); | ||
1120 | |||
1121 | void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) | ||
1122 | { | ||
1123 | ip6_update_pmtu(skb, sock_net(sk), mtu, | ||
1124 | sk->sk_bound_dev_if, sk->sk_mark); | ||
1125 | } | ||
1126 | EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); | ||
1127 | |||
1128 | void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark) | ||
1129 | { | ||
1130 | const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data; | ||
1131 | struct dst_entry *dst; | ||
1132 | struct flowi6 fl6; | ||
1133 | |||
1134 | memset(&fl6, 0, sizeof(fl6)); | ||
1135 | fl6.flowi6_oif = oif; | ||
1136 | fl6.flowi6_mark = mark; | ||
1137 | fl6.flowi6_flags = 0; | ||
1138 | fl6.daddr = iph->daddr; | ||
1139 | fl6.saddr = iph->saddr; | ||
1140 | fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK; | ||
1141 | |||
1142 | dst = ip6_route_output(net, NULL, &fl6); | ||
1143 | if (!dst->error) | ||
1144 | rt6_do_redirect(dst, NULL, skb); | ||
1145 | dst_release(dst); | ||
1146 | } | ||
1147 | EXPORT_SYMBOL_GPL(ip6_redirect); | ||
1148 | |||
1149 | void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) | ||
1150 | { | ||
1151 | ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); | ||
1152 | } | ||
1153 | EXPORT_SYMBOL_GPL(ip6_sk_redirect); | ||
1154 | |||
1057 | static unsigned int ip6_default_advmss(const struct dst_entry *dst) | 1155 | static unsigned int ip6_default_advmss(const struct dst_entry *dst) |
1058 | { | 1156 | { |
1059 | struct net_device *dev = dst->dev; | 1157 | struct net_device *dev = dst->dev; |
@@ -1110,7 +1208,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
1110 | if (unlikely(!idev)) | 1208 | if (unlikely(!idev)) |
1111 | return ERR_PTR(-ENODEV); | 1209 | return ERR_PTR(-ENODEV); |
1112 | 1210 | ||
1113 | rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0); | 1211 | rt = ip6_dst_alloc(net, dev, 0, NULL); |
1114 | if (unlikely(!rt)) { | 1212 | if (unlikely(!rt)) { |
1115 | in6_dev_put(idev); | 1213 | in6_dev_put(idev); |
1116 | dst = ERR_PTR(-ENOMEM); | 1214 | dst = ERR_PTR(-ENOMEM); |
@@ -1120,7 +1218,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
1120 | if (neigh) | 1218 | if (neigh) |
1121 | neigh_hold(neigh); | 1219 | neigh_hold(neigh); |
1122 | else { | 1220 | else { |
1123 | neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr); | 1221 | neigh = ip6_neigh_lookup(&rt->dst, NULL, &fl6->daddr); |
1124 | if (IS_ERR(neigh)) { | 1222 | if (IS_ERR(neigh)) { |
1125 | in6_dev_put(idev); | 1223 | in6_dev_put(idev); |
1126 | dst_free(&rt->dst); | 1224 | dst_free(&rt->dst); |
@@ -1130,7 +1228,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
1130 | 1228 | ||
1131 | rt->dst.flags |= DST_HOST; | 1229 | rt->dst.flags |= DST_HOST; |
1132 | rt->dst.output = ip6_output; | 1230 | rt->dst.output = ip6_output; |
1133 | dst_set_neighbour(&rt->dst, neigh); | 1231 | rt->n = neigh; |
1134 | atomic_set(&rt->dst.__refcnt, 1); | 1232 | atomic_set(&rt->dst.__refcnt, 1); |
1135 | rt->rt6i_dst.addr = fl6->daddr; | 1233 | rt->rt6i_dst.addr = fl6->daddr; |
1136 | rt->rt6i_dst.plen = 128; | 1234 | rt->rt6i_dst.plen = 128; |
@@ -1292,7 +1390,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1292 | if (!table) | 1390 | if (!table) |
1293 | goto out; | 1391 | goto out; |
1294 | 1392 | ||
1295 | rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT); | 1393 | rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table); |
1296 | 1394 | ||
1297 | if (!rt) { | 1395 | if (!rt) { |
1298 | err = -ENOMEM; | 1396 | err = -ENOMEM; |
@@ -1546,107 +1644,94 @@ static int ip6_route_del(struct fib6_config *cfg) | |||
1546 | return err; | 1644 | return err; |
1547 | } | 1645 | } |
1548 | 1646 | ||
1549 | /* | 1647 | static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) |
1550 | * Handle redirects | ||
1551 | */ | ||
1552 | struct ip6rd_flowi { | ||
1553 | struct flowi6 fl6; | ||
1554 | struct in6_addr gateway; | ||
1555 | }; | ||
1556 | |||
1557 | static struct rt6_info *__ip6_route_redirect(struct net *net, | ||
1558 | struct fib6_table *table, | ||
1559 | struct flowi6 *fl6, | ||
1560 | int flags) | ||
1561 | { | 1648 | { |
1562 | struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6; | 1649 | struct net *net = dev_net(skb->dev); |
1563 | struct rt6_info *rt; | 1650 | struct netevent_redirect netevent; |
1564 | struct fib6_node *fn; | 1651 | struct rt6_info *rt, *nrt = NULL; |
1652 | const struct in6_addr *target; | ||
1653 | struct ndisc_options ndopts; | ||
1654 | const struct in6_addr *dest; | ||
1655 | struct neighbour *old_neigh; | ||
1656 | struct inet6_dev *in6_dev; | ||
1657 | struct neighbour *neigh; | ||
1658 | struct icmp6hdr *icmph; | ||
1659 | int optlen, on_link; | ||
1660 | u8 *lladdr; | ||
1565 | 1661 | ||
1566 | /* | 1662 | optlen = skb->tail - skb->transport_header; |
1567 | * Get the "current" route for this destination and | 1663 | optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); |
1568 | * check if the redirect has come from approriate router. | ||
1569 | * | ||
1570 | * RFC 2461 specifies that redirects should only be | ||
1571 | * accepted if they come from the nexthop to the target. | ||
1572 | * Due to the way the routes are chosen, this notion | ||
1573 | * is a bit fuzzy and one might need to check all possible | ||
1574 | * routes. | ||
1575 | */ | ||
1576 | 1664 | ||
1577 | read_lock_bh(&table->tb6_lock); | 1665 | if (optlen < 0) { |
1578 | fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); | 1666 | net_dbg_ratelimited("rt6_do_redirect: packet too short\n"); |
1579 | restart: | 1667 | return; |
1580 | for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { | ||
1581 | /* | ||
1582 | * Current route is on-link; redirect is always invalid. | ||
1583 | * | ||
1584 | * Seems, previous statement is not true. It could | ||
1585 | * be node, which looks for us as on-link (f.e. proxy ndisc) | ||
1586 | * But then router serving it might decide, that we should | ||
1587 | * know truth 8)8) --ANK (980726). | ||
1588 | */ | ||
1589 | if (rt6_check_expired(rt)) | ||
1590 | continue; | ||
1591 | if (!(rt->rt6i_flags & RTF_GATEWAY)) | ||
1592 | continue; | ||
1593 | if (fl6->flowi6_oif != rt->dst.dev->ifindex) | ||
1594 | continue; | ||
1595 | if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) | ||
1596 | continue; | ||
1597 | break; | ||
1598 | } | 1668 | } |
1599 | 1669 | ||
1600 | if (!rt) | 1670 | icmph = icmp6_hdr(skb); |
1601 | rt = net->ipv6.ip6_null_entry; | 1671 | target = (const struct in6_addr *) (icmph + 1); |
1602 | BACKTRACK(net, &fl6->saddr); | 1672 | dest = target + 1; |
1603 | out: | ||
1604 | dst_hold(&rt->dst); | ||
1605 | |||
1606 | read_unlock_bh(&table->tb6_lock); | ||
1607 | |||
1608 | return rt; | ||
1609 | }; | ||
1610 | 1673 | ||
1611 | static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest, | 1674 | if (ipv6_addr_is_multicast(dest)) { |
1612 | const struct in6_addr *src, | 1675 | net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n"); |
1613 | const struct in6_addr *gateway, | 1676 | return; |
1614 | struct net_device *dev) | 1677 | } |
1615 | { | ||
1616 | int flags = RT6_LOOKUP_F_HAS_SADDR; | ||
1617 | struct net *net = dev_net(dev); | ||
1618 | struct ip6rd_flowi rdfl = { | ||
1619 | .fl6 = { | ||
1620 | .flowi6_oif = dev->ifindex, | ||
1621 | .daddr = *dest, | ||
1622 | .saddr = *src, | ||
1623 | }, | ||
1624 | }; | ||
1625 | 1678 | ||
1626 | rdfl.gateway = *gateway; | 1679 | on_link = 0; |
1680 | if (ipv6_addr_equal(dest, target)) { | ||
1681 | on_link = 1; | ||
1682 | } else if (ipv6_addr_type(target) != | ||
1683 | (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { | ||
1684 | net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n"); | ||
1685 | return; | ||
1686 | } | ||
1627 | 1687 | ||
1628 | if (rt6_need_strict(dest)) | 1688 | in6_dev = __in6_dev_get(skb->dev); |
1629 | flags |= RT6_LOOKUP_F_IFACE; | 1689 | if (!in6_dev) |
1690 | return; | ||
1691 | if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) | ||
1692 | return; | ||
1630 | 1693 | ||
1631 | return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6, | 1694 | /* RFC2461 8.1: |
1632 | flags, __ip6_route_redirect); | 1695 | * The IP source address of the Redirect MUST be the same as the current |
1633 | } | 1696 | * first-hop router for the specified ICMP Destination Address. |
1697 | */ | ||
1634 | 1698 | ||
1635 | void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, | 1699 | if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) { |
1636 | const struct in6_addr *saddr, | 1700 | net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); |
1637 | struct neighbour *neigh, u8 *lladdr, int on_link) | 1701 | return; |
1638 | { | 1702 | } |
1639 | struct rt6_info *rt, *nrt = NULL; | ||
1640 | struct netevent_redirect netevent; | ||
1641 | struct net *net = dev_net(neigh->dev); | ||
1642 | 1703 | ||
1643 | rt = ip6_route_redirect(dest, src, saddr, neigh->dev); | 1704 | lladdr = NULL; |
1705 | if (ndopts.nd_opts_tgt_lladdr) { | ||
1706 | lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, | ||
1707 | skb->dev); | ||
1708 | if (!lladdr) { | ||
1709 | net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n"); | ||
1710 | return; | ||
1711 | } | ||
1712 | } | ||
1644 | 1713 | ||
1714 | rt = (struct rt6_info *) dst; | ||
1645 | if (rt == net->ipv6.ip6_null_entry) { | 1715 | if (rt == net->ipv6.ip6_null_entry) { |
1646 | net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); | 1716 | net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n"); |
1647 | goto out; | 1717 | return; |
1648 | } | 1718 | } |
1649 | 1719 | ||
1720 | /* Redirect received -> path was valid. | ||
1721 | * Look, redirects are sent only in response to data packets, | ||
1722 | * so that this nexthop apparently is reachable. --ANK | ||
1723 | */ | ||
1724 | dst_confirm(&rt->dst); | ||
1725 | |||
1726 | neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1); | ||
1727 | if (!neigh) | ||
1728 | return; | ||
1729 | |||
1730 | /* Duplicate redirect: silently ignore. */ | ||
1731 | old_neigh = rt->n; | ||
1732 | if (neigh == old_neigh) | ||
1733 | goto out; | ||
1734 | |||
1650 | /* | 1735 | /* |
1651 | * We have finally decided to accept it. | 1736 | * We have finally decided to accept it. |
1652 | */ | 1737 | */ |
@@ -1658,17 +1743,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, | |||
1658 | NEIGH_UPDATE_F_ISROUTER)) | 1743 | NEIGH_UPDATE_F_ISROUTER)) |
1659 | ); | 1744 | ); |
1660 | 1745 | ||
1661 | /* | ||
1662 | * Redirect received -> path was valid. | ||
1663 | * Look, redirects are sent only in response to data packets, | ||
1664 | * so that this nexthop apparently is reachable. --ANK | ||
1665 | */ | ||
1666 | dst_confirm(&rt->dst); | ||
1667 | |||
1668 | /* Duplicate redirect: silently ignore. */ | ||
1669 | if (neigh == dst_get_neighbour_noref_raw(&rt->dst)) | ||
1670 | goto out; | ||
1671 | |||
1672 | nrt = ip6_rt_copy(rt, dest); | 1746 | nrt = ip6_rt_copy(rt, dest); |
1673 | if (!nrt) | 1747 | if (!nrt) |
1674 | goto out; | 1748 | goto out; |
@@ -1678,132 +1752,25 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, | |||
1678 | nrt->rt6i_flags &= ~RTF_GATEWAY; | 1752 | nrt->rt6i_flags &= ~RTF_GATEWAY; |
1679 | 1753 | ||
1680 | nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; | 1754 | nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; |
1681 | dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); | 1755 | nrt->n = neigh_clone(neigh); |
1682 | 1756 | ||
1683 | if (ip6_ins_rt(nrt)) | 1757 | if (ip6_ins_rt(nrt)) |
1684 | goto out; | 1758 | goto out; |
1685 | 1759 | ||
1686 | netevent.old = &rt->dst; | 1760 | netevent.old = &rt->dst; |
1761 | netevent.old_neigh = old_neigh; | ||
1687 | netevent.new = &nrt->dst; | 1762 | netevent.new = &nrt->dst; |
1763 | netevent.new_neigh = neigh; | ||
1764 | netevent.daddr = dest; | ||
1688 | call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); | 1765 | call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); |
1689 | 1766 | ||
1690 | if (rt->rt6i_flags & RTF_CACHE) { | 1767 | if (rt->rt6i_flags & RTF_CACHE) { |
1768 | rt = (struct rt6_info *) dst_clone(&rt->dst); | ||
1691 | ip6_del_rt(rt); | 1769 | ip6_del_rt(rt); |
1692 | return; | ||
1693 | } | 1770 | } |
1694 | 1771 | ||
1695 | out: | 1772 | out: |
1696 | dst_release(&rt->dst); | 1773 | neigh_release(neigh); |
1697 | } | ||
1698 | |||
1699 | /* | ||
1700 | * Handle ICMP "packet too big" messages | ||
1701 | * i.e. Path MTU discovery | ||
1702 | */ | ||
1703 | |||
1704 | static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr, | ||
1705 | struct net *net, u32 pmtu, int ifindex) | ||
1706 | { | ||
1707 | struct rt6_info *rt, *nrt; | ||
1708 | int allfrag = 0; | ||
1709 | again: | ||
1710 | rt = rt6_lookup(net, daddr, saddr, ifindex, 0); | ||
1711 | if (!rt) | ||
1712 | return; | ||
1713 | |||
1714 | if (rt6_check_expired(rt)) { | ||
1715 | ip6_del_rt(rt); | ||
1716 | goto again; | ||
1717 | } | ||
1718 | |||
1719 | if (pmtu >= dst_mtu(&rt->dst)) | ||
1720 | goto out; | ||
1721 | |||
1722 | if (pmtu < IPV6_MIN_MTU) { | ||
1723 | /* | ||
1724 | * According to RFC2460, PMTU is set to the IPv6 Minimum Link | ||
1725 | * MTU (1280) and a fragment header should always be included | ||
1726 | * after a node receiving Too Big message reporting PMTU is | ||
1727 | * less than the IPv6 Minimum Link MTU. | ||
1728 | */ | ||
1729 | pmtu = IPV6_MIN_MTU; | ||
1730 | allfrag = 1; | ||
1731 | } | ||
1732 | |||
1733 | /* New mtu received -> path was valid. | ||
1734 | They are sent only in response to data packets, | ||
1735 | so that this nexthop apparently is reachable. --ANK | ||
1736 | */ | ||
1737 | dst_confirm(&rt->dst); | ||
1738 | |||
1739 | /* Host route. If it is static, it would be better | ||
1740 | not to override it, but add new one, so that | ||
1741 | when cache entry will expire old pmtu | ||
1742 | would return automatically. | ||
1743 | */ | ||
1744 | if (rt->rt6i_flags & RTF_CACHE) { | ||
1745 | dst_metric_set(&rt->dst, RTAX_MTU, pmtu); | ||
1746 | if (allfrag) { | ||
1747 | u32 features = dst_metric(&rt->dst, RTAX_FEATURES); | ||
1748 | features |= RTAX_FEATURE_ALLFRAG; | ||
1749 | dst_metric_set(&rt->dst, RTAX_FEATURES, features); | ||
1750 | } | ||
1751 | rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); | ||
1752 | rt->rt6i_flags |= RTF_MODIFIED; | ||
1753 | goto out; | ||
1754 | } | ||
1755 | |||
1756 | /* Network route. | ||
1757 | Two cases are possible: | ||
1758 | 1. It is connected route. Action: COW | ||
1759 | 2. It is gatewayed route or NONEXTHOP route. Action: clone it. | ||
1760 | */ | ||
1761 | if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP)) | ||
1762 | nrt = rt6_alloc_cow(rt, daddr, saddr); | ||
1763 | else | ||
1764 | nrt = rt6_alloc_clone(rt, daddr); | ||
1765 | |||
1766 | if (nrt) { | ||
1767 | dst_metric_set(&nrt->dst, RTAX_MTU, pmtu); | ||
1768 | if (allfrag) { | ||
1769 | u32 features = dst_metric(&nrt->dst, RTAX_FEATURES); | ||
1770 | features |= RTAX_FEATURE_ALLFRAG; | ||
1771 | dst_metric_set(&nrt->dst, RTAX_FEATURES, features); | ||
1772 | } | ||
1773 | |||
1774 | /* According to RFC 1981, detecting PMTU increase shouldn't be | ||
1775 | * happened within 5 mins, the recommended timer is 10 mins. | ||
1776 | * Here this route expiration time is set to ip6_rt_mtu_expires | ||
1777 | * which is 10 mins. After 10 mins the decreased pmtu is expired | ||
1778 | * and detecting PMTU increase will be automatically happened. | ||
1779 | */ | ||
1780 | rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires); | ||
1781 | nrt->rt6i_flags |= RTF_DYNAMIC; | ||
1782 | ip6_ins_rt(nrt); | ||
1783 | } | ||
1784 | out: | ||
1785 | dst_release(&rt->dst); | ||
1786 | } | ||
1787 | |||
1788 | void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr, | ||
1789 | struct net_device *dev, u32 pmtu) | ||
1790 | { | ||
1791 | struct net *net = dev_net(dev); | ||
1792 | |||
1793 | /* | ||
1794 | * RFC 1981 states that a node "MUST reduce the size of the packets it | ||
1795 | * is sending along the path" that caused the Packet Too Big message. | ||
1796 | * Since it's not possible in the general case to determine which | ||
1797 | * interface was used to send the original packet, we update the MTU | ||
1798 | * on the interface that will be used to send future packets. We also | ||
1799 | * update the MTU on the interface that received the Packet Too Big in | ||
1800 | * case the original packet was forced out that interface with | ||
1801 | * SO_BINDTODEVICE or similar. This is the next best thing to the | ||
1802 | * correct behaviour, which would be to update the MTU on all | ||
1803 | * interfaces. | ||
1804 | */ | ||
1805 | rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0); | ||
1806 | rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex); | ||
1807 | } | 1774 | } |
1808 | 1775 | ||
1809 | /* | 1776 | /* |
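After the replacement route is inserted, the reworked rt6_do_redirect() above publishes both the old and the new dst/neighbour pairs plus the destination through a netevent, so listeners (for example hardware-offload drivers) can migrate their state. A compact sketch of just that notification step, assuming rt, nrt, neigh, old_neigh and dest are already set up as in the code above:

    /* Sketch: announce a redirect-driven route replacement to netevent listeners. */
    static void example_notify_redirect(struct rt6_info *rt, struct rt6_info *nrt,
                                        struct neighbour *old_neigh,
                                        struct neighbour *neigh,
                                        const struct in6_addr *dest)
    {
            struct netevent_redirect netevent;

            netevent.old       = &rt->dst;   /* route being superseded */
            netevent.old_neigh = old_neigh;
            netevent.new       = &nrt->dst;  /* freshly inserted clone */
            netevent.new_neigh = neigh;
            netevent.daddr     = dest;
            call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
    }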
@@ -1814,8 +1781,8 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, | |||
1814 | const struct in6_addr *dest) | 1781 | const struct in6_addr *dest) |
1815 | { | 1782 | { |
1816 | struct net *net = dev_net(ort->dst.dev); | 1783 | struct net *net = dev_net(ort->dst.dev); |
1817 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, | 1784 | struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0, |
1818 | ort->dst.dev, 0); | 1785 | ort->rt6i_table); |
1819 | 1786 | ||
1820 | if (rt) { | 1787 | if (rt) { |
1821 | rt->dst.input = ort->dst.input; | 1788 | rt->dst.input = ort->dst.input; |
@@ -2099,8 +2066,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
2099 | bool anycast) | 2066 | bool anycast) |
2100 | { | 2067 | { |
2101 | struct net *net = dev_net(idev->dev); | 2068 | struct net *net = dev_net(idev->dev); |
2102 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, | 2069 | struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL); |
2103 | net->loopback_dev, 0); | ||
2104 | int err; | 2070 | int err; |
2105 | 2071 | ||
2106 | if (!rt) { | 2072 | if (!rt) { |
@@ -2396,13 +2362,11 @@ static int rt6_fill_node(struct net *net, | |||
2396 | int iif, int type, u32 pid, u32 seq, | 2362 | int iif, int type, u32 pid, u32 seq, |
2397 | int prefix, int nowait, unsigned int flags) | 2363 | int prefix, int nowait, unsigned int flags) |
2398 | { | 2364 | { |
2399 | const struct inet_peer *peer; | ||
2400 | struct rtmsg *rtm; | 2365 | struct rtmsg *rtm; |
2401 | struct nlmsghdr *nlh; | 2366 | struct nlmsghdr *nlh; |
2402 | long expires; | 2367 | long expires; |
2403 | u32 table; | 2368 | u32 table; |
2404 | struct neighbour *n; | 2369 | struct neighbour *n; |
2405 | u32 ts, tsage; | ||
2406 | 2370 | ||
2407 | if (prefix) { /* user wants prefix routes only */ | 2371 | if (prefix) { /* user wants prefix routes only */ |
2408 | if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { | 2372 | if (!(rt->rt6i_flags & RTF_PREFIX_RT)) { |
@@ -2440,10 +2404,12 @@ static int rt6_fill_node(struct net *net, | |||
2440 | rtm->rtm_protocol = rt->rt6i_protocol; | 2404 | rtm->rtm_protocol = rt->rt6i_protocol; |
2441 | if (rt->rt6i_flags & RTF_DYNAMIC) | 2405 | if (rt->rt6i_flags & RTF_DYNAMIC) |
2442 | rtm->rtm_protocol = RTPROT_REDIRECT; | 2406 | rtm->rtm_protocol = RTPROT_REDIRECT; |
2443 | else if (rt->rt6i_flags & RTF_ADDRCONF) | 2407 | else if (rt->rt6i_flags & RTF_ADDRCONF) { |
2444 | rtm->rtm_protocol = RTPROT_KERNEL; | 2408 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) |
2445 | else if (rt->rt6i_flags & RTF_DEFAULT) | 2409 | rtm->rtm_protocol = RTPROT_RA; |
2446 | rtm->rtm_protocol = RTPROT_RA; | 2410 | else |
2411 | rtm->rtm_protocol = RTPROT_KERNEL; | ||
2412 | } | ||
2447 | 2413 | ||
2448 | if (rt->rt6i_flags & RTF_CACHE) | 2414 | if (rt->rt6i_flags & RTF_CACHE) |
2449 | rtm->rtm_flags |= RTM_F_CLONED; | 2415 | rtm->rtm_flags |= RTM_F_CLONED; |
@@ -2500,7 +2466,7 @@ static int rt6_fill_node(struct net *net, | |||
2500 | goto nla_put_failure; | 2466 | goto nla_put_failure; |
2501 | 2467 | ||
2502 | rcu_read_lock(); | 2468 | rcu_read_lock(); |
2503 | n = dst_get_neighbour_noref(&rt->dst); | 2469 | n = rt->n; |
2504 | if (n) { | 2470 | if (n) { |
2505 | if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { | 2471 | if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { |
2506 | rcu_read_unlock(); | 2472 | rcu_read_unlock(); |
@@ -2521,15 +2487,7 @@ static int rt6_fill_node(struct net *net, | |||
2521 | else | 2487 | else |
2522 | expires = INT_MAX; | 2488 | expires = INT_MAX; |
2523 | 2489 | ||
2524 | peer = rt->rt6i_peer; | 2490 | if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) |
2525 | ts = tsage = 0; | ||
2526 | if (peer && peer->tcp_ts_stamp) { | ||
2527 | ts = peer->tcp_ts; | ||
2528 | tsage = get_seconds() - peer->tcp_ts_stamp; | ||
2529 | } | ||
2530 | |||
2531 | if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage, | ||
2532 | expires, rt->dst.error) < 0) | ||
2533 | goto nla_put_failure; | 2491 | goto nla_put_failure; |
2534 | 2492 | ||
2535 | return nlmsg_end(skb, nlh); | 2493 | return nlmsg_end(skb, nlh); |
@@ -2722,7 +2680,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) | |||
2722 | seq_puts(m, "00000000000000000000000000000000 00 "); | 2680 | seq_puts(m, "00000000000000000000000000000000 00 "); |
2723 | #endif | 2681 | #endif |
2724 | rcu_read_lock(); | 2682 | rcu_read_lock(); |
2725 | n = dst_get_neighbour_noref(&rt->dst); | 2683 | n = rt->n; |
2726 | if (n) { | 2684 | if (n) { |
2727 | seq_printf(m, "%pi6", n->primary_key); | 2685 | seq_printf(m, "%pi6", n->primary_key); |
2728 | } else { | 2686 | } else { |
@@ -3007,6 +2965,31 @@ static struct pernet_operations ip6_route_net_ops = { | |||
3007 | .exit = ip6_route_net_exit, | 2965 | .exit = ip6_route_net_exit, |
3008 | }; | 2966 | }; |
3009 | 2967 | ||
2968 | static int __net_init ipv6_inetpeer_init(struct net *net) | ||
2969 | { | ||
2970 | struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL); | ||
2971 | |||
2972 | if (!bp) | ||
2973 | return -ENOMEM; | ||
2974 | inet_peer_base_init(bp); | ||
2975 | net->ipv6.peers = bp; | ||
2976 | return 0; | ||
2977 | } | ||
2978 | |||
2979 | static void __net_exit ipv6_inetpeer_exit(struct net *net) | ||
2980 | { | ||
2981 | struct inet_peer_base *bp = net->ipv6.peers; | ||
2982 | |||
2983 | net->ipv6.peers = NULL; | ||
2984 | inetpeer_invalidate_tree(bp); | ||
2985 | kfree(bp); | ||
2986 | } | ||
2987 | |||
2988 | static struct pernet_operations ipv6_inetpeer_ops = { | ||
2989 | .init = ipv6_inetpeer_init, | ||
2990 | .exit = ipv6_inetpeer_exit, | ||
2991 | }; | ||
2992 | |||
3010 | static struct pernet_operations ip6_route_net_late_ops = { | 2993 | static struct pernet_operations ip6_route_net_late_ops = { |
3011 | .init = ip6_route_net_init_late, | 2994 | .init = ip6_route_net_init_late, |
3012 | .exit = ip6_route_net_exit_late, | 2995 | .exit = ip6_route_net_exit_late, |
@@ -3032,10 +3015,14 @@ int __init ip6_route_init(void) | |||
3032 | if (ret) | 3015 | if (ret) |
3033 | goto out_kmem_cache; | 3016 | goto out_kmem_cache; |
3034 | 3017 | ||
3035 | ret = register_pernet_subsys(&ip6_route_net_ops); | 3018 | ret = register_pernet_subsys(&ipv6_inetpeer_ops); |
3036 | if (ret) | 3019 | if (ret) |
3037 | goto out_dst_entries; | 3020 | goto out_dst_entries; |
3038 | 3021 | ||
3022 | ret = register_pernet_subsys(&ip6_route_net_ops); | ||
3023 | if (ret) | ||
3024 | goto out_register_inetpeer; | ||
3025 | |||
3039 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; | 3026 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; |
3040 | 3027 | ||
3041 | /* Registering of the loopback is done before this portion of code, | 3028 | /* Registering of the loopback is done before this portion of code, |
@@ -3088,6 +3075,8 @@ out_fib6_init: | |||
3088 | fib6_gc_cleanup(); | 3075 | fib6_gc_cleanup(); |
3089 | out_register_subsys: | 3076 | out_register_subsys: |
3090 | unregister_pernet_subsys(&ip6_route_net_ops); | 3077 | unregister_pernet_subsys(&ip6_route_net_ops); |
3078 | out_register_inetpeer: | ||
3079 | unregister_pernet_subsys(&ipv6_inetpeer_ops); | ||
3091 | out_dst_entries: | 3080 | out_dst_entries: |
3092 | dst_entries_destroy(&ip6_dst_blackhole_ops); | 3081 | dst_entries_destroy(&ip6_dst_blackhole_ops); |
3093 | out_kmem_cache: | 3082 | out_kmem_cache: |
@@ -3102,6 +3091,7 @@ void ip6_route_cleanup(void) | |||
3102 | fib6_rules_cleanup(); | 3091 | fib6_rules_cleanup(); |
3103 | xfrm6_fini(); | 3092 | xfrm6_fini(); |
3104 | fib6_gc_cleanup(); | 3093 | fib6_gc_cleanup(); |
3094 | unregister_pernet_subsys(&ipv6_inetpeer_ops); | ||
3105 | unregister_pernet_subsys(&ip6_route_net_ops); | 3095 | unregister_pernet_subsys(&ip6_route_net_ops); |
3106 | dst_entries_destroy(&ip6_dst_blackhole_ops); | 3096 | dst_entries_destroy(&ip6_dst_blackhole_ops); |
3107 | kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); | 3097 | kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 60415711563f..3bd1bfc01f85 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -527,9 +527,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
527 | case ICMP_PORT_UNREACH: | 527 | case ICMP_PORT_UNREACH: |
528 | /* Impossible event. */ | 528 | /* Impossible event. */ |
529 | return 0; | 529 | return 0; |
530 | case ICMP_FRAG_NEEDED: | ||
531 | /* Soft state for pmtu is maintained by IP core. */ | ||
532 | return 0; | ||
533 | default: | 530 | default: |
534 | /* All others are translated to HOST_UNREACH. | 531 | /* All others are translated to HOST_UNREACH. |
535 | rfc2003 contains "deep thoughts" about NET_UNREACH, | 532 | rfc2003 contains "deep thoughts" about NET_UNREACH, |
@@ -542,6 +539,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
542 | if (code != ICMP_EXC_TTL) | 539 | if (code != ICMP_EXC_TTL) |
543 | return 0; | 540 | return 0; |
544 | break; | 541 | break; |
542 | case ICMP_REDIRECT: | ||
543 | break; | ||
545 | } | 544 | } |
546 | 545 | ||
547 | err = -ENOENT; | 546 | err = -ENOENT; |
@@ -551,7 +550,23 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
551 | skb->dev, | 550 | skb->dev, |
552 | iph->daddr, | 551 | iph->daddr, |
553 | iph->saddr); | 552 | iph->saddr); |
554 | if (t == NULL || t->parms.iph.daddr == 0) | 553 | if (t == NULL) |
554 | goto out; | ||
555 | |||
556 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { | ||
557 | ipv4_update_pmtu(skb, dev_net(skb->dev), info, | ||
558 | t->dev->ifindex, 0, IPPROTO_IPV6, 0); | ||
559 | err = 0; | ||
560 | goto out; | ||
561 | } | ||
562 | if (type == ICMP_REDIRECT) { | ||
563 | ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0, | ||
564 | IPPROTO_IPV6, 0); | ||
565 | err = 0; | ||
566 | goto out; | ||
567 | } | ||
568 | |||
569 | if (t->parms.iph.daddr == 0) | ||
555 | goto out; | 570 | goto out; |
556 | 571 | ||
557 | err = 0; | 572 | err = 0; |
@@ -792,7 +807,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
792 | } | 807 | } |
793 | 808 | ||
794 | if (tunnel->parms.iph.daddr && skb_dst(skb)) | 809 | if (tunnel->parms.iph.daddr && skb_dst(skb)) |
795 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); | 810 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
796 | 811 | ||
797 | if (skb->len > mtu) { | 812 | if (skb->len > mtu) { |
798 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 813 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
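With the ICMP_FRAG_NEEDED early return removed from the switch, ipip6_err() above resolves the tunnel first and then hands fragmentation-needed and redirect messages to the IPv4 helpers. The fragment below is a sketch of that dispatch only, assuming the ipv4_update_pmtu()/ipv4_redirect() signatures visible in the hunk; the tunnel lookup is omitted and only the tunnel's net_device is passed in.

	/* tdev: the tunnel's net_device; anything unhandled falls back to -ENOENT */
	static int example_tunnel_icmp(struct sk_buff *skb, struct net_device *tdev,
				       int type, int code, u32 info)
	{
		if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
			/* let the IPv4 core shrink the cached path MTU for this flow */
			ipv4_update_pmtu(skb, dev_net(skb->dev), info,
					 tdev->ifindex, 0, IPPROTO_IPV6, 0);
			return 0;
		}
		if (type == ICMP_REDIRECT) {
			/* install the new gateway for this flow */
			ipv4_redirect(skb, dev_net(skb->dev), tdev->ifindex, 0,
				      IPPROTO_IPV6, 0);
			return 0;
		}
		return -ENOENT;
	}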
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 8e951d8d3b81..bb46061c813a 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -21,9 +21,6 @@ | |||
21 | #include <net/ipv6.h> | 21 | #include <net/ipv6.h> |
22 | #include <net/tcp.h> | 22 | #include <net/tcp.h> |
23 | 23 | ||
24 | extern int sysctl_tcp_syncookies; | ||
25 | extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; | ||
26 | |||
27 | #define COOKIEBITS 24 /* Upper bits store count */ | 24 | #define COOKIEBITS 24 /* Upper bits store count */ |
28 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) | 25 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) |
29 | 26 | ||
@@ -180,7 +177,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
180 | 177 | ||
181 | /* check for timestamp cookie support */ | 178 | /* check for timestamp cookie support */ |
182 | memset(&tcp_opt, 0, sizeof(tcp_opt)); | 179 | memset(&tcp_opt, 0, sizeof(tcp_opt)); |
183 | tcp_parse_options(skb, &tcp_opt, &hash_location, 0); | 180 | tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); |
184 | 181 | ||
185 | if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) | 182 | if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) |
186 | goto out; | 183 | goto out; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9df64a50b075..f49476e2d884 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -277,22 +277,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
277 | rt = (struct rt6_info *) dst; | 277 | rt = (struct rt6_info *) dst; |
278 | if (tcp_death_row.sysctl_tw_recycle && | 278 | if (tcp_death_row.sysctl_tw_recycle && |
279 | !tp->rx_opt.ts_recent_stamp && | 279 | !tp->rx_opt.ts_recent_stamp && |
280 | ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) { | 280 | ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) |
281 | struct inet_peer *peer = rt6_get_peer(rt); | 281 | tcp_fetch_timewait_stamp(sk, dst); |
282 | /* | ||
283 | * VJ's idea. We save last timestamp seen from | ||
284 | * the destination in peer table, when entering state | ||
285 | * TIME-WAIT * and initialize rx_opt.ts_recent from it, | ||
286 | * when trying new connection. | ||
287 | */ | ||
288 | if (peer) { | ||
289 | inet_peer_refcheck(peer); | ||
290 | if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { | ||
291 | tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; | ||
292 | tp->rx_opt.ts_recent = peer->tcp_ts; | ||
293 | } | ||
294 | } | ||
295 | } | ||
296 | 282 | ||
297 | icsk->icsk_ext_hdr_len = 0; | 283 | icsk->icsk_ext_hdr_len = 0; |
298 | if (np->opt) | 284 | if (np->opt) |
@@ -329,6 +315,23 @@ failure: | |||
329 | return err; | 315 | return err; |
330 | } | 316 | } |
331 | 317 | ||
318 | static void tcp_v6_mtu_reduced(struct sock *sk) | ||
319 | { | ||
320 | struct dst_entry *dst; | ||
321 | |||
322 | if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) | ||
323 | return; | ||
324 | |||
325 | dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); | ||
326 | if (!dst) | ||
327 | return; | ||
328 | |||
329 | if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { | ||
330 | tcp_sync_mss(sk, dst_mtu(dst)); | ||
331 | tcp_simple_retransmit(sk); | ||
332 | } | ||
333 | } | ||
334 | |||
332 | static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 335 | static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
333 | u8 type, u8 code, int offset, __be32 info) | 336 | u8 type, u8 code, int offset, __be32 info) |
334 | { | 337 | { |
@@ -356,7 +359,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
356 | } | 359 | } |
357 | 360 | ||
358 | bh_lock_sock(sk); | 361 | bh_lock_sock(sk); |
359 | if (sock_owned_by_user(sk)) | 362 | if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) |
360 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); | 363 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); |
361 | 364 | ||
362 | if (sk->sk_state == TCP_CLOSE) | 365 | if (sk->sk_state == TCP_CLOSE) |
@@ -377,49 +380,19 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
377 | 380 | ||
378 | np = inet6_sk(sk); | 381 | np = inet6_sk(sk); |
379 | 382 | ||
380 | if (type == ICMPV6_PKT_TOOBIG) { | 383 | if (type == NDISC_REDIRECT) { |
381 | struct dst_entry *dst; | 384 | struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
382 | |||
383 | if (sock_owned_by_user(sk)) | ||
384 | goto out; | ||
385 | if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) | ||
386 | goto out; | ||
387 | |||
388 | /* icmp should have updated the destination cache entry */ | ||
389 | dst = __sk_dst_check(sk, np->dst_cookie); | ||
390 | |||
391 | if (dst == NULL) { | ||
392 | struct inet_sock *inet = inet_sk(sk); | ||
393 | struct flowi6 fl6; | ||
394 | |||
395 | /* BUGGG_FUTURE: Again, it is not clear how | ||
396 | to handle rthdr case. Ignore this complexity | ||
397 | for now. | ||
398 | */ | ||
399 | memset(&fl6, 0, sizeof(fl6)); | ||
400 | fl6.flowi6_proto = IPPROTO_TCP; | ||
401 | fl6.daddr = np->daddr; | ||
402 | fl6.saddr = np->saddr; | ||
403 | fl6.flowi6_oif = sk->sk_bound_dev_if; | ||
404 | fl6.flowi6_mark = sk->sk_mark; | ||
405 | fl6.fl6_dport = inet->inet_dport; | ||
406 | fl6.fl6_sport = inet->inet_sport; | ||
407 | security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); | ||
408 | |||
409 | dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false); | ||
410 | if (IS_ERR(dst)) { | ||
411 | sk->sk_err_soft = -PTR_ERR(dst); | ||
412 | goto out; | ||
413 | } | ||
414 | 385 | ||
415 | } else | 386 | if (dst) |
416 | dst_hold(dst); | 387 | dst->ops->redirect(dst, sk, skb); |
388 | } | ||
417 | 389 | ||
418 | if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { | 390 | if (type == ICMPV6_PKT_TOOBIG) { |
419 | tcp_sync_mss(sk, dst_mtu(dst)); | 391 | tp->mtu_info = ntohl(info); |
420 | tcp_simple_retransmit(sk); | 392 | if (!sock_owned_by_user(sk)) |
421 | } /* else let the usual retransmit timer handle it */ | 393 | tcp_v6_mtu_reduced(sk); |
422 | dst_release(dst); | 394 | else |
395 | set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); | ||
423 | goto out; | 396 | goto out; |
424 | } | 397 | } |
425 | 398 | ||
@@ -475,62 +448,43 @@ out: | |||
475 | } | 448 | } |
476 | 449 | ||
477 | 450 | ||
478 | static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | 451 | static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, |
452 | struct flowi6 *fl6, | ||
453 | struct request_sock *req, | ||
479 | struct request_values *rvp, | 454 | struct request_values *rvp, |
480 | u16 queue_mapping) | 455 | u16 queue_mapping) |
481 | { | 456 | { |
482 | struct inet6_request_sock *treq = inet6_rsk(req); | 457 | struct inet6_request_sock *treq = inet6_rsk(req); |
483 | struct ipv6_pinfo *np = inet6_sk(sk); | 458 | struct ipv6_pinfo *np = inet6_sk(sk); |
484 | struct sk_buff * skb; | 459 | struct sk_buff * skb; |
485 | struct ipv6_txoptions *opt = NULL; | 460 | int err = -ENOMEM; |
486 | struct in6_addr * final_p, final; | ||
487 | struct flowi6 fl6; | ||
488 | struct dst_entry *dst; | ||
489 | int err; | ||
490 | 461 | ||
491 | memset(&fl6, 0, sizeof(fl6)); | 462 | /* First, grab a route. */ |
492 | fl6.flowi6_proto = IPPROTO_TCP; | 463 | if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) |
493 | fl6.daddr = treq->rmt_addr; | ||
494 | fl6.saddr = treq->loc_addr; | ||
495 | fl6.flowlabel = 0; | ||
496 | fl6.flowi6_oif = treq->iif; | ||
497 | fl6.flowi6_mark = sk->sk_mark; | ||
498 | fl6.fl6_dport = inet_rsk(req)->rmt_port; | ||
499 | fl6.fl6_sport = inet_rsk(req)->loc_port; | ||
500 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); | ||
501 | |||
502 | opt = np->opt; | ||
503 | final_p = fl6_update_dst(&fl6, opt, &final); | ||
504 | |||
505 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); | ||
506 | if (IS_ERR(dst)) { | ||
507 | err = PTR_ERR(dst); | ||
508 | dst = NULL; | ||
509 | goto done; | 464 | goto done; |
510 | } | 465 | |
511 | skb = tcp_make_synack(sk, dst, req, rvp); | 466 | skb = tcp_make_synack(sk, dst, req, rvp); |
512 | err = -ENOMEM; | 467 | |
513 | if (skb) { | 468 | if (skb) { |
514 | __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); | 469 | __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); |
515 | 470 | ||
516 | fl6.daddr = treq->rmt_addr; | 471 | fl6->daddr = treq->rmt_addr; |
517 | skb_set_queue_mapping(skb, queue_mapping); | 472 | skb_set_queue_mapping(skb, queue_mapping); |
518 | err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); | 473 | err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); |
519 | err = net_xmit_eval(err); | 474 | err = net_xmit_eval(err); |
520 | } | 475 | } |
521 | 476 | ||
522 | done: | 477 | done: |
523 | if (opt && opt != np->opt) | ||
524 | sock_kfree_s(sk, opt, opt->tot_len); | ||
525 | dst_release(dst); | ||
526 | return err; | 478 | return err; |
527 | } | 479 | } |
528 | 480 | ||
529 | static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, | 481 | static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, |
530 | struct request_values *rvp) | 482 | struct request_values *rvp) |
531 | { | 483 | { |
484 | struct flowi6 fl6; | ||
485 | |||
532 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); | 486 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); |
533 | return tcp_v6_send_synack(sk, req, rvp, 0); | 487 | return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0); |
534 | } | 488 | } |
535 | 489 | ||
536 | static void tcp_v6_reqsk_destructor(struct request_sock *req) | 490 | static void tcp_v6_reqsk_destructor(struct request_sock *req) |
@@ -1057,6 +1011,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1057 | struct tcp_sock *tp = tcp_sk(sk); | 1011 | struct tcp_sock *tp = tcp_sk(sk); |
1058 | __u32 isn = TCP_SKB_CB(skb)->when; | 1012 | __u32 isn = TCP_SKB_CB(skb)->when; |
1059 | struct dst_entry *dst = NULL; | 1013 | struct dst_entry *dst = NULL; |
1014 | struct flowi6 fl6; | ||
1060 | bool want_cookie = false; | 1015 | bool want_cookie = false; |
1061 | 1016 | ||
1062 | if (skb->protocol == htons(ETH_P_IP)) | 1017 | if (skb->protocol == htons(ETH_P_IP)) |
@@ -1085,7 +1040,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1085 | tcp_clear_options(&tmp_opt); | 1040 | tcp_clear_options(&tmp_opt); |
1086 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 1041 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
1087 | tmp_opt.user_mss = tp->rx_opt.user_mss; | 1042 | tmp_opt.user_mss = tp->rx_opt.user_mss; |
1088 | tcp_parse_options(skb, &tmp_opt, &hash_location, 0); | 1043 | tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); |
1089 | 1044 | ||
1090 | if (tmp_opt.cookie_plus > 0 && | 1045 | if (tmp_opt.cookie_plus > 0 && |
1091 | tmp_opt.saw_tstamp && | 1046 | tmp_opt.saw_tstamp && |
@@ -1150,8 +1105,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1150 | treq->iif = inet6_iif(skb); | 1105 | treq->iif = inet6_iif(skb); |
1151 | 1106 | ||
1152 | if (!isn) { | 1107 | if (!isn) { |
1153 | struct inet_peer *peer = NULL; | ||
1154 | |||
1155 | if (ipv6_opt_accepted(sk, skb) || | 1108 | if (ipv6_opt_accepted(sk, skb) || |
1156 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || | 1109 | np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || |
1157 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { | 1110 | np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { |
@@ -1176,14 +1129,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1176 | */ | 1129 | */ |
1177 | if (tmp_opt.saw_tstamp && | 1130 | if (tmp_opt.saw_tstamp && |
1178 | tcp_death_row.sysctl_tw_recycle && | 1131 | tcp_death_row.sysctl_tw_recycle && |
1179 | (dst = inet6_csk_route_req(sk, req)) != NULL && | 1132 | (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) { |
1180 | (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL && | 1133 | if (!tcp_peer_is_proven(req, dst, true)) { |
1181 | ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6, | ||
1182 | &treq->rmt_addr)) { | ||
1183 | inet_peer_refcheck(peer); | ||
1184 | if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && | ||
1185 | (s32)(peer->tcp_ts - req->ts_recent) > | ||
1186 | TCP_PAWS_WINDOW) { | ||
1187 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); | 1134 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); |
1188 | goto drop_and_release; | 1135 | goto drop_and_release; |
1189 | } | 1136 | } |
@@ -1192,8 +1139,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1192 | else if (!sysctl_tcp_syncookies && | 1139 | else if (!sysctl_tcp_syncookies && |
1193 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < | 1140 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < |
1194 | (sysctl_max_syn_backlog >> 2)) && | 1141 | (sysctl_max_syn_backlog >> 2)) && |
1195 | (!peer || !peer->tcp_ts_stamp) && | 1142 | !tcp_peer_is_proven(req, dst, false)) { |
1196 | (!dst || !dst_metric(dst, RTAX_RTT))) { | ||
1197 | /* Without syncookies last quarter of | 1143 | /* Without syncookies last quarter of |
1198 | * backlog is filled with destinations, | 1144 | * backlog is filled with destinations, |
1199 | * proven to be alive. | 1145 | * proven to be alive. |
@@ -1215,7 +1161,7 @@ have_isn: | |||
1215 | if (security_inet_conn_request(sk, skb, req)) | 1161 | if (security_inet_conn_request(sk, skb, req)) |
1216 | goto drop_and_release; | 1162 | goto drop_and_release; |
1217 | 1163 | ||
1218 | if (tcp_v6_send_synack(sk, req, | 1164 | if (tcp_v6_send_synack(sk, dst, &fl6, req, |
1219 | (struct request_values *)&tmp_ext, | 1165 | (struct request_values *)&tmp_ext, |
1220 | skb_get_queue_mapping(skb)) || | 1166 | skb_get_queue_mapping(skb)) || |
1221 | want_cookie) | 1167 | want_cookie) |
@@ -1242,10 +1188,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1242 | struct inet_sock *newinet; | 1188 | struct inet_sock *newinet; |
1243 | struct tcp_sock *newtp; | 1189 | struct tcp_sock *newtp; |
1244 | struct sock *newsk; | 1190 | struct sock *newsk; |
1245 | struct ipv6_txoptions *opt; | ||
1246 | #ifdef CONFIG_TCP_MD5SIG | 1191 | #ifdef CONFIG_TCP_MD5SIG |
1247 | struct tcp_md5sig_key *key; | 1192 | struct tcp_md5sig_key *key; |
1248 | #endif | 1193 | #endif |
1194 | struct flowi6 fl6; | ||
1249 | 1195 | ||
1250 | if (skb->protocol == htons(ETH_P_IP)) { | 1196 | if (skb->protocol == htons(ETH_P_IP)) { |
1251 | /* | 1197 | /* |
@@ -1302,13 +1248,12 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1302 | } | 1248 | } |
1303 | 1249 | ||
1304 | treq = inet6_rsk(req); | 1250 | treq = inet6_rsk(req); |
1305 | opt = np->opt; | ||
1306 | 1251 | ||
1307 | if (sk_acceptq_is_full(sk)) | 1252 | if (sk_acceptq_is_full(sk)) |
1308 | goto out_overflow; | 1253 | goto out_overflow; |
1309 | 1254 | ||
1310 | if (!dst) { | 1255 | if (!dst) { |
1311 | dst = inet6_csk_route_req(sk, req); | 1256 | dst = inet6_csk_route_req(sk, &fl6, req); |
1312 | if (!dst) | 1257 | if (!dst) |
1313 | goto out; | 1258 | goto out; |
1314 | } | 1259 | } |
@@ -1371,11 +1316,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1371 | but we make one more one thing there: reattach optmem | 1316 | but we make one more one thing there: reattach optmem |
1372 | to newsk. | 1317 | to newsk. |
1373 | */ | 1318 | */ |
1374 | if (opt) { | 1319 | if (np->opt) |
1375 | newnp->opt = ipv6_dup_options(newsk, opt); | 1320 | newnp->opt = ipv6_dup_options(newsk, np->opt); |
1376 | if (opt != np->opt) | ||
1377 | sock_kfree_s(sk, opt, opt->tot_len); | ||
1378 | } | ||
1379 | 1321 | ||
1380 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 1322 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1381 | if (newnp->opt) | 1323 | if (newnp->opt) |
@@ -1422,8 +1364,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1422 | out_overflow: | 1364 | out_overflow: |
1423 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); | 1365 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); |
1424 | out_nonewsk: | 1366 | out_nonewsk: |
1425 | if (opt && opt != np->opt) | ||
1426 | sock_kfree_s(sk, opt, opt->tot_len); | ||
1427 | dst_release(dst); | 1367 | dst_release(dst); |
1428 | out: | 1368 | out: |
1429 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | 1369 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
@@ -1734,42 +1674,10 @@ do_time_wait: | |||
1734 | goto discard_it; | 1674 | goto discard_it; |
1735 | } | 1675 | } |
1736 | 1676 | ||
1737 | static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it) | ||
1738 | { | ||
1739 | struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk); | ||
1740 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
1741 | struct inet_peer *peer; | ||
1742 | |||
1743 | if (!rt || | ||
1744 | !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) { | ||
1745 | peer = inet_getpeer_v6(&np->daddr, 1); | ||
1746 | *release_it = true; | ||
1747 | } else { | ||
1748 | if (!rt->rt6i_peer) | ||
1749 | rt6_bind_peer(rt, 1); | ||
1750 | peer = rt->rt6i_peer; | ||
1751 | *release_it = false; | ||
1752 | } | ||
1753 | |||
1754 | return peer; | ||
1755 | } | ||
1756 | |||
1757 | static void *tcp_v6_tw_get_peer(struct sock *sk) | ||
1758 | { | ||
1759 | const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); | ||
1760 | const struct inet_timewait_sock *tw = inet_twsk(sk); | ||
1761 | |||
1762 | if (tw->tw_family == AF_INET) | ||
1763 | return tcp_v4_tw_get_peer(sk); | ||
1764 | |||
1765 | return inet_getpeer_v6(&tw6->tw_v6_daddr, 1); | ||
1766 | } | ||
1767 | |||
1768 | static struct timewait_sock_ops tcp6_timewait_sock_ops = { | 1677 | static struct timewait_sock_ops tcp6_timewait_sock_ops = { |
1769 | .twsk_obj_size = sizeof(struct tcp6_timewait_sock), | 1678 | .twsk_obj_size = sizeof(struct tcp6_timewait_sock), |
1770 | .twsk_unique = tcp_twsk_unique, | 1679 | .twsk_unique = tcp_twsk_unique, |
1771 | .twsk_destructor= tcp_twsk_destructor, | 1680 | .twsk_destructor= tcp_twsk_destructor, |
1772 | .twsk_getpeer = tcp_v6_tw_get_peer, | ||
1773 | }; | 1681 | }; |
1774 | 1682 | ||
1775 | static const struct inet_connection_sock_af_ops ipv6_specific = { | 1683 | static const struct inet_connection_sock_af_ops ipv6_specific = { |
@@ -1778,7 +1686,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { | |||
1778 | .rebuild_header = inet6_sk_rebuild_header, | 1686 | .rebuild_header = inet6_sk_rebuild_header, |
1779 | .conn_request = tcp_v6_conn_request, | 1687 | .conn_request = tcp_v6_conn_request, |
1780 | .syn_recv_sock = tcp_v6_syn_recv_sock, | 1688 | .syn_recv_sock = tcp_v6_syn_recv_sock, |
1781 | .get_peer = tcp_v6_get_peer, | ||
1782 | .net_header_len = sizeof(struct ipv6hdr), | 1689 | .net_header_len = sizeof(struct ipv6hdr), |
1783 | .net_frag_header_len = sizeof(struct frag_hdr), | 1690 | .net_frag_header_len = sizeof(struct frag_hdr), |
1784 | .setsockopt = ipv6_setsockopt, | 1691 | .setsockopt = ipv6_setsockopt, |
@@ -1810,7 +1717,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { | |||
1810 | .rebuild_header = inet_sk_rebuild_header, | 1717 | .rebuild_header = inet_sk_rebuild_header, |
1811 | .conn_request = tcp_v6_conn_request, | 1718 | .conn_request = tcp_v6_conn_request, |
1812 | .syn_recv_sock = tcp_v6_syn_recv_sock, | 1719 | .syn_recv_sock = tcp_v6_syn_recv_sock, |
1813 | .get_peer = tcp_v4_get_peer, | ||
1814 | .net_header_len = sizeof(struct iphdr), | 1720 | .net_header_len = sizeof(struct iphdr), |
1815 | .setsockopt = ipv6_setsockopt, | 1721 | .setsockopt = ipv6_setsockopt, |
1816 | .getsockopt = ipv6_getsockopt, | 1722 | .getsockopt = ipv6_getsockopt, |
@@ -2049,6 +1955,8 @@ struct proto tcpv6_prot = { | |||
2049 | .sendmsg = tcp_sendmsg, | 1955 | .sendmsg = tcp_sendmsg, |
2050 | .sendpage = tcp_sendpage, | 1956 | .sendpage = tcp_sendpage, |
2051 | .backlog_rcv = tcp_v6_do_rcv, | 1957 | .backlog_rcv = tcp_v6_do_rcv, |
1958 | .release_cb = tcp_release_cb, | ||
1959 | .mtu_reduced = tcp_v6_mtu_reduced, | ||
2052 | .hash = tcp_v6_hash, | 1960 | .hash = tcp_v6_hash, |
2053 | .unhash = inet_unhash, | 1961 | .unhash = inet_unhash, |
2054 | .get_port = inet_csk_get_port, | 1962 | .get_port = inet_csk_get_port, |
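The tcp_ipv6.c changes above stop doing PMTU work inside tcp_v6_err(): the handler only records ntohl(info) in tp->mtu_info, and if a process currently owns the socket it sets TCP_MTU_REDUCED_DEFERRED so the new .release_cb/.mtu_reduced hooks apply the change once the lock is released. A condensed sketch of that split, reusing the names from the hunks:

	/* softirq side: record the new MTU, defer the heavy lifting if the
	 * socket is locked by user context
	 */
	static void example_handle_pkt_toobig(struct sock *sk, __be32 info)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else
			set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
	}

	/* process side: reached via tcp_release_cb() after the lock is dropped */
	static void example_mtu_reduced(struct sock *sk)
	{
		struct dst_entry *dst;

		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			return;

		dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
		if (!dst)
			return;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		}
	}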
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f05099fc5901..99d0077b56b8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -48,6 +48,7 @@ | |||
48 | 48 | ||
49 | #include <linux/proc_fs.h> | 49 | #include <linux/proc_fs.h> |
50 | #include <linux/seq_file.h> | 50 | #include <linux/seq_file.h> |
51 | #include <trace/events/skb.h> | ||
51 | #include "udp_impl.h" | 52 | #include "udp_impl.h" |
52 | 53 | ||
53 | int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) | 54 | int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) |
@@ -385,15 +386,16 @@ try_again: | |||
385 | 386 | ||
386 | if (skb_csum_unnecessary(skb)) | 387 | if (skb_csum_unnecessary(skb)) |
387 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | 388 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
388 | msg->msg_iov, copied ); | 389 | msg->msg_iov, copied); |
389 | else { | 390 | else { |
390 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | 391 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); |
391 | if (err == -EINVAL) | 392 | if (err == -EINVAL) |
392 | goto csum_copy_err; | 393 | goto csum_copy_err; |
393 | } | 394 | } |
394 | if (err) | 395 | if (unlikely(err)) { |
396 | trace_kfree_skb(skb, udpv6_recvmsg); | ||
395 | goto out_free; | 397 | goto out_free; |
396 | 398 | } | |
397 | if (!peeked) { | 399 | if (!peeked) { |
398 | if (is_udp4) | 400 | if (is_udp4) |
399 | UDP_INC_STATS_USER(sock_net(sk), | 401 | UDP_INC_STATS_USER(sock_net(sk), |
@@ -479,6 +481,11 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
479 | if (sk == NULL) | 481 | if (sk == NULL) |
480 | return; | 482 | return; |
481 | 483 | ||
484 | if (type == ICMPV6_PKT_TOOBIG) | ||
485 | ip6_sk_update_pmtu(skb, sk, info); | ||
486 | if (type == NDISC_REDIRECT) | ||
487 | ip6_sk_redirect(skb, sk); | ||
488 | |||
482 | np = inet6_sk(sk); | 489 | np = inet6_sk(sk); |
483 | 490 | ||
484 | if (!icmpv6_err_convert(type, code, &err) && !np->recverr) | 491 | if (!icmpv6_err_convert(type, code, &err) && !np->recverr) |
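__udp6_lib_err() above now pushes ICMPv6 "packet too big" and redirect events down to the matched socket instead of relying solely on the routing cache being updated elsewhere. A short sketch of that per-socket dispatch; the helper names are the ones the hunk introduces.

	static void example_udp6_err_notify(struct sk_buff *skb, struct sock *sk,
					    u8 type, __be32 info)
	{
		if (type == ICMPV6_PKT_TOOBIG)
			ip6_sk_update_pmtu(skb, sk, info);	/* per-socket PMTU update */
		else if (type == NDISC_REDIRECT)
			ip6_sk_redirect(skb, sk);		/* per-socket redirect */
	}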
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8625fba96db9..ef39812107b1 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -99,12 +99,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
99 | if (!xdst->u.rt6.rt6i_idev) | 99 | if (!xdst->u.rt6.rt6i_idev) |
100 | return -ENODEV; | 100 | return -ENODEV; |
101 | 101 | ||
102 | xdst->u.rt6.rt6i_peer = rt->rt6i_peer; | 102 | rt6_transfer_peer(&xdst->u.rt6, rt); |
103 | if (rt->rt6i_peer) | ||
104 | atomic_inc(&rt->rt6i_peer->refcnt); | ||
105 | 103 | ||
106 | /* Sheit... I remember I did this right. Apparently, | 104 | /* Sheit... I remember I did this right. Apparently, |
107 | * it was magically lost, so this code needs audit */ | 105 | * it was magically lost, so this code needs audit */ |
106 | xdst->u.rt6.n = neigh_clone(rt->n); | ||
108 | xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | | 107 | xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | |
109 | RTF_LOCAL); | 108 | RTF_LOCAL); |
110 | xdst->u.rt6.rt6i_metric = rt->rt6i_metric; | 109 | xdst->u.rt6.rt6i_metric = rt->rt6i_metric; |
@@ -208,12 +207,22 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops) | |||
208 | return dst_entries_get_fast(ops) > ops->gc_thresh * 2; | 207 | return dst_entries_get_fast(ops) > ops->gc_thresh * 2; |
209 | } | 208 | } |
210 | 209 | ||
211 | static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) | 210 | static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk, |
211 | struct sk_buff *skb, u32 mtu) | ||
212 | { | 212 | { |
213 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | 213 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; |
214 | struct dst_entry *path = xdst->route; | 214 | struct dst_entry *path = xdst->route; |
215 | 215 | ||
216 | path->ops->update_pmtu(path, mtu); | 216 | path->ops->update_pmtu(path, sk, skb, mtu); |
217 | } | ||
218 | |||
219 | static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk, | ||
220 | struct sk_buff *skb) | ||
221 | { | ||
222 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | ||
223 | struct dst_entry *path = xdst->route; | ||
224 | |||
225 | path->ops->redirect(path, sk, skb); | ||
217 | } | 226 | } |
218 | 227 | ||
219 | static void xfrm6_dst_destroy(struct dst_entry *dst) | 228 | static void xfrm6_dst_destroy(struct dst_entry *dst) |
@@ -223,8 +232,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) | |||
223 | if (likely(xdst->u.rt6.rt6i_idev)) | 232 | if (likely(xdst->u.rt6.rt6i_idev)) |
224 | in6_dev_put(xdst->u.rt6.rt6i_idev); | 233 | in6_dev_put(xdst->u.rt6.rt6i_idev); |
225 | dst_destroy_metrics_generic(dst); | 234 | dst_destroy_metrics_generic(dst); |
226 | if (likely(xdst->u.rt6.rt6i_peer)) | 235 | if (rt6_has_peer(&xdst->u.rt6)) { |
227 | inet_putpeer(xdst->u.rt6.rt6i_peer); | 236 | struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6); |
237 | inet_putpeer(peer); | ||
238 | } | ||
228 | xfrm_dst_destroy(xdst); | 239 | xfrm_dst_destroy(xdst); |
229 | } | 240 | } |
230 | 241 | ||
@@ -260,6 +271,7 @@ static struct dst_ops xfrm6_dst_ops = { | |||
260 | .protocol = cpu_to_be16(ETH_P_IPV6), | 271 | .protocol = cpu_to_be16(ETH_P_IPV6), |
261 | .gc = xfrm6_garbage_collect, | 272 | .gc = xfrm6_garbage_collect, |
262 | .update_pmtu = xfrm6_update_pmtu, | 273 | .update_pmtu = xfrm6_update_pmtu, |
274 | .redirect = xfrm6_redirect, | ||
263 | .cow_metrics = dst_cow_metrics_generic, | 275 | .cow_metrics = dst_cow_metrics_generic, |
264 | .destroy = xfrm6_dst_destroy, | 276 | .destroy = xfrm6_dst_destroy, |
265 | .ifdown = xfrm6_dst_ifdown, | 277 | .ifdown = xfrm6_dst_ifdown, |
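The xfrm6_policy.c hunks track the dst_ops interface change: update_pmtu() now receives the socket and skb that triggered the event, and a new redirect() operation is wired into the ops table; for xfrm bundles both simply pass through to the path dst underneath. A minimal pass-through sketch under those signatures, with example_* names as placeholders and the remaining ops elided:

	static void example_update_pmtu(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb, u32 mtu)
	{
		struct dst_entry *path = ((struct xfrm_dst *)dst)->route;

		/* forward the notification to the route the bundle is stacked on */
		path->ops->update_pmtu(path, sk, skb, mtu);
	}

	static void example_redirect(struct dst_entry *dst, struct sock *sk,
				     struct sk_buff *skb)
	{
		struct dst_entry *path = ((struct xfrm_dst *)dst)->route;

		path->ops->redirect(path, sk, skb);
	}

	static struct dst_ops example_dst_ops = {
		.family		= AF_INET6,
		.update_pmtu	= example_update_pmtu,
		.redirect	= example_redirect,
		/* remaining ops elided */
	};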