aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 17:27:40 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 17:27:40 -0400
commitf9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /net/ipv6
parent0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parente5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov. 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J Benniston. 3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn Mork. 4) BPF now has a "random" opcode, from Chema Gonzalez. 5) Add more BPF documentation and improve test framework, from Daniel Borkmann. 6) Support TCP fastopen over ipv6, from Daniel Lee. 7) Add software TSO helper functions and use them to support software TSO in mvneta and mv643xx_eth drivers. From Ezequiel Garcia. 8) Support software TSO in fec driver too, from Nimrod Andy. 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli. 10) Handle broadcasts more gracefully over macvlan when there are large numbers of interfaces configured, from Herbert Xu. 11) Allow more control over fwmark used for non-socket based responses, from Lorenzo Colitti. 12) Do TCP congestion window limiting based upon measurements, from Neal Cardwell. 13) Support busy polling in SCTP, from Neal Horman. 14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru. 15) Bridge promisc mode handling improvements from Vlad Yasevich. 16) Don't use inetpeer entries to implement ID generation any more, it performs poorly, from Eric Dumazet. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits) rtnetlink: fix userspace API breakage for iproute2 < v3.9.0 tcp: fixing TLP's FIN recovery net: fec: Add software TSO support net: fec: Add Scatter/gather support net: fec: Increase buffer descriptor entry number net: fec: Factorize feature setting net: fec: Enable IP header hardware checksum net: fec: Factorize the .xmit transmit function bridge: fix compile error when compiling without IPv6 support bridge: fix smatch warning / potential null pointer dereference via-rhine: fix full-duplex with autoneg disable bnx2x: Enlarge the dorq threshold for VFs bnx2x: Check for UNDI in uncommon branch bnx2x: Fix 1G-baseT link bnx2x: Fix link for KR with swapped polarity lane sctp: Fix sk_ack_backlog wrap-around problem net/core: Add VF link state control policy net/fsl: xgmac_mdio is dependent on OF_MDIO net/fsl: Make xgmac_mdio read error message useful net_sched: drr: warn when qdisc is not work conserving ...
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/addrconf.c49
-rw-r--r--net/ipv6/addrconf_core.c2
-rw-r--r--net/ipv6/af_inet6.c45
-rw-r--r--net/ipv6/icmp.c41
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_checksum.c63
-rw-r--r--net/ipv6/ip6_fib.c12
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c64
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c24
-rw-r--r--net/ipv6/ip6_tunnel.c1
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c12
-rw-r--r--net/ipv6/output_core.c27
-rw-r--r--net/ipv6/ping.c8
-rw-r--r--net/ipv6/proc.c14
-rw-r--r--net/ipv6/raw.c11
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/ipv6/syncookies.c4
-rw-r--r--net/ipv6/sysctl_net_ipv6.c7
-rw-r--r--net/ipv6/tcp_ipv6.c86
-rw-r--r--net/ipv6/udp.c66
-rw-r--r--net/ipv6/udp_offload.c5
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/ipv6/xfrm6_output.c6
29 files changed, 291 insertions, 290 deletions
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6c7fa0853fc7..5667b3003af9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -275,19 +275,14 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
275{ 275{
276 int i; 276 int i;
277 277
278 if (snmp_mib_init((void __percpu **)idev->stats.ipv6, 278 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
279 sizeof(struct ipstats_mib), 279 if (!idev->stats.ipv6)
280 __alignof__(struct ipstats_mib)) < 0)
281 goto err_ip; 280 goto err_ip;
282 281
283 for_each_possible_cpu(i) { 282 for_each_possible_cpu(i) {
284 struct ipstats_mib *addrconf_stats; 283 struct ipstats_mib *addrconf_stats;
285 addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i); 284 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
286 u64_stats_init(&addrconf_stats->syncp); 285 u64_stats_init(&addrconf_stats->syncp);
287#if SNMP_ARRAY_SZ == 2
288 addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
289 u64_stats_init(&addrconf_stats->syncp);
290#endif
291 } 286 }
292 287
293 288
@@ -305,7 +300,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
305err_icmpmsg: 300err_icmpmsg:
306 kfree(idev->stats.icmpv6dev); 301 kfree(idev->stats.icmpv6dev);
307err_icmp: 302err_icmp:
308 snmp_mib_free((void __percpu **)idev->stats.ipv6); 303 free_percpu(idev->stats.ipv6);
309err_ip: 304err_ip:
310 return -ENOMEM; 305 return -ENOMEM;
311} 306}
@@ -2504,8 +2499,8 @@ static int inet6_addr_add(struct net *net, int ifindex,
2504 return PTR_ERR(ifp); 2499 return PTR_ERR(ifp);
2505} 2500}
2506 2501
2507static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx, 2502static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2508 unsigned int plen) 2503 const struct in6_addr *pfx, unsigned int plen)
2509{ 2504{
2510 struct inet6_ifaddr *ifp; 2505 struct inet6_ifaddr *ifp;
2511 struct inet6_dev *idev; 2506 struct inet6_dev *idev;
@@ -2528,7 +2523,12 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p
2528 in6_ifa_hold(ifp); 2523 in6_ifa_hold(ifp);
2529 read_unlock_bh(&idev->lock); 2524 read_unlock_bh(&idev->lock);
2530 2525
2526 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2527 (ifa_flags & IFA_F_MANAGETEMPADDR))
2528 manage_tempaddrs(idev, ifp, 0, 0, false,
2529 jiffies);
2531 ipv6_del_addr(ifp); 2530 ipv6_del_addr(ifp);
2531 addrconf_verify_rtnl();
2532 return 0; 2532 return 0;
2533 } 2533 }
2534 } 2534 }
@@ -2568,7 +2568,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
2568 return -EFAULT; 2568 return -EFAULT;
2569 2569
2570 rtnl_lock(); 2570 rtnl_lock();
2571 err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, 2571 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2572 ireq.ifr6_prefixlen); 2572 ireq.ifr6_prefixlen);
2573 rtnl_unlock(); 2573 rtnl_unlock();
2574 return err; 2574 return err;
@@ -2813,18 +2813,6 @@ static void addrconf_gre_config(struct net_device *dev)
2813} 2813}
2814#endif 2814#endif
2815 2815
2816static inline int
2817ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
2818{
2819 struct in6_addr lladdr;
2820
2821 if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) {
2822 addrconf_add_linklocal(idev, &lladdr);
2823 return 0;
2824 }
2825 return -1;
2826}
2827
2828static int addrconf_notify(struct notifier_block *this, unsigned long event, 2816static int addrconf_notify(struct notifier_block *this, unsigned long event,
2829 void *ptr) 2817 void *ptr)
2830{ 2818{
@@ -3743,6 +3731,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3743 struct ifaddrmsg *ifm; 3731 struct ifaddrmsg *ifm;
3744 struct nlattr *tb[IFA_MAX+1]; 3732 struct nlattr *tb[IFA_MAX+1];
3745 struct in6_addr *pfx, *peer_pfx; 3733 struct in6_addr *pfx, *peer_pfx;
3734 u32 ifa_flags;
3746 int err; 3735 int err;
3747 3736
3748 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3737 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3754,7 +3743,13 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3754 if (pfx == NULL) 3743 if (pfx == NULL)
3755 return -EINVAL; 3744 return -EINVAL;
3756 3745
3757 return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen); 3746 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
3747
3748 /* We ignore other flags so far. */
3749 ifa_flags &= IFA_F_MANAGETEMPADDR;
3750
3751 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
3752 ifm->ifa_prefixlen);
3758} 3753}
3759 3754
3760static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, 3755static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
@@ -4363,7 +4358,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
4363 memset(&stats[items], 0, pad); 4358 memset(&stats[items], 0, pad);
4364} 4359}
4365 4360
4366static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib, 4361static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
4367 int items, int bytes, size_t syncpoff) 4362 int items, int bytes, size_t syncpoff)
4368{ 4363{
4369 int i; 4364 int i;
@@ -4383,7 +4378,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
4383{ 4378{
4384 switch (attrtype) { 4379 switch (attrtype) {
4385 case IFLA_INET6_STATS: 4380 case IFLA_INET6_STATS:
4386 __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6, 4381 __snmp6_fill_stats64(stats, idev->stats.ipv6,
4387 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp)); 4382 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
4388 break; 4383 break;
4389 case IFLA_INET6_ICMP6STATS: 4384 case IFLA_INET6_ICMP6STATS:
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 4c11cbcf8308..e6960457f625 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -123,7 +123,7 @@ static void snmp6_free_dev(struct inet6_dev *idev)
123{ 123{
124 kfree(idev->stats.icmpv6msgdev); 124 kfree(idev->stats.icmpv6msgdev);
125 kfree(idev->stats.icmpv6dev); 125 kfree(idev->stats.icmpv6dev);
126 snmp_mib_free((void __percpu **)idev->stats.ipv6); 126 free_percpu(idev->stats.ipv6);
127} 127}
128 128
129/* Nobody refers to this device, we may destroy it. */ 129/* Nobody refers to this device, we may destroy it. */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d935889f1008..7cb4392690dd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -106,7 +106,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
106 struct inet_protosw *answer; 106 struct inet_protosw *answer;
107 struct proto *answer_prot; 107 struct proto *answer_prot;
108 unsigned char answer_flags; 108 unsigned char answer_flags;
109 char answer_no_check;
110 int try_loading_module = 0; 109 int try_loading_module = 0;
111 int err; 110 int err;
112 111
@@ -162,7 +161,6 @@ lookup_protocol:
162 161
163 sock->ops = answer->ops; 162 sock->ops = answer->ops;
164 answer_prot = answer->prot; 163 answer_prot = answer->prot;
165 answer_no_check = answer->no_check;
166 answer_flags = answer->flags; 164 answer_flags = answer->flags;
167 rcu_read_unlock(); 165 rcu_read_unlock();
168 166
@@ -176,7 +174,6 @@ lookup_protocol:
176 sock_init_data(sock, sk); 174 sock_init_data(sock, sk);
177 175
178 err = 0; 176 err = 0;
179 sk->sk_no_check = answer_no_check;
180 if (INET_PROTOSW_REUSE & answer_flags) 177 if (INET_PROTOSW_REUSE & answer_flags)
181 sk->sk_reuse = SK_CAN_REUSE; 178 sk->sk_reuse = SK_CAN_REUSE;
182 179
@@ -715,33 +712,25 @@ static int __net_init ipv6_init_mibs(struct net *net)
715{ 712{
716 int i; 713 int i;
717 714
718 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, 715 net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib);
719 sizeof(struct udp_mib), 716 if (!net->mib.udp_stats_in6)
720 __alignof__(struct udp_mib)) < 0)
721 return -ENOMEM; 717 return -ENOMEM;
722 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6, 718 net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib);
723 sizeof(struct udp_mib), 719 if (!net->mib.udplite_stats_in6)
724 __alignof__(struct udp_mib)) < 0)
725 goto err_udplite_mib; 720 goto err_udplite_mib;
726 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics, 721 net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib);
727 sizeof(struct ipstats_mib), 722 if (!net->mib.ipv6_statistics)
728 __alignof__(struct ipstats_mib)) < 0)
729 goto err_ip_mib; 723 goto err_ip_mib;
730 724
731 for_each_possible_cpu(i) { 725 for_each_possible_cpu(i) {
732 struct ipstats_mib *af_inet6_stats; 726 struct ipstats_mib *af_inet6_stats;
733 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i); 727 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i);
734 u64_stats_init(&af_inet6_stats->syncp); 728 u64_stats_init(&af_inet6_stats->syncp);
735#if SNMP_ARRAY_SZ == 2
736 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
737 u64_stats_init(&af_inet6_stats->syncp);
738#endif
739 } 729 }
740 730
741 731
742 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, 732 net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
743 sizeof(struct icmpv6_mib), 733 if (!net->mib.icmpv6_statistics)
744 __alignof__(struct icmpv6_mib)) < 0)
745 goto err_icmp_mib; 734 goto err_icmp_mib;
746 net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib), 735 net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
747 GFP_KERNEL); 736 GFP_KERNEL);
@@ -750,22 +739,22 @@ static int __net_init ipv6_init_mibs(struct net *net)
750 return 0; 739 return 0;
751 740
752err_icmpmsg_mib: 741err_icmpmsg_mib:
753 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); 742 free_percpu(net->mib.icmpv6_statistics);
754err_icmp_mib: 743err_icmp_mib:
755 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); 744 free_percpu(net->mib.ipv6_statistics);
756err_ip_mib: 745err_ip_mib:
757 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); 746 free_percpu(net->mib.udplite_stats_in6);
758err_udplite_mib: 747err_udplite_mib:
759 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); 748 free_percpu(net->mib.udp_stats_in6);
760 return -ENOMEM; 749 return -ENOMEM;
761} 750}
762 751
763static void ipv6_cleanup_mibs(struct net *net) 752static void ipv6_cleanup_mibs(struct net *net)
764{ 753{
765 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); 754 free_percpu(net->mib.udp_stats_in6);
766 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); 755 free_percpu(net->mib.udplite_stats_in6);
767 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); 756 free_percpu(net->mib.ipv6_statistics);
768 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); 757 free_percpu(net->mib.icmpv6_statistics);
769 kfree(net->mib.icmpv6msg_statistics); 758 kfree(net->mib.icmpv6msg_statistics);
770} 759}
771 760
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7b326529e6a2..f6c84a6eb238 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
400 int len; 400 int len;
401 int hlimit; 401 int hlimit;
402 int err = 0; 402 int err = 0;
403 u32 mark = IP6_REPLY_MARK(net, skb->mark);
403 404
404 if ((u8 *)hdr < skb->head || 405 if ((u8 *)hdr < skb->head ||
405 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) 406 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
@@ -466,6 +467,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
466 fl6.daddr = hdr->saddr; 467 fl6.daddr = hdr->saddr;
467 if (saddr) 468 if (saddr)
468 fl6.saddr = *saddr; 469 fl6.saddr = *saddr;
470 fl6.flowi6_mark = mark;
469 fl6.flowi6_oif = iif; 471 fl6.flowi6_oif = iif;
470 fl6.fl6_icmp_type = type; 472 fl6.fl6_icmp_type = type;
471 fl6.fl6_icmp_code = code; 473 fl6.fl6_icmp_code = code;
@@ -474,6 +476,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
474 sk = icmpv6_xmit_lock(net); 476 sk = icmpv6_xmit_lock(net);
475 if (sk == NULL) 477 if (sk == NULL)
476 return; 478 return;
479 sk->sk_mark = mark;
477 np = inet6_sk(sk); 480 np = inet6_sk(sk);
478 481
479 if (!icmpv6_xrlim_allow(sk, type, &fl6)) 482 if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -493,12 +496,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
493 if (IS_ERR(dst)) 496 if (IS_ERR(dst))
494 goto out; 497 goto out;
495 498
496 if (ipv6_addr_is_multicast(&fl6.daddr)) 499 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
497 hlimit = np->mcast_hops;
498 else
499 hlimit = np->hop_limit;
500 if (hlimit < 0)
501 hlimit = ip6_dst_hoplimit(dst);
502 500
503 msg.skb = skb; 501 msg.skb = skb;
504 msg.offset = skb_network_offset(skb); 502 msg.offset = skb_network_offset(skb);
@@ -556,6 +554,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
556 int err = 0; 554 int err = 0;
557 int hlimit; 555 int hlimit;
558 u8 tclass; 556 u8 tclass;
557 u32 mark = IP6_REPLY_MARK(net, skb->mark);
559 558
560 saddr = &ipv6_hdr(skb)->daddr; 559 saddr = &ipv6_hdr(skb)->daddr;
561 560
@@ -574,11 +573,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
574 fl6.saddr = *saddr; 573 fl6.saddr = *saddr;
575 fl6.flowi6_oif = skb->dev->ifindex; 574 fl6.flowi6_oif = skb->dev->ifindex;
576 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; 575 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
576 fl6.flowi6_mark = mark;
577 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 577 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
578 578
579 sk = icmpv6_xmit_lock(net); 579 sk = icmpv6_xmit_lock(net);
580 if (sk == NULL) 580 if (sk == NULL)
581 return; 581 return;
582 sk->sk_mark = mark;
582 np = inet6_sk(sk); 583 np = inet6_sk(sk);
583 584
584 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) 585 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -593,12 +594,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
593 if (IS_ERR(dst)) 594 if (IS_ERR(dst))
594 goto out; 595 goto out;
595 596
596 if (ipv6_addr_is_multicast(&fl6.daddr)) 597 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
597 hlimit = np->mcast_hops;
598 else
599 hlimit = np->hop_limit;
600 if (hlimit < 0)
601 hlimit = ip6_dst_hoplimit(dst);
602 598
603 idev = __in6_dev_get(skb->dev); 599 idev = __in6_dev_get(skb->dev);
604 600
@@ -702,22 +698,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
702 saddr = &ipv6_hdr(skb)->saddr; 698 saddr = &ipv6_hdr(skb)->saddr;
703 daddr = &ipv6_hdr(skb)->daddr; 699 daddr = &ipv6_hdr(skb)->daddr;
704 700
705 /* Perform checksum. */ 701 if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
706 switch (skb->ip_summed) { 702 LIMIT_NETDEBUG(KERN_DEBUG
707 case CHECKSUM_COMPLETE: 703 "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
708 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 704 saddr, daddr);
709 skb->csum)) 705 goto csum_error;
710 break;
711 /* fall through */
712 case CHECKSUM_NONE:
713 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
714 IPPROTO_ICMPV6, 0));
715 if (__skb_checksum_complete(skb)) {
716 LIMIT_NETDEBUG(KERN_DEBUG
717 "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
718 saddr, daddr);
719 goto csum_error;
720 }
721 } 706 }
722 707
723 if (!pskb_pull(skb, sizeof(*hdr))) 708 if (!pskb_pull(skb, sizeof(*hdr)))
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index d4ade34ab375..a245e5ddffbd 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -81,7 +81,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
81 final_p = fl6_update_dst(fl6, np->opt, &final); 81 final_p = fl6_update_dst(fl6, np->opt, &final);
82 fl6->saddr = ireq->ir_v6_loc_addr; 82 fl6->saddr = ireq->ir_v6_loc_addr;
83 fl6->flowi6_oif = ireq->ir_iif; 83 fl6->flowi6_oif = ireq->ir_iif;
84 fl6->flowi6_mark = sk->sk_mark; 84 fl6->flowi6_mark = ireq->ir_mark;
85 fl6->fl6_dport = ireq->ir_rmt_port; 85 fl6->fl6_dport = ireq->ir_rmt_port;
86 fl6->fl6_sport = htons(ireq->ir_num); 86 fl6->fl6_sport = htons(ireq->ir_num);
87 security_req_classify_flow(req, flowi6_to_flowi(fl6)); 87 security_req_classify_flow(req, flowi6_to_flowi(fl6));
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index ee7a97f510cb..9a4d7322fb22 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -75,25 +75,50 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
75 return err; 75 return err;
76 } 76 }
77 77
78 if (uh->check == 0) { 78 /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
79 /* RFC 2460 section 8.1 says that we SHOULD log 79 * we accept a checksum of zero here. When we find the socket
80 this error. Well, it is reasonable. 80 * for the UDP packet we'll check if that socket allows zero checksum
81 */ 81 * for IPv6 (set by socket option).
82 LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", 82 */
83 &ipv6_hdr(skb)->saddr, ntohs(uh->source), 83 return skb_checksum_init_zero_check(skb, proto, uh->check,
84 &ipv6_hdr(skb)->daddr, ntohs(uh->dest)); 84 ip6_compute_pseudo);
85 return 1; 85}
86 } 86EXPORT_SYMBOL(udp6_csum_init);
87 if (skb->ip_summed == CHECKSUM_COMPLETE && 87
88 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 88/* Function to set UDP checksum for an IPv6 UDP packet. This is intended
89 skb->len, proto, skb->csum)) 89 * for the simple case like when setting the checksum for a UDP tunnel.
90 skb->ip_summed = CHECKSUM_UNNECESSARY; 90 */
91void udp6_set_csum(bool nocheck, struct sk_buff *skb,
92 const struct in6_addr *saddr,
93 const struct in6_addr *daddr, int len)
94{
95 struct udphdr *uh = udp_hdr(skb);
96
97 if (nocheck)
98 uh->check = 0;
99 else if (skb_is_gso(skb))
100 uh->check = ~udp_v6_check(len, saddr, daddr, 0);
101 else if (skb_dst(skb) && skb_dst(skb)->dev &&
102 (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
91 103
92 if (!skb_csum_unnecessary(skb)) 104 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
93 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
94 &ipv6_hdr(skb)->daddr,
95 skb->len, proto, 0));
96 105
97 return 0; 106 skb->ip_summed = CHECKSUM_PARTIAL;
107 skb->csum_start = skb_transport_header(skb) - skb->head;
108 skb->csum_offset = offsetof(struct udphdr, check);
109 uh->check = ~udp_v6_check(len, saddr, daddr, 0);
110 } else {
111 __wsum csum;
112
113 BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);
114
115 uh->check = 0;
116 csum = skb_checksum(skb, 0, len, 0);
117 uh->check = udp_v6_check(len, saddr, daddr, csum);
118 if (uh->check == 0)
119 uh->check = CSUM_MANGLED_0;
120
121 skb->ip_summed = CHECKSUM_UNNECESSARY;
122 }
98} 123}
99EXPORT_SYMBOL(udp6_csum_init); 124EXPORT_SYMBOL(udp6_set_csum);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 87891f5f57b5..cb4459bd1d29 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -71,8 +71,7 @@ static DEFINE_RWLOCK(fib6_walker_lock);
71#define FWS_INIT FWS_L 71#define FWS_INIT FWS_L
72#endif 72#endif
73 73
74static void fib6_prune_clones(struct net *net, struct fib6_node *fn, 74static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
75 struct rt6_info *rt);
76static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn); 75static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
77static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn); 76static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
78static int fib6_walk(struct fib6_walker_t *w); 77static int fib6_walk(struct fib6_walker_t *w);
@@ -941,7 +940,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
941 if (!err) { 940 if (!err) {
942 fib6_start_gc(info->nl_net, rt); 941 fib6_start_gc(info->nl_net, rt);
943 if (!(rt->rt6i_flags & RTF_CACHE)) 942 if (!(rt->rt6i_flags & RTF_CACHE))
944 fib6_prune_clones(info->nl_net, pn, rt); 943 fib6_prune_clones(info->nl_net, pn);
945 } 944 }
946 945
947out: 946out:
@@ -1375,7 +1374,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1375 pn = pn->parent; 1374 pn = pn->parent;
1376 } 1375 }
1377#endif 1376#endif
1378 fib6_prune_clones(info->nl_net, pn, rt); 1377 fib6_prune_clones(info->nl_net, pn);
1379 } 1378 }
1380 1379
1381 /* 1380 /*
@@ -1601,10 +1600,9 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1601 return 0; 1600 return 0;
1602} 1601}
1603 1602
1604static void fib6_prune_clones(struct net *net, struct fib6_node *fn, 1603static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
1605 struct rt6_info *rt)
1606{ 1604{
1607 fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt); 1605 fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
1608} 1606}
1609 1607
1610/* 1608/*
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 0961b5ef866d..4052694c6f2c 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -26,7 +26,6 @@
26#include <net/sock.h> 26#include <net/sock.h>
27 27
28#include <net/ipv6.h> 28#include <net/ipv6.h>
29#include <net/addrconf.h>
30#include <net/rawv6.h> 29#include <net/rawv6.h>
31#include <net/transp_v6.h> 30#include <net/transp_v6.h>
32 31
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 9d921462b57f..3873181ed856 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -72,6 +72,7 @@ struct ip6gre_net {
72}; 72};
73 73
74static struct rtnl_link_ops ip6gre_link_ops __read_mostly; 74static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
75static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
75static int ip6gre_tunnel_init(struct net_device *dev); 76static int ip6gre_tunnel_init(struct net_device *dev);
76static void ip6gre_tunnel_setup(struct net_device *dev); 77static void ip6gre_tunnel_setup(struct net_device *dev);
77static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); 78static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -353,10 +354,10 @@ failed_free:
353 354
354static void ip6gre_tunnel_uninit(struct net_device *dev) 355static void ip6gre_tunnel_uninit(struct net_device *dev)
355{ 356{
356 struct net *net = dev_net(dev); 357 struct ip6_tnl *t = netdev_priv(dev);
357 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 358 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
358 359
359 ip6gre_tunnel_unlink(ign, netdev_priv(dev)); 360 ip6gre_tunnel_unlink(ign, t);
360 dev_put(dev); 361 dev_put(dev);
361} 362}
362 363
@@ -467,17 +468,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
467 goto drop; 468 goto drop;
468 469
469 if (flags&GRE_CSUM) { 470 if (flags&GRE_CSUM) {
470 switch (skb->ip_summed) { 471 csum = skb_checksum_simple_validate(skb);
471 case CHECKSUM_COMPLETE:
472 csum = csum_fold(skb->csum);
473 if (!csum)
474 break;
475 /* fall through */
476 case CHECKSUM_NONE:
477 skb->csum = 0;
478 csum = __skb_checksum_complete(skb);
479 skb->ip_summed = CHECKSUM_COMPLETE;
480 }
481 offset += 4; 472 offset += 4;
482 } 473 }
483 if (flags&GRE_KEY) { 474 if (flags&GRE_KEY) {
@@ -611,8 +602,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
611 int encap_limit, 602 int encap_limit,
612 __u32 *pmtu) 603 __u32 *pmtu)
613{ 604{
614 struct net *net = dev_net(dev);
615 struct ip6_tnl *tunnel = netdev_priv(dev); 605 struct ip6_tnl *tunnel = netdev_priv(dev);
606 struct net *net = tunnel->net;
616 struct net_device *tdev; /* Device to other host */ 607 struct net_device *tdev; /* Device to other host */
617 struct ipv6hdr *ipv6h; /* Our new IP header */ 608 struct ipv6hdr *ipv6h; /* Our new IP header */
618 unsigned int max_headroom = 0; /* The extra header space needed */ 609 unsigned int max_headroom = 0; /* The extra header space needed */
@@ -979,7 +970,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
979 int strict = (ipv6_addr_type(&p->raddr) & 970 int strict = (ipv6_addr_type(&p->raddr) &
980 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 971 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
981 972
982 struct rt6_info *rt = rt6_lookup(dev_net(dev), 973 struct rt6_info *rt = rt6_lookup(t->net,
983 &p->raddr, &p->laddr, 974 &p->raddr, &p->laddr,
984 p->link, strict); 975 p->link, strict);
985 976
@@ -1063,13 +1054,12 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1063 int err = 0; 1054 int err = 0;
1064 struct ip6_tnl_parm2 p; 1055 struct ip6_tnl_parm2 p;
1065 struct __ip6_tnl_parm p1; 1056 struct __ip6_tnl_parm p1;
1066 struct ip6_tnl *t; 1057 struct ip6_tnl *t = netdev_priv(dev);
1067 struct net *net = dev_net(dev); 1058 struct net *net = t->net;
1068 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1059 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1069 1060
1070 switch (cmd) { 1061 switch (cmd) {
1071 case SIOCGETTUNNEL: 1062 case SIOCGETTUNNEL:
1072 t = NULL;
1073 if (dev == ign->fb_tunnel_dev) { 1063 if (dev == ign->fb_tunnel_dev) {
1074 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { 1064 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1075 err = -EFAULT; 1065 err = -EFAULT;
@@ -1077,9 +1067,9 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1077 } 1067 }
1078 ip6gre_tnl_parm_from_user(&p1, &p); 1068 ip6gre_tnl_parm_from_user(&p1, &p);
1079 t = ip6gre_tunnel_locate(net, &p1, 0); 1069 t = ip6gre_tunnel_locate(net, &p1, 0);
1070 if (t == NULL)
1071 t = netdev_priv(dev);
1080 } 1072 }
1081 if (t == NULL)
1082 t = netdev_priv(dev);
1083 memset(&p, 0, sizeof(p)); 1073 memset(&p, 0, sizeof(p));
1084 ip6gre_tnl_parm_to_user(&p, &t->parms); 1074 ip6gre_tnl_parm_to_user(&p, &t->parms);
1085 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 1075 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -1242,7 +1232,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1242 dev->flags |= IFF_NOARP; 1232 dev->flags |= IFF_NOARP;
1243 dev->iflink = 0; 1233 dev->iflink = 0;
1244 dev->addr_len = sizeof(struct in6_addr); 1234 dev->addr_len = sizeof(struct in6_addr);
1245 dev->features |= NETIF_F_NETNS_LOCAL;
1246 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1235 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1247} 1236}
1248 1237
@@ -1297,11 +1286,17 @@ static struct inet6_protocol ip6gre_protocol __read_mostly = {
1297 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1286 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1298}; 1287};
1299 1288
1300static void ip6gre_destroy_tunnels(struct ip6gre_net *ign, 1289static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
1301 struct list_head *head)
1302{ 1290{
1291 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1292 struct net_device *dev, *aux;
1303 int prio; 1293 int prio;
1304 1294
1295 for_each_netdev_safe(net, dev, aux)
1296 if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1297 dev->rtnl_link_ops == &ip6gre_tap_ops)
1298 unregister_netdevice_queue(dev, head);
1299
1305 for (prio = 0; prio < 4; prio++) { 1300 for (prio = 0; prio < 4; prio++) {
1306 int h; 1301 int h;
1307 for (h = 0; h < HASH_SIZE; h++) { 1302 for (h = 0; h < HASH_SIZE; h++) {
@@ -1310,7 +1305,12 @@ static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
1310 t = rtnl_dereference(ign->tunnels[prio][h]); 1305 t = rtnl_dereference(ign->tunnels[prio][h]);
1311 1306
1312 while (t != NULL) { 1307 while (t != NULL) {
1313 unregister_netdevice_queue(t->dev, head); 1308 /* If dev is in the same netns, it has already
1309 * been added to the list by the previous loop.
1310 */
1311 if (!net_eq(dev_net(t->dev), net))
1312 unregister_netdevice_queue(t->dev,
1313 head);
1314 t = rtnl_dereference(t->next); 1314 t = rtnl_dereference(t->next);
1315 } 1315 }
1316 } 1316 }
@@ -1329,6 +1329,11 @@ static int __net_init ip6gre_init_net(struct net *net)
1329 goto err_alloc_dev; 1329 goto err_alloc_dev;
1330 } 1330 }
1331 dev_net_set(ign->fb_tunnel_dev, net); 1331 dev_net_set(ign->fb_tunnel_dev, net);
1332 /* FB netdevice is special: we have one, and only one per netns.
1333 * Allowing to move it to another netns is clearly unsafe.
1334 */
1335 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1336
1332 1337
1333 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev); 1338 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1334 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops; 1339 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
@@ -1349,12 +1354,10 @@ err_alloc_dev:
1349 1354
1350static void __net_exit ip6gre_exit_net(struct net *net) 1355static void __net_exit ip6gre_exit_net(struct net *net)
1351{ 1356{
1352 struct ip6gre_net *ign;
1353 LIST_HEAD(list); 1357 LIST_HEAD(list);
1354 1358
1355 ign = net_generic(net, ip6gre_net_id);
1356 rtnl_lock(); 1359 rtnl_lock();
1357 ip6gre_destroy_tunnels(ign, &list); 1360 ip6gre_destroy_tunnels(net, &list);
1358 unregister_netdevice_many(&list); 1361 unregister_netdevice_many(&list);
1359 rtnl_unlock(); 1362 rtnl_unlock();
1360} 1363}
@@ -1531,15 +1534,14 @@ out:
1531static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], 1534static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
1532 struct nlattr *data[]) 1535 struct nlattr *data[])
1533{ 1536{
1534 struct ip6_tnl *t, *nt; 1537 struct ip6_tnl *t, *nt = netdev_priv(dev);
1535 struct net *net = dev_net(dev); 1538 struct net *net = nt->net;
1536 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1539 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1537 struct __ip6_tnl_parm p; 1540 struct __ip6_tnl_parm p;
1538 1541
1539 if (dev == ign->fb_tunnel_dev) 1542 if (dev == ign->fb_tunnel_dev)
1540 return -EINVAL; 1543 return -EINVAL;
1541 1544
1542 nt = netdev_priv(dev);
1543 ip6gre_netlink_parms(data, &p); 1545 ip6gre_netlink_parms(data, &p);
1544 1546
1545 t = ip6gre_tunnel_locate(net, &p, 0); 1547 t = ip6gre_tunnel_locate(net, &p, 0);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b2f091566f88..65eda2a8af48 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -97,9 +97,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
97 SKB_GSO_DODGY | 97 SKB_GSO_DODGY |
98 SKB_GSO_TCP_ECN | 98 SKB_GSO_TCP_ECN |
99 SKB_GSO_GRE | 99 SKB_GSO_GRE |
100 SKB_GSO_GRE_CSUM |
100 SKB_GSO_IPIP | 101 SKB_GSO_IPIP |
101 SKB_GSO_SIT | 102 SKB_GSO_SIT |
102 SKB_GSO_UDP_TUNNEL | 103 SKB_GSO_UDP_TUNNEL |
104 SKB_GSO_UDP_TUNNEL_CSUM |
103 SKB_GSO_MPLS | 105 SKB_GSO_MPLS |
104 SKB_GSO_TCPV6 | 106 SKB_GSO_TCPV6 |
105 0))) 107 0)))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index fbf11562b54c..cb9df0eb4023 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -219,7 +219,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
219 skb->mark = sk->sk_mark; 219 skb->mark = sk->sk_mark;
220 220
221 mtu = dst_mtu(dst); 221 mtu = dst_mtu(dst);
222 if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { 222 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
223 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), 223 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
224 IPSTATS_MIB_OUT, skb->len); 224 IPSTATS_MIB_OUT, skb->len);
225 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 225 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@ -347,11 +347,11 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
347 if (skb->len <= mtu) 347 if (skb->len <= mtu)
348 return false; 348 return false;
349 349
350 /* ipv6 conntrack defrag sets max_frag_size + local_df */ 350 /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) 351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
352 return true; 352 return true;
353 353
354 if (skb->local_df) 354 if (skb->ignore_df)
355 return false; 355 return false;
356 356
357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -537,6 +537,18 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
537 skb_copy_secmark(to, from); 537 skb_copy_secmark(to, from);
538} 538}
539 539
540static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
541{
542 static u32 ip6_idents_hashrnd __read_mostly;
543 u32 hash, id;
544
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 id = ip_idents_reserve(hash, 1);
549 fhdr->identification = htonl(id);
550}
551
540int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 552int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
541{ 553{
542 struct sk_buff *frag; 554 struct sk_buff *frag;
@@ -559,7 +571,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
559 /* We must not fragment if the socket is set to force MTU discovery 571 /* We must not fragment if the socket is set to force MTU discovery
560 * or if the skb it not generated by a local socket. 572 * or if the skb it not generated by a local socket.
561 */ 573 */
562 if (unlikely(!skb->local_df && skb->len > mtu) || 574 if (unlikely(!skb->ignore_df && skb->len > mtu) ||
563 (IP6CB(skb)->frag_max_size && 575 (IP6CB(skb)->frag_max_size &&
564 IP6CB(skb)->frag_max_size > mtu)) { 576 IP6CB(skb)->frag_max_size > mtu)) {
565 if (skb->sk && dst_allfrag(skb_dst(skb))) 577 if (skb->sk && dst_allfrag(skb_dst(skb)))
@@ -1234,7 +1246,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1234 sizeof(struct frag_hdr) : 0) + 1246 sizeof(struct frag_hdr) : 0) +
1235 rt->rt6i_nfheader_len; 1247 rt->rt6i_nfheader_len;
1236 1248
1237 if (ip6_sk_local_df(sk)) 1249 if (ip6_sk_ignore_df(sk))
1238 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN; 1250 maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1239 else 1251 else
1240 maxnonfragsize = mtu; 1252 maxnonfragsize = mtu;
@@ -1544,7 +1556,7 @@ int ip6_push_pending_frames(struct sock *sk)
1544 } 1556 }
1545 1557
1546 /* Allow local fragmentation. */ 1558 /* Allow local fragmentation. */
1547 skb->local_df = ip6_sk_local_df(sk); 1559 skb->ignore_df = ip6_sk_ignore_df(sk);
1548 1560
1549 *final_dst = fl6->daddr; 1561 *final_dst = fl6->daddr;
1550 __skb_pull(skb, skb_network_header_len(skb)); 1562 __skb_pull(skb, skb_network_header_len(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f6a66bb4114d..afa082458360 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -61,6 +61,7 @@
61MODULE_AUTHOR("Ville Nuorvala"); 61MODULE_AUTHOR("Ville Nuorvala");
62MODULE_DESCRIPTION("IPv6 tunneling device"); 62MODULE_DESCRIPTION("IPv6 tunneling device");
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64MODULE_ALIAS_RTNL_LINK("ip6tnl");
64MODULE_ALIAS_NETDEV("ip6tnl0"); 65MODULE_ALIAS_NETDEV("ip6tnl0");
65 66
66#ifdef IP6_TNL_DEBUG 67#ifdef IP6_TNL_DEBUG
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 6cc9f9371cc5..9aaa6bb229e4 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -795,15 +795,12 @@ static const struct net_device_ops vti6_netdev_ops = {
795 **/ 795 **/
796static void vti6_dev_setup(struct net_device *dev) 796static void vti6_dev_setup(struct net_device *dev)
797{ 797{
798 struct ip6_tnl *t;
799
800 dev->netdev_ops = &vti6_netdev_ops; 798 dev->netdev_ops = &vti6_netdev_ops;
801 dev->destructor = vti6_dev_free; 799 dev->destructor = vti6_dev_free;
802 800
803 dev->type = ARPHRD_TUNNEL6; 801 dev->type = ARPHRD_TUNNEL6;
804 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); 802 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
805 dev->mtu = ETH_DATA_LEN; 803 dev->mtu = ETH_DATA_LEN;
806 t = netdev_priv(dev);
807 dev->flags |= IFF_NOARP; 804 dev->flags |= IFF_NOARP;
808 dev->addr_len = sizeof(struct in6_addr); 805 dev->addr_len = sizeof(struct in6_addr);
809 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 806 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 84c7f33d0cf8..387d8b8fc18d 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -90,17 +90,9 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
90 if (nf_ct_is_untracked(ct)) 90 if (nf_ct_is_untracked(ct))
91 return NF_ACCEPT; 91 return NF_ACCEPT;
92 92
93 nat = nfct_nat(ct); 93 nat = nf_ct_nat_ext_add(ct);
94 if (!nat) { 94 if (nat == NULL)
95 /* NAT module was loaded late. */ 95 return NF_ACCEPT;
96 if (nf_ct_is_confirmed(ct))
97 return NF_ACCEPT;
98 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
99 if (nat == NULL) {
100 pr_debug("failed to add NAT extension\n");
101 return NF_ACCEPT;
102 }
103 }
104 96
105 switch (ctinfo) { 97 switch (ctinfo) {
106 case IP_CT_RELATED: 98 case IP_CT_RELATED:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 767ab8da8218..0d5279fd852a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -451,7 +451,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
451 } 451 }
452 sub_frag_mem_limit(&fq->q, head->truesize); 452 sub_frag_mem_limit(&fq->q, head->truesize);
453 453
454 head->local_df = 1; 454 head->ignore_df = 1;
455 head->next = NULL; 455 head->next = NULL;
456 head->dev = dev; 456 head->dev = dev;
457 head->tstamp = fq->q.stamp; 457 head->tstamp = fq->q.stamp;
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 9c3297a768fd..d189fcb437fe 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -47,15 +47,9 @@ static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
47 if (ct == NULL || nf_ct_is_untracked(ct)) 47 if (ct == NULL || nf_ct_is_untracked(ct))
48 return NF_ACCEPT; 48 return NF_ACCEPT;
49 49
50 nat = nfct_nat(ct); 50 nat = nf_ct_nat_ext_add(ct);
51 if (nat == NULL) { 51 if (nat == NULL)
52 /* Conntrack module was loaded late, can't add extension. */ 52 return NF_ACCEPT;
53 if (nf_ct_is_confirmed(ct))
54 return NF_ACCEPT;
55 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
56 if (nat == NULL)
57 return NF_ACCEPT;
58 }
59 53
60 switch (ctinfo) { 54 switch (ctinfo) {
61 case IP_CT_RELATED: 55 case IP_CT_RELATED:
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 56596ce390a1..5ec867e4a8b7 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -8,32 +8,6 @@
8#include <net/addrconf.h> 8#include <net/addrconf.h>
9#include <net/secure_seq.h> 9#include <net/secure_seq.h>
10 10
11void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
12{
13 static atomic_t ipv6_fragmentation_id;
14 struct in6_addr addr;
15 int ident;
16
17#if IS_ENABLED(CONFIG_IPV6)
18 struct inet_peer *peer;
19 struct net *net;
20
21 net = dev_net(rt->dst.dev);
22 peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
23 if (peer) {
24 fhdr->identification = htonl(inet_getid(peer, 0));
25 inet_putpeer(peer);
26 return;
27 }
28#endif
29 ident = atomic_inc_return(&ipv6_fragmentation_id);
30
31 addr = rt->rt6i_dst.addr;
32 addr.s6_addr32[0] ^= (__force __be32)ident;
33 fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
34}
35EXPORT_SYMBOL(ipv6_select_ident);
36
37int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 11int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
38{ 12{
39 u16 offset = sizeof(struct ipv6hdr); 13 u16 offset = sizeof(struct ipv6hdr);
@@ -104,6 +78,7 @@ int __ip6_local_out(struct sk_buff *skb)
104 if (len > IPV6_MAXPLEN) 78 if (len > IPV6_MAXPLEN)
105 len = 0; 79 len = 0;
106 ipv6_hdr(skb)->payload_len = htons(len); 80 ipv6_hdr(skb)->payload_len = htons(len);
81 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
107 82
108 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 83 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
109 skb_dst(skb)->dev, dst_output); 84 skb_dst(skb)->dev, dst_output);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index bda74291c3e0..5b7a1ed2aba9 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -51,7 +51,6 @@ static struct inet_protosw pingv6_protosw = {
51 .protocol = IPPROTO_ICMPV6, 51 .protocol = IPPROTO_ICMPV6,
52 .prot = &pingv6_prot, 52 .prot = &pingv6_prot,
53 .ops = &inet6_dgram_ops, 53 .ops = &inet6_dgram_ops,
54 .no_check = UDP_CSUM_DEFAULT,
55 .flags = INET_PROTOSW_REUSE, 54 .flags = INET_PROTOSW_REUSE,
56}; 55};
57 56
@@ -168,12 +167,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
168 pfh.wcheck = 0; 167 pfh.wcheck = 0;
169 pfh.family = AF_INET6; 168 pfh.family = AF_INET6;
170 169
171 if (ipv6_addr_is_multicast(&fl6.daddr)) 170 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
172 hlimit = np->mcast_hops;
173 else
174 hlimit = np->hop_limit;
175 if (hlimit < 0)
176 hlimit = ip6_dst_hoplimit(dst);
177 171
178 lock_sock(sk); 172 lock_sock(sk);
179 err = ip6_append_data(sk, ping_getfrag, &pfh, len, 173 err = ip6_append_data(sk, ping_getfrag, &pfh, len,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 091d066a57b3..3317440ea341 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -186,7 +186,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
186/* can be called either with percpu mib (pcpumib != NULL), 186/* can be called either with percpu mib (pcpumib != NULL),
187 * or shared one (smib != NULL) 187 * or shared one (smib != NULL)
188 */ 188 */
189static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib, 189static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
190 atomic_long_t *smib, 190 atomic_long_t *smib,
191 const struct snmp_mib *itemlist) 191 const struct snmp_mib *itemlist)
192{ 192{
@@ -201,7 +201,7 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
201 } 201 }
202} 202}
203 203
204static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib, 204static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
205 const struct snmp_mib *itemlist, size_t syncpoff) 205 const struct snmp_mib *itemlist, size_t syncpoff)
206{ 206{
207 int i; 207 int i;
@@ -215,14 +215,14 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
215{ 215{
216 struct net *net = (struct net *)seq->private; 216 struct net *net = (struct net *)seq->private;
217 217
218 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics, 218 snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
219 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); 219 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
220 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, 220 snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
221 NULL, snmp6_icmp6_list); 221 NULL, snmp6_icmp6_list);
222 snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs); 222 snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
223 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, 223 snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
224 NULL, snmp6_udp6_list); 224 NULL, snmp6_udp6_list);
225 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, 225 snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
226 NULL, snmp6_udplite6_list); 226 NULL, snmp6_udplite6_list);
227 return 0; 227 return 0;
228} 228}
@@ -245,7 +245,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
245 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 245 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
246 246
247 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 247 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
248 snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6, 248 snmp6_seq_show_item64(seq, idev->stats.ipv6,
249 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); 249 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
250 snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs, 250 snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
251 snmp6_icmp6_list); 251 snmp6_icmp6_list);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f29996e368a..b2dc60b0c764 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -873,14 +873,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
873 err = PTR_ERR(dst); 873 err = PTR_ERR(dst);
874 goto out; 874 goto out;
875 } 875 }
876 if (hlimit < 0) { 876 if (hlimit < 0)
877 if (ipv6_addr_is_multicast(&fl6.daddr)) 877 hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
878 hlimit = np->mcast_hops;
879 else
880 hlimit = np->hop_limit;
881 if (hlimit < 0)
882 hlimit = ip6_dst_hoplimit(dst);
883 }
884 878
885 if (tclass < 0) 879 if (tclass < 0)
886 tclass = np->tclass; 880 tclass = np->tclass;
@@ -1328,7 +1322,6 @@ static struct inet_protosw rawv6_protosw = {
1328 .protocol = IPPROTO_IP, /* wild card */ 1322 .protocol = IPPROTO_IP, /* wild card */
1329 .prot = &rawv6_prot, 1323 .prot = &rawv6_prot,
1330 .ops = &inet6_sockraw_ops, 1324 .ops = &inet6_sockraw_ops,
1331 .no_check = UDP_CSUM_DEFAULT,
1332 .flags = INET_PROTOSW_REUSE, 1325 .flags = INET_PROTOSW_REUSE,
1333}; 1326};
1334 1327
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6ebdb7b6744c..f23fbd28a501 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1176,7 +1176,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1176 1176
1177 memset(&fl6, 0, sizeof(fl6)); 1177 memset(&fl6, 0, sizeof(fl6));
1178 fl6.flowi6_oif = oif; 1178 fl6.flowi6_oif = oif;
1179 fl6.flowi6_mark = mark; 1179 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1180 fl6.daddr = iph->daddr; 1180 fl6.daddr = iph->daddr;
1181 fl6.saddr = iph->saddr; 1181 fl6.saddr = iph->saddr;
1182 fl6.flowlabel = ip6_flowinfo(iph); 1182 fl6.flowlabel = ip6_flowinfo(iph);
@@ -1455,7 +1455,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
1455 goto out; 1455 goto out;
1456 1456
1457 net->ipv6.ip6_rt_gc_expire++; 1457 net->ipv6.ip6_rt_gc_expire++;
1458 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size); 1458 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1459 entries = dst_entries_get_slow(ops); 1459 entries = dst_entries_get_slow(ops);
1460 if (entries < ops->gc_thresh) 1460 if (entries < ops->gc_thresh)
1461 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; 1461 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e5a453ca302e..4f408176dc64 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -560,12 +560,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
560 560
561 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 561 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
562 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 562 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
563 t->dev->ifindex, 0, IPPROTO_IPV6, 0); 563 t->parms.link, 0, IPPROTO_IPV6, 0);
564 err = 0; 564 err = 0;
565 goto out; 565 goto out;
566 } 566 }
567 if (type == ICMP_REDIRECT) { 567 if (type == ICMP_REDIRECT) {
568 ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0, 568 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
569 IPPROTO_IPV6, 0); 569 IPPROTO_IPV6, 0);
570 err = 0; 570 err = 0;
571 goto out; 571 goto out;
@@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
1828module_init(sit_init); 1828module_init(sit_init);
1829module_exit(sit_cleanup); 1829module_exit(sit_cleanup);
1830MODULE_LICENSE("GPL"); 1830MODULE_LICENSE("GPL");
1831MODULE_ALIAS_RTNL_LINK("sit");
1831MODULE_ALIAS_NETDEV("sit0"); 1832MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb53a5e73c1a..a822b880689b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -216,6 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
216 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 216 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
217 ireq->ir_iif = inet6_iif(skb); 217 ireq->ir_iif = inet6_iif(skb);
218 218
219 ireq->ir_mark = inet_request_mark(sk, skb);
220
219 req->expires = 0UL; 221 req->expires = 0UL;
220 req->num_retrans = 0; 222 req->num_retrans = 0;
221 ireq->ecn_ok = ecn_ok; 223 ireq->ecn_ok = ecn_ok;
@@ -242,7 +244,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
242 final_p = fl6_update_dst(&fl6, np->opt, &final); 244 final_p = fl6_update_dst(&fl6, np->opt, &final);
243 fl6.saddr = ireq->ir_v6_loc_addr; 245 fl6.saddr = ireq->ir_v6_loc_addr;
244 fl6.flowi6_oif = sk->sk_bound_dev_if; 246 fl6.flowi6_oif = sk->sk_bound_dev_if;
245 fl6.flowi6_mark = sk->sk_mark; 247 fl6.flowi6_mark = ireq->ir_mark;
246 fl6.fl6_dport = ireq->ir_rmt_port; 248 fl6.fl6_dport = ireq->ir_rmt_port;
247 fl6.fl6_sport = inet_sk(sk)->inet_sport; 249 fl6.fl6_sport = inet_sk(sk)->inet_sport;
248 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 250 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 7f405a168822..058f3eca2e53 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
38 .mode = 0644, 38 .mode = 0644,
39 .proc_handler = proc_dointvec 39 .proc_handler = proc_dointvec
40 }, 40 },
41 {
42 .procname = "fwmark_reflect",
43 .data = &init_net.ipv6.sysctl.fwmark_reflect,
44 .maxlen = sizeof(int),
45 .mode = 0644,
46 .proc_handler = proc_dointvec
47 },
41 { } 48 { }
42}; 49};
43 50
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e289830ed6e3..229239ad96b1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -340,7 +340,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
340 struct sock *sk; 340 struct sock *sk;
341 int err; 341 int err;
342 struct tcp_sock *tp; 342 struct tcp_sock *tp;
343 __u32 seq; 343 struct request_sock *fastopen;
344 __u32 seq, snd_una;
344 struct net *net = dev_net(skb->dev); 345 struct net *net = dev_net(skb->dev);
345 346
346 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, 347 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
@@ -371,8 +372,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
371 372
372 tp = tcp_sk(sk); 373 tp = tcp_sk(sk);
373 seq = ntohl(th->seq); 374 seq = ntohl(th->seq);
375 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
376 fastopen = tp->fastopen_rsk;
377 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
374 if (sk->sk_state != TCP_LISTEN && 378 if (sk->sk_state != TCP_LISTEN &&
375 !between(seq, tp->snd_una, tp->snd_nxt)) { 379 !between(seq, snd_una, tp->snd_nxt)) {
376 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 380 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
377 goto out; 381 goto out;
378 } 382 }
@@ -436,8 +440,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
436 goto out; 440 goto out;
437 441
438 case TCP_SYN_SENT: 442 case TCP_SYN_SENT:
439 case TCP_SYN_RECV: /* Cannot happen. 443 case TCP_SYN_RECV:
440 It can, it SYNs are crossed. --ANK */ 444 /* Only in fast or simultaneous open. If a fast open socket is
445 * is already accepted it is treated as a connected one below.
446 */
447 if (fastopen && fastopen->sk == NULL)
448 break;
449
441 if (!sock_owned_by_user(sk)) { 450 if (!sock_owned_by_user(sk)) {
442 sk->sk_err = err; 451 sk->sk_err = err;
443 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 452 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
@@ -463,7 +472,8 @@ out:
463static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, 472static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
464 struct flowi6 *fl6, 473 struct flowi6 *fl6,
465 struct request_sock *req, 474 struct request_sock *req,
466 u16 queue_mapping) 475 u16 queue_mapping,
476 struct tcp_fastopen_cookie *foc)
467{ 477{
468 struct inet_request_sock *ireq = inet_rsk(req); 478 struct inet_request_sock *ireq = inet_rsk(req);
469 struct ipv6_pinfo *np = inet6_sk(sk); 479 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
474 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 484 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
475 goto done; 485 goto done;
476 486
477 skb = tcp_make_synack(sk, dst, req, NULL); 487 skb = tcp_make_synack(sk, dst, req, foc);
478 488
479 if (skb) { 489 if (skb) {
480 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, 490 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -498,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 	struct flowi6 fl6;
 	int res;
 
-	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
+	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -802,6 +812,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 		fl6.flowi6_oif = inet6_iif(skb);
 	else
 		fl6.flowi6_oif = oif;
+	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -917,7 +928,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
-	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+	 */
+	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+			tcp_rsk(req)->rcv_nxt,
 			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
@@ -969,8 +985,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
+	struct tcp_fastopen_cookie foc = { .len = -1 };
+	bool want_cookie = false, fastopen;
 	struct flowi6 fl6;
-	bool want_cookie = false;
+	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1001,7 +1019,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, NULL);
+	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
 
 	if (want_cookie && !tmp_opt.saw_tstamp)
 		tcp_clear_options(&tmp_opt);
@@ -1016,6 +1034,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	TCP_ECN_create_request(req, skb, sock_net(sk));
 
 	ireq->ir_iif = sk->sk_bound_dev_if;
+	ireq->ir_mark = inet_request_mark(sk, skb);
 
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
@@ -1074,19 +1093,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		isn = tcp_v6_init_sequence(skb);
 	}
 have_isn:
-	tcp_rsk(req)->snt_isn = isn;
 
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_release;
 
-	if (tcp_v6_send_synack(sk, dst, &fl6, req,
-			       skb_get_queue_mapping(skb)) ||
-	    want_cookie)
+	if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
 		goto drop_and_free;
 
+	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	tcp_rsk(req)->listener = NULL;
-	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = tcp_v6_send_synack(sk, dst, &fl6, req,
+				 skb_get_queue_mapping(skb), &foc);
+	if (!fastopen) {
+		if (err || want_cookie)
+			goto drop_and_free;
+
+		tcp_rsk(req)->listener = NULL;
+		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	}
 	return 0;
 
 drop_and_release:
@@ -1294,25 +1321,6 @@ out:
 	return NULL;
 }
 
-static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
-{
-	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
-				  &ipv6_hdr(skb)->daddr, skb->csum)) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			return 0;
-		}
-	}
-
-	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
-					      &ipv6_hdr(skb)->saddr,
-					      &ipv6_hdr(skb)->daddr, 0));
-
-	if (skb->len <= 76)
-		return __skb_checksum_complete(skb);
-	return 0;
-}
-
 /* The socket must have its spinlock held when we get
  * here.
  *
@@ -1486,7 +1494,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, th->doff*4))
 		goto discard_it;
 
-	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
+	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
 		goto csum_error;
 
 	th = tcp_hdr(skb);
@@ -1779,6 +1787,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
+	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
 
 	dest = &sp->sk_v6_daddr;
 	src = &sp->sk_v6_rcv_saddr;
@@ -1821,7 +1830,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
-		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
+		   sp->sk_state == TCP_LISTEN ?
+			(fastopenq ? fastopenq->max_qlen : 0) :
+			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
 }
 
@@ -1981,7 +1992,6 @@ static struct inet_protosw tcpv6_protosw = {
 	.protocol	= IPPROTO_TCP,
 	.prot		= &tcpv6_prot,
 	.ops		= &inet6_stream_ops,
-	.no_check	= 0,
 	.flags		= INET_PROTOSW_PERMANENT |
 			  INET_PROTOSW_ICSK,
 };
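
The tcp_ipv6.c hunks above wire TCP Fast Open into the IPv6 path. The key behavioural change in tcp_v6_err() is that, while a Fast Open request is still pending, the ICMP-quoted sequence number is validated against the request's ISN instead of tp->snd_una. Below is a minimal standalone sketch of that window check; it is not kernel code, and the types and helper names (fake_tcp_sock, icmp_seq_in_window) are simplified stand-ins for the kernel's tcp_sock/request_sock fields.

/* Standalone illustration of the TFO-aware ICMP sequence check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe test that seq1 lies in [seq2, seq3]; same idea as the kernel's between(). */
static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

struct fake_tcp_sock {
	uint32_t snd_una;		/* oldest unacknowledged sequence number */
	uint32_t snd_nxt;		/* next sequence number to be sent */
	bool	 has_fastopen_rsk;	/* a Fast Open request is still pending */
	uint32_t fastopen_snt_isn;	/* ISN recorded in that pending request */
};

/* Return true if an ICMP-quoted sequence number should be trusted. */
static bool icmp_seq_in_window(const struct fake_tcp_sock *tp, uint32_t seq)
{
	/* With a pending Fast Open request, snd_una has not been set up from
	 * the request yet, so fall back to the ISN stored in the request.
	 */
	uint32_t snd_una = tp->has_fastopen_rsk ? tp->fastopen_snt_isn
						: tp->snd_una;

	return between(seq, snd_una, tp->snd_nxt);
}

int main(void)
{
	struct fake_tcp_sock tp = {
		.snd_una = 1000, .snd_nxt = 1500,
		.has_fastopen_rsk = true, .fastopen_snt_isn = 900,
	};

	/* 950 is outside [snd_una, snd_nxt] but inside [ISN, snd_nxt]. */
	printf("seq 950 accepted: %d\n", icmp_seq_in_window(&tp, 950));
	printf("seq 800 accepted: %d\n", icmp_seq_in_window(&tp, 800));
	return 0;
}

With a pending Fast Open request the left edge drops back to the ISN, so ICMP errors referring to the SYN-ACK's data are not discarded as out-of-window.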
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1e586d92260e..95c834799288 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -634,6 +634,10 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
 		int ret;
 
+		/* Verify checksum before giving to encap */
+		if (udp_lib_checksum_complete(skb))
+			goto csum_error;
+
 		ret = encap_rcv(sk, skb);
 		if (ret <= 0) {
 			UDP_INC_STATS_BH(sock_net(sk),
@@ -701,17 +705,16 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 				      int dif)
 {
 	struct hlist_nulls_node *node;
-	struct sock *s = sk;
 	unsigned short num = ntohs(loc_port);
 
-	sk_nulls_for_each_from(s, node) {
-		struct inet_sock *inet = inet_sk(s);
+	sk_nulls_for_each_from(sk, node) {
+		struct inet_sock *inet = inet_sk(sk);
 
-		if (!net_eq(sock_net(s), net))
+		if (!net_eq(sock_net(sk), net))
 			continue;
 
-		if (udp_sk(s)->udp_port_hash == num &&
-		    s->sk_family == PF_INET6) {
+		if (udp_sk(sk)->udp_port_hash == num &&
+		    sk->sk_family == PF_INET6) {
 			if (inet->inet_dport) {
 				if (inet->inet_dport != rmt_port)
 					continue;
@@ -720,16 +723,16 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
 				continue;
 
-			if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
+			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
 				continue;
 
 			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
 				if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
 					continue;
 			}
-			if (!inet6_mc_check(s, loc_addr, rmt_addr))
+			if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 				continue;
-			return s;
+			return sk;
 		}
 	}
 	return NULL;
@@ -760,6 +763,17 @@ static void flush_stack(struct sock **stack, unsigned int count,
 		if (unlikely(skb1))
 			kfree_skb(skb1);
 }
+
+static void udp6_csum_zero_error(struct sk_buff *skb)
+{
+	/* RFC 2460 section 8.1 says that we SHOULD log
+	 * this error. Well, it is reasonable.
+	 */
+	LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
+		       &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
+		       &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
+}
+
 /*
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
@@ -779,7 +793,12 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	dif = inet6_iif(skb);
 	sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
 	while (sk) {
-		stack[count++] = sk;
+		/* If zero checksum and no_check is not on for
+		 * the socket then skip it.
+		 */
+		if (uh->check || udp_sk(sk)->no_check6_rx)
+			stack[count++] = sk;
+
 		sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
 				       uh->source, saddr, dif);
 		if (unlikely(count == ARRAY_SIZE(stack))) {
@@ -867,6 +886,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (sk != NULL) {
 		int ret;
 
+		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+			sock_put(sk);
+			udp6_csum_zero_error(skb);
+			goto csum_error;
+		}
+
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
@@ -879,6 +904,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		return 0;
 	}
 
+	if (!uh->check) {
+		udp6_csum_zero_error(skb);
+		goto csum_error;
+	}
+
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto discard;
 
@@ -1006,7 +1036,10 @@ static int udp_v6_push_pending_frames(struct sock *sk)
 
 	if (is_udplite)
 		csum = udplite_csum_outgoing(sk, skb);
-	else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+	else if (up->no_check6_tx) {   /* UDP csum disabled */
+		skb->ip_summed = CHECKSUM_NONE;
+		goto send;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
 				     up->len);
 		goto send;
@@ -1232,14 +1265,8 @@ do_udp_sendmsg:
 		goto out;
 	}
 
-	if (hlimit < 0) {
-		if (ipv6_addr_is_multicast(&fl6.daddr))
-			hlimit = np->mcast_hops;
-		else
-			hlimit = np->hop_limit;
-		if (hlimit < 0)
-			hlimit = ip6_dst_hoplimit(dst);
-	}
+	if (hlimit < 0)
+		hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
 	if (tclass < 0)
 		tclass = np->tclass;
@@ -1479,7 +1506,6 @@ static struct inet_protosw udpv6_protosw = {
 	.protocol =  IPPROTO_UDP,
 	.prot =      &udpv6_prot,
 	.ops =       &inet6_dgram_ops,
-	.no_check =  UDP_CSUM_DEFAULT,
 	.flags =     INET_PROTOSW_PERMANENT,
 };
 
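The udp.c hunks above tighten zero-checksum handling: a UDPv6 datagram with uh->check == 0 is only delivered when the receiving socket has opted in via no_check6_rx, and a socket with no_check6_tx set may skip checksumming on transmit (these are the fields behind the UDP_NO_CHECK6_RX/UDP_NO_CHECK6_TX socket options); otherwise the packet is counted as a checksum error. Below is a standalone sketch of the receive-side decision; it is not kernel code, and fake_udp6_sock/udp6_rx_csum_ok are simplified stand-ins introduced only for illustration.

/* Standalone illustration of the UDPv6 zero-checksum receive policy. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_udp6_sock {
	bool no_check6_rx;	/* socket accepts zero UDP checksums (e.g. a tunnel) */
};

/* Decide whether a received UDPv6 datagram may be delivered to this socket. */
static bool udp6_rx_csum_ok(const struct fake_udp6_sock *up, uint16_t uh_check)
{
	/* A non-zero checksum is verified elsewhere; here we only apply the
	 * zero-checksum rule: plain UDPv6 requires a checksum, but a socket
	 * that set no_check6_rx may accept a zero one.
	 */
	if (uh_check != 0)
		return true;
	return up->no_check6_rx;
}

int main(void)
{
	struct fake_udp6_sock plain  = { .no_check6_rx = false };
	struct fake_udp6_sock tunnel = { .no_check6_rx = true };

	printf("plain socket, check=0:  %s\n",
	       udp6_rx_csum_ok(&plain, 0) ? "deliver" : "csum_error");
	printf("tunnel socket, check=0: %s\n",
	       udp6_rx_csum_ok(&tunnel, 0) ? "deliver" : "csum_error");
	return 0;
}

The multicast path applies the same rule per matching socket, which is why __udp6_lib_mcast_deliver now filters candidates before stacking them.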
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b261ee8b83fc..0ae3d98f83e0 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -63,7 +63,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	if (unlikely(type & ~(SKB_GSO_UDP |
 			      SKB_GSO_DODGY |
 			      SKB_GSO_UDP_TUNNEL |
+			      SKB_GSO_UDP_TUNNEL_CSUM |
 			      SKB_GSO_GRE |
+			      SKB_GSO_GRE_CSUM |
 			      SKB_GSO_IPIP |
 			      SKB_GSO_SIT |
 			      SKB_GSO_MPLS) ||
@@ -76,7 +78,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		goto out;
 	}
 
-	if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
+	if (skb->encapsulation && skb_shinfo(skb)->gso_type &
+	    (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
 		segs = skb_udp_tunnel_segment(skb, features);
 	else {
 		/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index dfcc4be46898..9cf097e206e9 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -64,7 +64,6 @@ static struct inet_protosw udplite6_protosw = {
 	.protocol	= IPPROTO_UDPLITE,
 	.prot		= &udplitev6_prot,
 	.ops		= &inet6_dgram_ops,
-	.no_check	= 0,
 	.flags		= INET_PROTOSW_PERMANENT,
 };
 
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index b930d080c66f..433672d07d0b 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -78,7 +78,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 
-	if (!skb->local_df && skb->len > mtu) {
+	if (!skb->ignore_df && skb->len > mtu) {
 		skb->dev = dst->dev;
 
 		if (xfrm6_local_dontfrag(skb))
@@ -114,7 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	skb->local_df = 1;
+	skb->ignore_df = 1;
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -153,7 +153,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
 		xfrm6_local_rxpmtu(skb, mtu);
 		return -EMSGSIZE;
-	} else if (!skb->local_df && skb->len > mtu && skb->sk) {
+	} else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
 		xfrm_local_error(skb, mtu);
 		return -EMSGSIZE;
 	}
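
The xfrm6_output.c hunks are part of the rename of skb->local_df to skb->ignore_df visible throughout this merge; the decision logic is unchanged: an oversized, locally generated packet is reported back to the sender only when the flag is not set. Below is a standalone sketch of that check; it is not kernel code, and fake_skb/report_local_mtu_error are simplified stand-ins introduced only for illustration.

/* Standalone illustration of the ignore_df / path-MTU error decision. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_skb {
	size_t len;		/* packet length in bytes */
	bool   ignore_df;	/* allow sending past the path MTU (old local_df) */
	bool   has_socket;	/* locally generated, i.e. has an attached socket */
};

/* Should this oversized packet be bounced back to the local sender as an error? */
static bool report_local_mtu_error(const struct fake_skb *skb, size_t mtu)
{
	return !skb->ignore_df && skb->len > mtu && skb->has_socket;
}

int main(void)
{
	struct fake_skb honoured = { .len = 1500, .ignore_df = false, .has_socket = true };
	struct fake_skb exempt   = { .len = 1500, .ignore_df = true,  .has_socket = true };

	printf("DF honoured, 1500 > 1280: report=%d\n", report_local_mtu_error(&honoured, 1280));
	printf("ignore_df,   1500 > 1280: report=%d\n", report_local_mtu_error(&exempt, 1280));
	return 0;
}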