author    Linus Torvalds <torvalds@linux-foundation.org>    2009-12-08 10:55:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-12-08 10:55:01 -0500
commit    d7fc02c7bae7b1cf69269992cf880a43a350cdaa (patch)
tree      a43d56fa72913a1cc98a0bbebe054d08581b3a7c /net/ipv6
parent    ee1262dbc65ce0b6234a915d8432171e8d77f518 (diff)
parent    28b4d5cc17c20786848cdc07b7ea237a309776bb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1815 commits)
  mac80211: fix reorder buffer release
  iwmc3200wifi: Enable wimax core through module parameter
  iwmc3200wifi: Add wifi-wimax coexistence mode as a module parameter
  iwmc3200wifi: Coex table command does not expect a response
  iwmc3200wifi: Update wiwi priority table
  iwlwifi: driver version track kernel version
  iwlwifi: indicate uCode type when fail dump error/event log
  iwl3945: remove duplicated event logging code
  b43: fix two warnings
  ipw2100: fix rebooting hang with driver loaded
  cfg80211: indent regulatory messages with spaces
  iwmc3200wifi: fix NULL pointer dereference in pmkid update
  mac80211: Fix TX status reporting for injected data frames
  ath9k: enable 2GHz band only if the device supports it
  airo: Fix integer overflow warning
  rt2x00: Fix padding bug on L2PAD devices.
  WE: Fix set events not propagated
  b43legacy: avoid PPC fault during resume
  b43: avoid PPC fault during resume
  tcp: fix a timewait refcnt race
  ...

Fix up conflicts due to sysctl cleanups (dead sysctl_check code and
CTL_UNNUMBERED removed) in
	kernel/sysctl_check.c
	net/ipv4/sysctl_net_ipv4.c
	net/ipv6/addrconf.c
	net/sctp/sysctl.c
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/Kconfig                                |  19
-rw-r--r--  net/ipv6/addrconf.c                             | 244
-rw-r--r--  net/ipv6/af_inet6.c                             |  53
-rw-r--r--  net/ipv6/ah6.c                                  | 354
-rw-r--r--  net/ipv6/anycast.c                              |  35
-rw-r--r--  net/ipv6/datagram.c                             |  48
-rw-r--r--  net/ipv6/esp6.c                                 |   2
-rw-r--r--  net/ipv6/fib6_rules.c                           |  24
-rw-r--r--  net/ipv6/inet6_connection_sock.c                |  10
-rw-r--r--  net/ipv6/inet6_hashtables.c                     |  29
-rw-r--r--  net/ipv6/ip6_flowlabel.c                        |  17
-rw-r--r--  net/ipv6/ip6_tunnel.c                           |  93
-rw-r--r--  net/ipv6/ip6mr.c                                |  17
-rw-r--r--  net/ipv6/ipv6_sockglue.c                        |   9
-rw-r--r--  net/ipv6/mcast.c                                |  51
-rw-r--r--  net/ipv6/ndisc.c                                |   1
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c                  |   7
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c                 |  42
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c                   |   4
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c                |   4
-rw-r--r--  net/ipv6/netfilter/ip6t_ah.c                    |  19
-rw-r--r--  net/ipv6/netfilter/ip6t_frag.c                  |  47
-rw-r--r--  net/ipv6/netfilter/ip6t_rt.c                    |   9
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c            |   4
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c            |  14
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c  |  12
-rw-r--r--  net/ipv6/raw.c                                  |  60
-rw-r--r--  net/ipv6/reassembly.c                           |  17
-rw-r--r--  net/ipv6/route.c                                |   3
-rw-r--r--  net/ipv6/sit.c                                  | 338
-rw-r--r--  net/ipv6/syncookies.c                           |  34
-rw-r--r--  net/ipv6/tcp_ipv6.c                             | 143
-rw-r--r--  net/ipv6/udp.c                                  | 265
-rw-r--r--  net/ipv6/udplite.c                              |   1
-rw-r--r--  net/ipv6/xfrm6_tunnel.c                         |  47
35 files changed, 1346 insertions, 730 deletions
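Editorial aside (not part of the original cgit page): given a local kernel tree, the diffstat above and the hunks that follow can be regenerated from the commit and first-parent IDs shown in the header. The commands below are an illustrative sketch using plain git, with abbreviated hashes taken from this page; they assume the page renders the merge's diff against its first parent.

    # Diffstat of the merge vs. its first parent, limited to net/ipv6
    git diff --stat ee1262dbc65c..d7fc02c7bae7 -- net/ipv6
    # Full patch text for the same range (should correspond to the hunks below)
    git diff ee1262dbc65c..d7fc02c7bae7 -- net/ipv6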
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index ead6c7a42f4..a578096152a 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -170,6 +170,25 @@ config IPV6_SIT
170 170
171 Saying M here will produce a module called sit. If unsure, say Y. 171 Saying M here will produce a module called sit. If unsure, say Y.
172 172
173config IPV6_SIT_6RD
174 bool "IPv6: IPv6 Rapid Deployment (6RD) (EXPERIMENTAL)"
175 depends on IPV6_SIT && EXPERIMENTAL
176 default n
177 ---help---
178 IPv6 Rapid Deployment (6rd; draft-ietf-softwire-ipv6-6rd) builds upon
179 mechanisms of 6to4 (RFC3056) to enable a service provider to rapidly
180 deploy IPv6 unicast service to IPv4 sites to which it provides
181 customer premise equipment. Like 6to4, it utilizes stateless IPv6 in
182 IPv4 encapsulation in order to transit IPv4-only network
183 infrastructure. Unlike 6to4, a 6rd service provider uses an IPv6
184 prefix of its own in place of the fixed 6to4 prefix.
185
186 With this option enabled, the SIT driver offers 6rd functionality by
187 providing additional ioctl API to configure the IPv6 Prefix for in
188 stead of static 2002::/16 for 6to4.
189
190 If unsure, say N.
191
173config IPV6_NDISC_NODETYPE 192config IPV6_NDISC_NODETYPE
174 bool 193 bool
175 194
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f918399c985..de7a194a64a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -481,9 +481,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
481 struct net_device *dev; 481 struct net_device *dev;
482 struct inet6_dev *idev; 482 struct inet6_dev *idev;
483 483
484 read_lock(&dev_base_lock); 484 rcu_read_lock();
485 for_each_netdev(net, dev) { 485 for_each_netdev_rcu(net, dev) {
486 rcu_read_lock();
487 idev = __in6_dev_get(dev); 486 idev = __in6_dev_get(dev);
488 if (idev) { 487 if (idev) {
489 int changed = (!idev->cnf.forwarding) ^ (!newf); 488 int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -491,9 +490,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
491 if (changed) 490 if (changed)
492 dev_forward_change(idev); 491 dev_forward_change(idev);
493 } 492 }
494 rcu_read_unlock();
495 } 493 }
496 read_unlock(&dev_base_lock); 494 rcu_read_unlock();
497} 495}
498 496
499static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) 497static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
@@ -1137,10 +1135,9 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
1137 hiscore->rule = -1; 1135 hiscore->rule = -1;
1138 hiscore->ifa = NULL; 1136 hiscore->ifa = NULL;
1139 1137
1140 read_lock(&dev_base_lock);
1141 rcu_read_lock(); 1138 rcu_read_lock();
1142 1139
1143 for_each_netdev(net, dev) { 1140 for_each_netdev_rcu(net, dev) {
1144 struct inet6_dev *idev; 1141 struct inet6_dev *idev;
1145 1142
1146 /* Candidate Source Address (section 4) 1143 /* Candidate Source Address (section 4)
@@ -1235,7 +1232,6 @@ try_nextdev:
1235 read_unlock_bh(&idev->lock); 1232 read_unlock_bh(&idev->lock);
1236 } 1233 }
1237 rcu_read_unlock(); 1234 rcu_read_unlock();
1238 read_unlock(&dev_base_lock);
1239 1235
1240 if (!hiscore->ifa) 1236 if (!hiscore->ifa)
1241 return -EADDRNOTAVAIL; 1237 return -EADDRNOTAVAIL;
@@ -3485,85 +3481,114 @@ enum addr_type_t
3485 ANYCAST_ADDR, 3481 ANYCAST_ADDR,
3486}; 3482};
3487 3483
3484/* called with rcu_read_lock() */
3485static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3486 struct netlink_callback *cb, enum addr_type_t type,
3487 int s_ip_idx, int *p_ip_idx)
3488{
3489 struct inet6_ifaddr *ifa;
3490 struct ifmcaddr6 *ifmca;
3491 struct ifacaddr6 *ifaca;
3492 int err = 1;
3493 int ip_idx = *p_ip_idx;
3494
3495 read_lock_bh(&idev->lock);
3496 switch (type) {
3497 case UNICAST_ADDR:
3498 /* unicast address incl. temp addr */
3499 for (ifa = idev->addr_list; ifa;
3500 ifa = ifa->if_next, ip_idx++) {
3501 if (ip_idx < s_ip_idx)
3502 continue;
3503 err = inet6_fill_ifaddr(skb, ifa,
3504 NETLINK_CB(cb->skb).pid,
3505 cb->nlh->nlmsg_seq,
3506 RTM_NEWADDR,
3507 NLM_F_MULTI);
3508 if (err <= 0)
3509 break;
3510 }
3511 break;
3512 case MULTICAST_ADDR:
3513 /* multicast address */
3514 for (ifmca = idev->mc_list; ifmca;
3515 ifmca = ifmca->next, ip_idx++) {
3516 if (ip_idx < s_ip_idx)
3517 continue;
3518 err = inet6_fill_ifmcaddr(skb, ifmca,
3519 NETLINK_CB(cb->skb).pid,
3520 cb->nlh->nlmsg_seq,
3521 RTM_GETMULTICAST,
3522 NLM_F_MULTI);
3523 if (err <= 0)
3524 break;
3525 }
3526 break;
3527 case ANYCAST_ADDR:
3528 /* anycast address */
3529 for (ifaca = idev->ac_list; ifaca;
3530 ifaca = ifaca->aca_next, ip_idx++) {
3531 if (ip_idx < s_ip_idx)
3532 continue;
3533 err = inet6_fill_ifacaddr(skb, ifaca,
3534 NETLINK_CB(cb->skb).pid,
3535 cb->nlh->nlmsg_seq,
3536 RTM_GETANYCAST,
3537 NLM_F_MULTI);
3538 if (err <= 0)
3539 break;
3540 }
3541 break;
3542 default:
3543 break;
3544 }
3545 read_unlock_bh(&idev->lock);
3546 *p_ip_idx = ip_idx;
3547 return err;
3548}
3549
3488static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, 3550static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3489 enum addr_type_t type) 3551 enum addr_type_t type)
3490{ 3552{
3553 struct net *net = sock_net(skb->sk);
3554 int h, s_h;
3491 int idx, ip_idx; 3555 int idx, ip_idx;
3492 int s_idx, s_ip_idx; 3556 int s_idx, s_ip_idx;
3493 int err = 1;
3494 struct net_device *dev; 3557 struct net_device *dev;
3495 struct inet6_dev *idev = NULL; 3558 struct inet6_dev *idev;
3496 struct inet6_ifaddr *ifa; 3559 struct hlist_head *head;
3497 struct ifmcaddr6 *ifmca; 3560 struct hlist_node *node;
3498 struct ifacaddr6 *ifaca;
3499 struct net *net = sock_net(skb->sk);
3500
3501 s_idx = cb->args[0];
3502 s_ip_idx = ip_idx = cb->args[1];
3503 3561
3504 idx = 0; 3562 s_h = cb->args[0];
3505 for_each_netdev(net, dev) { 3563 s_idx = idx = cb->args[1];
3506 if (idx < s_idx) 3564 s_ip_idx = ip_idx = cb->args[2];
3507 goto cont;
3508 if (idx > s_idx)
3509 s_ip_idx = 0;
3510 ip_idx = 0;
3511 if ((idev = in6_dev_get(dev)) == NULL)
3512 goto cont;
3513 read_lock_bh(&idev->lock);
3514 switch (type) {
3515 case UNICAST_ADDR:
3516 /* unicast address incl. temp addr */
3517 for (ifa = idev->addr_list; ifa;
3518 ifa = ifa->if_next, ip_idx++) {
3519 if (ip_idx < s_ip_idx)
3520 continue;
3521 err = inet6_fill_ifaddr(skb, ifa,
3522 NETLINK_CB(cb->skb).pid,
3523 cb->nlh->nlmsg_seq,
3524 RTM_NEWADDR,
3525 NLM_F_MULTI);
3526 }
3527 break;
3528 case MULTICAST_ADDR:
3529 /* multicast address */
3530 for (ifmca = idev->mc_list; ifmca;
3531 ifmca = ifmca->next, ip_idx++) {
3532 if (ip_idx < s_ip_idx)
3533 continue;
3534 err = inet6_fill_ifmcaddr(skb, ifmca,
3535 NETLINK_CB(cb->skb).pid,
3536 cb->nlh->nlmsg_seq,
3537 RTM_GETMULTICAST,
3538 NLM_F_MULTI);
3539 }
3540 break;
3541 case ANYCAST_ADDR:
3542 /* anycast address */
3543 for (ifaca = idev->ac_list; ifaca;
3544 ifaca = ifaca->aca_next, ip_idx++) {
3545 if (ip_idx < s_ip_idx)
3546 continue;
3547 err = inet6_fill_ifacaddr(skb, ifaca,
3548 NETLINK_CB(cb->skb).pid,
3549 cb->nlh->nlmsg_seq,
3550 RTM_GETANYCAST,
3551 NLM_F_MULTI);
3552 }
3553 break;
3554 default:
3555 break;
3556 }
3557 read_unlock_bh(&idev->lock);
3558 in6_dev_put(idev);
3559 3565
3560 if (err <= 0) 3566 rcu_read_lock();
3561 break; 3567 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3568 idx = 0;
3569 head = &net->dev_index_head[h];
3570 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3571 if (idx < s_idx)
3572 goto cont;
3573 if (idx > s_idx)
3574 s_ip_idx = 0;
3575 ip_idx = 0;
3576 if ((idev = __in6_dev_get(dev)) == NULL)
3577 goto cont;
3578
3579 if (in6_dump_addrs(idev, skb, cb, type,
3580 s_ip_idx, &ip_idx) <= 0)
3581 goto done;
3562cont: 3582cont:
3563 idx++; 3583 idx++;
3584 }
3564 } 3585 }
3565 cb->args[0] = idx; 3586done:
3566 cb->args[1] = ip_idx; 3587 rcu_read_unlock();
3588 cb->args[0] = h;
3589 cb->args[1] = idx;
3590 cb->args[2] = ip_idx;
3591
3567 return skb->len; 3592 return skb->len;
3568} 3593}
3569 3594
@@ -3708,6 +3733,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
3708#endif 3733#endif
3709 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; 3734 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
3710 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; 3735 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
3736 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
3711} 3737}
3712 3738
3713static inline size_t inet6_if_nlmsg_size(void) 3739static inline size_t inet6_if_nlmsg_size(void)
@@ -3826,28 +3852,39 @@ nla_put_failure:
3826static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) 3852static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3827{ 3853{
3828 struct net *net = sock_net(skb->sk); 3854 struct net *net = sock_net(skb->sk);
3829 int idx, err; 3855 int h, s_h;
3830 int s_idx = cb->args[0]; 3856 int idx = 0, s_idx;
3831 struct net_device *dev; 3857 struct net_device *dev;
3832 struct inet6_dev *idev; 3858 struct inet6_dev *idev;
3859 struct hlist_head *head;
3860 struct hlist_node *node;
3833 3861
3834 read_lock(&dev_base_lock); 3862 s_h = cb->args[0];
3835 idx = 0; 3863 s_idx = cb->args[1];
3836 for_each_netdev(net, dev) { 3864
3837 if (idx < s_idx) 3865 rcu_read_lock();
3838 goto cont; 3866 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3839 if ((idev = in6_dev_get(dev)) == NULL) 3867 idx = 0;
3840 goto cont; 3868 head = &net->dev_index_head[h];
3841 err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid, 3869 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3842 cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); 3870 if (idx < s_idx)
3843 in6_dev_put(idev); 3871 goto cont;
3844 if (err <= 0) 3872 idev = __in6_dev_get(dev);
3845 break; 3873 if (!idev)
3874 goto cont;
3875 if (inet6_fill_ifinfo(skb, idev,
3876 NETLINK_CB(cb->skb).pid,
3877 cb->nlh->nlmsg_seq,
3878 RTM_NEWLINK, NLM_F_MULTI) <= 0)
3879 goto out;
3846cont: 3880cont:
3847 idx++; 3881 idx++;
3882 }
3848 } 3883 }
3849 read_unlock(&dev_base_lock); 3884out:
3850 cb->args[0] = idx; 3885 rcu_read_unlock();
3886 cb->args[1] = idx;
3887 cb->args[0] = h;
3851 3888
3852 return skb->len; 3889 return skb->len;
3853} 3890}
@@ -4016,9 +4053,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
4016 struct net_device *dev; 4053 struct net_device *dev;
4017 struct inet6_dev *idev; 4054 struct inet6_dev *idev;
4018 4055
4019 read_lock(&dev_base_lock); 4056 rcu_read_lock();
4020 for_each_netdev(net, dev) { 4057 for_each_netdev_rcu(net, dev) {
4021 rcu_read_lock();
4022 idev = __in6_dev_get(dev); 4058 idev = __in6_dev_get(dev);
4023 if (idev) { 4059 if (idev) {
4024 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 4060 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -4026,9 +4062,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
4026 if (changed) 4062 if (changed)
4027 dev_disable_change(idev); 4063 dev_disable_change(idev);
4028 } 4064 }
4029 rcu_read_unlock();
4030 } 4065 }
4031 read_unlock(&dev_base_lock); 4066 rcu_read_unlock();
4032} 4067}
4033 4068
4034static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) 4069static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
@@ -4285,6 +4320,13 @@ static struct addrconf_sysctl_table
4285 .proc_handler = proc_dointvec, 4320 .proc_handler = proc_dointvec,
4286 }, 4321 },
4287 { 4322 {
4323 .procname = "force_tllao",
4324 .data = &ipv6_devconf.force_tllao,
4325 .maxlen = sizeof(int),
4326 .mode = 0644,
4327 .proc_handler = proc_dointvec
4328 },
4329 {
4288 /* sentinel */ 4330 /* sentinel */
4289 } 4331 }
4290 }, 4332 },
@@ -4385,7 +4427,7 @@ static int addrconf_init_net(struct net *net)
4385 all = &ipv6_devconf; 4427 all = &ipv6_devconf;
4386 dflt = &ipv6_devconf_dflt; 4428 dflt = &ipv6_devconf_dflt;
4387 4429
4388 if (net != &init_net) { 4430 if (!net_eq(net, &init_net)) {
4389 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); 4431 all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
4390 if (all == NULL) 4432 if (all == NULL)
4391 goto err_alloc_all; 4433 goto err_alloc_all;
@@ -4431,7 +4473,7 @@ static void addrconf_exit_net(struct net *net)
4431 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); 4473 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
4432 __addrconf_sysctl_unregister(net->ipv6.devconf_all); 4474 __addrconf_sysctl_unregister(net->ipv6.devconf_all);
4433#endif 4475#endif
4434 if (net != &init_net) { 4476 if (!net_eq(net, &init_net)) {
4435 kfree(net->ipv6.devconf_dflt); 4477 kfree(net->ipv6.devconf_dflt);
4436 kfree(net->ipv6.devconf_all); 4478 kfree(net->ipv6.devconf_all);
4437 } 4479 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e127a32f954..12e69d364dd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -95,7 +95,8 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
95 return (struct ipv6_pinfo *)(((u8 *)sk) + offset); 95 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
96} 96}
97 97
98static int inet6_create(struct net *net, struct socket *sock, int protocol) 98static int inet6_create(struct net *net, struct socket *sock, int protocol,
99 int kern)
99{ 100{
100 struct inet_sock *inet; 101 struct inet_sock *inet;
101 struct ipv6_pinfo *np; 102 struct ipv6_pinfo *np;
@@ -158,7 +159,7 @@ lookup_protocol:
158 } 159 }
159 160
160 err = -EPERM; 161 err = -EPERM;
161 if (answer->capability > 0 && !capable(answer->capability)) 162 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
162 goto out_rcu_unlock; 163 goto out_rcu_unlock;
163 164
164 sock->ops = answer->ops; 165 sock->ops = answer->ops;
@@ -185,7 +186,7 @@ lookup_protocol:
185 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; 186 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
186 187
187 if (SOCK_RAW == sock->type) { 188 if (SOCK_RAW == sock->type) {
188 inet->num = protocol; 189 inet->inet_num = protocol;
189 if (IPPROTO_RAW == protocol) 190 if (IPPROTO_RAW == protocol)
190 inet->hdrincl = 1; 191 inet->hdrincl = 1;
191 } 192 }
@@ -228,12 +229,12 @@ lookup_protocol:
228 */ 229 */
229 sk_refcnt_debug_inc(sk); 230 sk_refcnt_debug_inc(sk);
230 231
231 if (inet->num) { 232 if (inet->inet_num) {
232 /* It assumes that any protocol which allows 233 /* It assumes that any protocol which allows
233 * the user to assign a number at socket 234 * the user to assign a number at socket
234 * creation time automatically shares. 235 * creation time automatically shares.
235 */ 236 */
236 inet->sport = htons(inet->num); 237 inet->inet_sport = htons(inet->inet_num);
237 sk->sk_prot->hash(sk); 238 sk->sk_prot->hash(sk);
238 } 239 }
239 if (sk->sk_prot->init) { 240 if (sk->sk_prot->init) {
@@ -281,7 +282,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
281 lock_sock(sk); 282 lock_sock(sk);
282 283
283 /* Check these errors (active socket, double bind). */ 284 /* Check these errors (active socket, double bind). */
284 if (sk->sk_state != TCP_CLOSE || inet->num) { 285 if (sk->sk_state != TCP_CLOSE || inet->inet_num) {
285 err = -EINVAL; 286 err = -EINVAL;
286 goto out; 287 goto out;
287 } 288 }
@@ -314,6 +315,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
314 if (addr_type != IPV6_ADDR_ANY) { 315 if (addr_type != IPV6_ADDR_ANY) {
315 struct net_device *dev = NULL; 316 struct net_device *dev = NULL;
316 317
318 rcu_read_lock();
317 if (addr_type & IPV6_ADDR_LINKLOCAL) { 319 if (addr_type & IPV6_ADDR_LINKLOCAL) {
318 if (addr_len >= sizeof(struct sockaddr_in6) && 320 if (addr_len >= sizeof(struct sockaddr_in6) &&
319 addr->sin6_scope_id) { 321 addr->sin6_scope_id) {
@@ -326,12 +328,12 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
326 /* Binding to link-local address requires an interface */ 328 /* Binding to link-local address requires an interface */
327 if (!sk->sk_bound_dev_if) { 329 if (!sk->sk_bound_dev_if) {
328 err = -EINVAL; 330 err = -EINVAL;
329 goto out; 331 goto out_unlock;
330 } 332 }
331 dev = dev_get_by_index(net, sk->sk_bound_dev_if); 333 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
332 if (!dev) { 334 if (!dev) {
333 err = -ENODEV; 335 err = -ENODEV;
334 goto out; 336 goto out_unlock;
335 } 337 }
336 } 338 }
337 339
@@ -342,19 +344,16 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
342 if (!(addr_type & IPV6_ADDR_MULTICAST)) { 344 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
343 if (!ipv6_chk_addr(net, &addr->sin6_addr, 345 if (!ipv6_chk_addr(net, &addr->sin6_addr,
344 dev, 0)) { 346 dev, 0)) {
345 if (dev)
346 dev_put(dev);
347 err = -EADDRNOTAVAIL; 347 err = -EADDRNOTAVAIL;
348 goto out; 348 goto out_unlock;
349 } 349 }
350 } 350 }
351 if (dev) 351 rcu_read_unlock();
352 dev_put(dev);
353 } 352 }
354 } 353 }
355 354
356 inet->rcv_saddr = v4addr; 355 inet->inet_rcv_saddr = v4addr;
357 inet->saddr = v4addr; 356 inet->inet_saddr = v4addr;
358 357
359 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); 358 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
360 359
@@ -375,12 +374,15 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
375 } 374 }
376 if (snum) 375 if (snum)
377 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; 376 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
378 inet->sport = htons(inet->num); 377 inet->inet_sport = htons(inet->inet_num);
379 inet->dport = 0; 378 inet->inet_dport = 0;
380 inet->daddr = 0; 379 inet->inet_daddr = 0;
381out: 380out:
382 release_sock(sk); 381 release_sock(sk);
383 return err; 382 return err;
383out_unlock:
384 rcu_read_unlock();
385 goto out;
384} 386}
385 387
386EXPORT_SYMBOL(inet6_bind); 388EXPORT_SYMBOL(inet6_bind);
@@ -441,12 +443,12 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
441 sin->sin6_flowinfo = 0; 443 sin->sin6_flowinfo = 0;
442 sin->sin6_scope_id = 0; 444 sin->sin6_scope_id = 0;
443 if (peer) { 445 if (peer) {
444 if (!inet->dport) 446 if (!inet->inet_dport)
445 return -ENOTCONN; 447 return -ENOTCONN;
446 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && 448 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
447 peer == 1) 449 peer == 1)
448 return -ENOTCONN; 450 return -ENOTCONN;
449 sin->sin6_port = inet->dport; 451 sin->sin6_port = inet->inet_dport;
450 ipv6_addr_copy(&sin->sin6_addr, &np->daddr); 452 ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
451 if (np->sndflow) 453 if (np->sndflow)
452 sin->sin6_flowinfo = np->flow_label; 454 sin->sin6_flowinfo = np->flow_label;
@@ -456,7 +458,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
456 else 458 else
457 ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); 459 ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr);
458 460
459 sin->sin6_port = inet->sport; 461 sin->sin6_port = inet->inet_sport;
460 } 462 }
461 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 463 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
462 sin->sin6_scope_id = sk->sk_bound_dev_if; 464 sin->sin6_scope_id = sk->sk_bound_dev_if;
@@ -552,7 +554,7 @@ const struct proto_ops inet6_dgram_ops = {
552#endif 554#endif
553}; 555};
554 556
555static struct net_proto_family inet6_family_ops = { 557static const struct net_proto_family inet6_family_ops = {
556 .family = PF_INET6, 558 .family = PF_INET6,
557 .create = inet6_create, 559 .create = inet6_create,
558 .owner = THIS_MODULE, 560 .owner = THIS_MODULE,
@@ -654,8 +656,9 @@ int inet6_sk_rebuild_header(struct sock *sk)
654 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 656 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
655 fl.fl6_flowlabel = np->flow_label; 657 fl.fl6_flowlabel = np->flow_label;
656 fl.oif = sk->sk_bound_dev_if; 658 fl.oif = sk->sk_bound_dev_if;
657 fl.fl_ip_dport = inet->dport; 659 fl.mark = sk->sk_mark;
658 fl.fl_ip_sport = inet->sport; 660 fl.fl_ip_dport = inet->inet_dport;
661 fl.fl_ip_sport = inet->inet_sport;
659 security_sk_classify_flow(sk, &fl); 662 security_sk_classify_flow(sk, &fl);
660 663
661 if (np->opt && np->opt->srcrt) { 664 if (np->opt && np->opt->srcrt) {
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index c1589e2f1dc..c2f300c314b 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -24,18 +24,92 @@
24 * This file is derived from net/ipv4/ah.c. 24 * This file is derived from net/ipv4/ah.c.
25 */ 25 */
26 26
27#include <crypto/hash.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <net/ip.h> 29#include <net/ip.h>
29#include <net/ah.h> 30#include <net/ah.h>
30#include <linux/crypto.h> 31#include <linux/crypto.h>
31#include <linux/pfkeyv2.h> 32#include <linux/pfkeyv2.h>
32#include <linux/spinlock.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/scatterlist.h>
34#include <net/icmp.h> 35#include <net/icmp.h>
35#include <net/ipv6.h> 36#include <net/ipv6.h>
36#include <net/protocol.h> 37#include <net/protocol.h>
37#include <net/xfrm.h> 38#include <net/xfrm.h>
38 39
40#define IPV6HDR_BASELEN 8
41
42struct tmp_ext {
43#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
44 struct in6_addr saddr;
45#endif
46 struct in6_addr daddr;
47 char hdrs[0];
48};
49
50struct ah_skb_cb {
51 struct xfrm_skb_cb xfrm;
52 void *tmp;
53};
54
55#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
56
57static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
58 unsigned int size)
59{
60 unsigned int len;
61
62 len = size + crypto_ahash_digestsize(ahash) +
63 (crypto_ahash_alignmask(ahash) &
64 ~(crypto_tfm_ctx_alignment() - 1));
65
66 len = ALIGN(len, crypto_tfm_ctx_alignment());
67
68 len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
69 len = ALIGN(len, __alignof__(struct scatterlist));
70
71 len += sizeof(struct scatterlist) * nfrags;
72
73 return kmalloc(len, GFP_ATOMIC);
74}
75
76static inline struct tmp_ext *ah_tmp_ext(void *base)
77{
78 return base + IPV6HDR_BASELEN;
79}
80
81static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
82{
83 return tmp + offset;
84}
85
86static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
87 unsigned int offset)
88{
89 return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
90}
91
92static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
93 u8 *icv)
94{
95 struct ahash_request *req;
96
97 req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
98 crypto_tfm_ctx_alignment());
99
100 ahash_request_set_tfm(req, ahash);
101
102 return req;
103}
104
105static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
106 struct ahash_request *req)
107{
108 return (void *)ALIGN((unsigned long)(req + 1) +
109 crypto_ahash_reqsize(ahash),
110 __alignof__(struct scatterlist));
111}
112
39static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) 113static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
40{ 114{
41 u8 *opt = (u8 *)opthdr; 115 u8 *opt = (u8 *)opthdr;
@@ -218,24 +292,85 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
218 return 0; 292 return 0;
219} 293}
220 294
295static void ah6_output_done(struct crypto_async_request *base, int err)
296{
297 int extlen;
298 u8 *iph_base;
299 u8 *icv;
300 struct sk_buff *skb = base->data;
301 struct xfrm_state *x = skb_dst(skb)->xfrm;
302 struct ah_data *ahp = x->data;
303 struct ipv6hdr *top_iph = ipv6_hdr(skb);
304 struct ip_auth_hdr *ah = ip_auth_hdr(skb);
305 struct tmp_ext *iph_ext;
306
307 extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
308 if (extlen)
309 extlen += sizeof(*iph_ext);
310
311 iph_base = AH_SKB_CB(skb)->tmp;
312 iph_ext = ah_tmp_ext(iph_base);
313 icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);
314
315 memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
316 memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
317
318 if (extlen) {
319#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
320 memcpy(&top_iph->saddr, iph_ext, extlen);
321#else
322 memcpy(&top_iph->daddr, iph_ext, extlen);
323#endif
324 }
325
326 err = ah->nexthdr;
327
328 kfree(AH_SKB_CB(skb)->tmp);
329 xfrm_output_resume(skb, err);
330}
331
221static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) 332static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
222{ 333{
223 int err; 334 int err;
335 int nfrags;
224 int extlen; 336 int extlen;
337 u8 *iph_base;
338 u8 *icv;
339 u8 nexthdr;
340 struct sk_buff *trailer;
341 struct crypto_ahash *ahash;
342 struct ahash_request *req;
343 struct scatterlist *sg;
225 struct ipv6hdr *top_iph; 344 struct ipv6hdr *top_iph;
226 struct ip_auth_hdr *ah; 345 struct ip_auth_hdr *ah;
227 struct ah_data *ahp; 346 struct ah_data *ahp;
228 u8 nexthdr; 347 struct tmp_ext *iph_ext;
229 char tmp_base[8]; 348
230 struct { 349 ahp = x->data;
231#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 350 ahash = ahp->ahash;
232 struct in6_addr saddr; 351
233#endif 352 if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
234 struct in6_addr daddr; 353 goto out;
235 char hdrs[0]; 354 nfrags = err;
236 } *tmp_ext;
237 355
238 skb_push(skb, -skb_network_offset(skb)); 356 skb_push(skb, -skb_network_offset(skb));
357 extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
358 if (extlen)
359 extlen += sizeof(*iph_ext);
360
361 err = -ENOMEM;
362 iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen);
363 if (!iph_base)
364 goto out;
365
366 iph_ext = ah_tmp_ext(iph_base);
367 icv = ah_tmp_icv(ahash, iph_ext, extlen);
368 req = ah_tmp_req(ahash, icv);
369 sg = ah_req_sg(ahash, req);
370
371 ah = ip_auth_hdr(skb);
372 memset(ah->auth_data, 0, ahp->icv_trunc_len);
373
239 top_iph = ipv6_hdr(skb); 374 top_iph = ipv6_hdr(skb);
240 top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); 375 top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
241 376
@@ -245,31 +380,22 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
245 /* When there are no extension headers, we only need to save the first 380 /* When there are no extension headers, we only need to save the first
246 * 8 bytes of the base IP header. 381 * 8 bytes of the base IP header.
247 */ 382 */
248 memcpy(tmp_base, top_iph, sizeof(tmp_base)); 383 memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
249 384
250 tmp_ext = NULL;
251 extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr);
252 if (extlen) { 385 if (extlen) {
253 extlen += sizeof(*tmp_ext);
254 tmp_ext = kmalloc(extlen, GFP_ATOMIC);
255 if (!tmp_ext) {
256 err = -ENOMEM;
257 goto error;
258 }
259#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 386#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
260 memcpy(tmp_ext, &top_iph->saddr, extlen); 387 memcpy(iph_ext, &top_iph->saddr, extlen);
261#else 388#else
262 memcpy(tmp_ext, &top_iph->daddr, extlen); 389 memcpy(iph_ext, &top_iph->daddr, extlen);
263#endif 390#endif
264 err = ipv6_clear_mutable_options(top_iph, 391 err = ipv6_clear_mutable_options(top_iph,
265 extlen - sizeof(*tmp_ext) + 392 extlen - sizeof(*iph_ext) +
266 sizeof(*top_iph), 393 sizeof(*top_iph),
267 XFRM_POLICY_OUT); 394 XFRM_POLICY_OUT);
268 if (err) 395 if (err)
269 goto error_free_iph; 396 goto out_free;
270 } 397 }
271 398
272 ah = ip_auth_hdr(skb);
273 ah->nexthdr = nexthdr; 399 ah->nexthdr = nexthdr;
274 400
275 top_iph->priority = 0; 401 top_iph->priority = 0;
@@ -278,36 +404,80 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
278 top_iph->flow_lbl[2] = 0; 404 top_iph->flow_lbl[2] = 0;
279 top_iph->hop_limit = 0; 405 top_iph->hop_limit = 0;
280 406
281 ahp = x->data;
282 ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; 407 ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
283 408
284 ah->reserved = 0; 409 ah->reserved = 0;
285 ah->spi = x->id.spi; 410 ah->spi = x->id.spi;
286 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); 411 ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);
287 412
288 spin_lock_bh(&x->lock); 413 sg_init_table(sg, nfrags);
289 err = ah_mac_digest(ahp, skb, ah->auth_data); 414 skb_to_sgvec(skb, sg, 0, skb->len);
290 memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
291 spin_unlock_bh(&x->lock);
292 415
293 if (err) 416 ahash_request_set_crypt(req, sg, icv, skb->len);
294 goto error_free_iph; 417 ahash_request_set_callback(req, 0, ah6_output_done, skb);
418
419 AH_SKB_CB(skb)->tmp = iph_base;
295 420
296 memcpy(top_iph, tmp_base, sizeof(tmp_base)); 421 err = crypto_ahash_digest(req);
297 if (tmp_ext) { 422 if (err) {
423 if (err == -EINPROGRESS)
424 goto out;
425
426 if (err == -EBUSY)
427 err = NET_XMIT_DROP;
428 goto out_free;
429 }
430
431 memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
432 memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
433
434 if (extlen) {
298#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 435#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
299 memcpy(&top_iph->saddr, tmp_ext, extlen); 436 memcpy(&top_iph->saddr, iph_ext, extlen);
300#else 437#else
301 memcpy(&top_iph->daddr, tmp_ext, extlen); 438 memcpy(&top_iph->daddr, iph_ext, extlen);
302#endif 439#endif
303error_free_iph:
304 kfree(tmp_ext);
305 } 440 }
306 441
307error: 442out_free:
443 kfree(iph_base);
444out:
308 return err; 445 return err;
309} 446}
310 447
448static void ah6_input_done(struct crypto_async_request *base, int err)
449{
450 u8 *auth_data;
451 u8 *icv;
452 u8 *work_iph;
453 struct sk_buff *skb = base->data;
454 struct xfrm_state *x = xfrm_input_state(skb);
455 struct ah_data *ahp = x->data;
456 struct ip_auth_hdr *ah = ip_auth_hdr(skb);
457 int hdr_len = skb_network_header_len(skb);
458 int ah_hlen = (ah->hdrlen + 2) << 2;
459
460 work_iph = AH_SKB_CB(skb)->tmp;
461 auth_data = ah_tmp_auth(work_iph, hdr_len);
462 icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
463
464 err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
465 if (err)
466 goto out;
467
468 skb->network_header += ah_hlen;
469 memcpy(skb_network_header(skb), work_iph, hdr_len);
470 __skb_pull(skb, ah_hlen + hdr_len);
471 skb_set_transport_header(skb, -hdr_len);
472
473 err = ah->nexthdr;
474out:
475 kfree(AH_SKB_CB(skb)->tmp);
476 xfrm_input_resume(skb, err);
477}
478
479
480
311static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) 481static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
312{ 482{
313 /* 483 /*
@@ -325,14 +495,21 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
325 * There is offset of AH before IPv6 header after the process. 495 * There is offset of AH before IPv6 header after the process.
326 */ 496 */
327 497
498 u8 *auth_data;
499 u8 *icv;
500 u8 *work_iph;
501 struct sk_buff *trailer;
502 struct crypto_ahash *ahash;
503 struct ahash_request *req;
504 struct scatterlist *sg;
328 struct ip_auth_hdr *ah; 505 struct ip_auth_hdr *ah;
329 struct ipv6hdr *ip6h; 506 struct ipv6hdr *ip6h;
330 struct ah_data *ahp; 507 struct ah_data *ahp;
331 unsigned char *tmp_hdr = NULL;
332 u16 hdr_len; 508 u16 hdr_len;
333 u16 ah_hlen; 509 u16 ah_hlen;
334 int nexthdr; 510 int nexthdr;
335 int err = -EINVAL; 511 int nfrags;
512 int err = -ENOMEM;
336 513
337 if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) 514 if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
338 goto out; 515 goto out;
@@ -345,9 +522,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
345 522
346 skb->ip_summed = CHECKSUM_NONE; 523 skb->ip_summed = CHECKSUM_NONE;
347 524
348 hdr_len = skb->data - skb_network_header(skb); 525 hdr_len = skb_network_header_len(skb);
349 ah = (struct ip_auth_hdr *)skb->data; 526 ah = (struct ip_auth_hdr *)skb->data;
350 ahp = x->data; 527 ahp = x->data;
528 ahash = ahp->ahash;
529
351 nexthdr = ah->nexthdr; 530 nexthdr = ah->nexthdr;
352 ah_hlen = (ah->hdrlen + 2) << 2; 531 ah_hlen = (ah->hdrlen + 2) << 2;
353 532
@@ -358,48 +537,67 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
358 if (!pskb_may_pull(skb, ah_hlen)) 537 if (!pskb_may_pull(skb, ah_hlen))
359 goto out; 538 goto out;
360 539
361 tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC);
362 if (!tmp_hdr)
363 goto out;
364 ip6h = ipv6_hdr(skb); 540 ip6h = ipv6_hdr(skb);
541
542 skb_push(skb, hdr_len);
543
544 if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
545 goto out;
546 nfrags = err;
547
548 work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
549 if (!work_iph)
550 goto out;
551
552 auth_data = ah_tmp_auth(work_iph, hdr_len);
553 icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
554 req = ah_tmp_req(ahash, icv);
555 sg = ah_req_sg(ahash, req);
556
557 memcpy(work_iph, ip6h, hdr_len);
558 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
559 memset(ah->auth_data, 0, ahp->icv_trunc_len);
560
365 if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) 561 if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
366 goto free_out; 562 goto out_free;
563
367 ip6h->priority = 0; 564 ip6h->priority = 0;
368 ip6h->flow_lbl[0] = 0; 565 ip6h->flow_lbl[0] = 0;
369 ip6h->flow_lbl[1] = 0; 566 ip6h->flow_lbl[1] = 0;
370 ip6h->flow_lbl[2] = 0; 567 ip6h->flow_lbl[2] = 0;
371 ip6h->hop_limit = 0; 568 ip6h->hop_limit = 0;
372 569
373 spin_lock(&x->lock); 570 sg_init_table(sg, nfrags);
374 { 571 skb_to_sgvec(skb, sg, 0, skb->len);
375 u8 auth_data[MAX_AH_AUTH_LEN];
376 572
377 memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); 573 ahash_request_set_crypt(req, sg, icv, skb->len);
378 memset(ah->auth_data, 0, ahp->icv_trunc_len); 574 ahash_request_set_callback(req, 0, ah6_input_done, skb);
379 skb_push(skb, hdr_len); 575
380 err = ah_mac_digest(ahp, skb, ah->auth_data); 576 AH_SKB_CB(skb)->tmp = work_iph;
381 if (err) 577
382 goto unlock; 578 err = crypto_ahash_digest(req);
383 if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) 579 if (err) {
384 err = -EBADMSG; 580 if (err == -EINPROGRESS)
581 goto out;
582
583 if (err == -EBUSY)
584 err = NET_XMIT_DROP;
585 goto out_free;
385 } 586 }
386unlock:
387 spin_unlock(&x->lock);
388 587
588 err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
389 if (err) 589 if (err)
390 goto free_out; 590 goto out_free;
391 591
392 skb->network_header += ah_hlen; 592 skb->network_header += ah_hlen;
393 memcpy(skb_network_header(skb), tmp_hdr, hdr_len); 593 memcpy(skb_network_header(skb), work_iph, hdr_len);
394 skb->transport_header = skb->network_header; 594 skb->transport_header = skb->network_header;
395 __skb_pull(skb, ah_hlen + hdr_len); 595 __skb_pull(skb, ah_hlen + hdr_len);
396 596
397 kfree(tmp_hdr); 597 err = nexthdr;
398 598
399 return nexthdr; 599out_free:
400 600 kfree(work_iph);
401free_out:
402 kfree(tmp_hdr);
403out: 601out:
404 return err; 602 return err;
405} 603}
@@ -430,7 +628,7 @@ static int ah6_init_state(struct xfrm_state *x)
430{ 628{
431 struct ah_data *ahp = NULL; 629 struct ah_data *ahp = NULL;
432 struct xfrm_algo_desc *aalg_desc; 630 struct xfrm_algo_desc *aalg_desc;
433 struct crypto_hash *tfm; 631 struct crypto_ahash *ahash;
434 632
435 if (!x->aalg) 633 if (!x->aalg)
436 goto error; 634 goto error;
@@ -442,12 +640,12 @@ static int ah6_init_state(struct xfrm_state *x)
442 if (ahp == NULL) 640 if (ahp == NULL)
443 return -ENOMEM; 641 return -ENOMEM;
444 642
445 tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); 643 ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
446 if (IS_ERR(tfm)) 644 if (IS_ERR(ahash))
447 goto error; 645 goto error;
448 646
449 ahp->tfm = tfm; 647 ahp->ahash = ahash;
450 if (crypto_hash_setkey(tfm, x->aalg->alg_key, 648 if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
451 (x->aalg->alg_key_len + 7) / 8)) 649 (x->aalg->alg_key_len + 7) / 8))
452 goto error; 650 goto error;
453 651
@@ -461,22 +659,18 @@ static int ah6_init_state(struct xfrm_state *x)
461 BUG_ON(!aalg_desc); 659 BUG_ON(!aalg_desc);
462 660
463 if (aalg_desc->uinfo.auth.icv_fullbits/8 != 661 if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
464 crypto_hash_digestsize(tfm)) { 662 crypto_ahash_digestsize(ahash)) {
465 printk(KERN_INFO "AH: %s digestsize %u != %hu\n", 663 printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
466 x->aalg->alg_name, crypto_hash_digestsize(tfm), 664 x->aalg->alg_name, crypto_ahash_digestsize(ahash),
467 aalg_desc->uinfo.auth.icv_fullbits/8); 665 aalg_desc->uinfo.auth.icv_fullbits/8);
468 goto error; 666 goto error;
469 } 667 }
470 668
471 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; 669 ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
472 ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; 670 ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
473 671
474 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); 672 BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
475 673
476 ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
477 if (!ahp->work_icv)
478 goto error;
479
480 x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + 674 x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
481 ahp->icv_trunc_len); 675 ahp->icv_trunc_len);
482 switch (x->props.mode) { 676 switch (x->props.mode) {
@@ -495,8 +689,7 @@ static int ah6_init_state(struct xfrm_state *x)
495 689
496error: 690error:
497 if (ahp) { 691 if (ahp) {
498 kfree(ahp->work_icv); 692 crypto_free_ahash(ahp->ahash);
499 crypto_free_hash(ahp->tfm);
500 kfree(ahp); 693 kfree(ahp);
501 } 694 }
502 return -EINVAL; 695 return -EINVAL;
@@ -509,8 +702,7 @@ static void ah6_destroy(struct xfrm_state *x)
509 if (!ahp) 702 if (!ahp)
510 return; 703 return;
511 704
512 kfree(ahp->work_icv); 705 crypto_free_ahash(ahp->ahash);
513 crypto_free_hash(ahp->tfm);
514 kfree(ahp); 706 kfree(ahp);
515} 707}
516 708
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 1ae58bec1de..f1c74c8ef9d 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -404,13 +404,13 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
404 404
405 if (dev) 405 if (dev)
406 return ipv6_chk_acast_dev(dev, addr); 406 return ipv6_chk_acast_dev(dev, addr);
407 read_lock(&dev_base_lock); 407 rcu_read_lock();
408 for_each_netdev(net, dev) 408 for_each_netdev_rcu(net, dev)
409 if (ipv6_chk_acast_dev(dev, addr)) { 409 if (ipv6_chk_acast_dev(dev, addr)) {
410 found = 1; 410 found = 1;
411 break; 411 break;
412 } 412 }
413 read_unlock(&dev_base_lock); 413 rcu_read_unlock();
414 return found; 414 return found;
415} 415}
416 416
@@ -431,9 +431,9 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
431 struct net *net = seq_file_net(seq); 431 struct net *net = seq_file_net(seq);
432 432
433 state->idev = NULL; 433 state->idev = NULL;
434 for_each_netdev(net, state->dev) { 434 for_each_netdev_rcu(net, state->dev) {
435 struct inet6_dev *idev; 435 struct inet6_dev *idev;
436 idev = in6_dev_get(state->dev); 436 idev = __in6_dev_get(state->dev);
437 if (!idev) 437 if (!idev)
438 continue; 438 continue;
439 read_lock_bh(&idev->lock); 439 read_lock_bh(&idev->lock);
@@ -443,7 +443,6 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
443 break; 443 break;
444 } 444 }
445 read_unlock_bh(&idev->lock); 445 read_unlock_bh(&idev->lock);
446 in6_dev_put(idev);
447 } 446 }
448 return im; 447 return im;
449} 448}
@@ -454,16 +453,15 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im
454 453
455 im = im->aca_next; 454 im = im->aca_next;
456 while (!im) { 455 while (!im) {
457 if (likely(state->idev != NULL)) { 456 if (likely(state->idev != NULL))
458 read_unlock_bh(&state->idev->lock); 457 read_unlock_bh(&state->idev->lock);
459 in6_dev_put(state->idev); 458
460 } 459 state->dev = next_net_device_rcu(state->dev);
461 state->dev = next_net_device(state->dev);
462 if (!state->dev) { 460 if (!state->dev) {
463 state->idev = NULL; 461 state->idev = NULL;
464 break; 462 break;
465 } 463 }
466 state->idev = in6_dev_get(state->dev); 464 state->idev = __in6_dev_get(state->dev);
467 if (!state->idev) 465 if (!state->idev)
468 continue; 466 continue;
469 read_lock_bh(&state->idev->lock); 467 read_lock_bh(&state->idev->lock);
@@ -482,29 +480,30 @@ static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos)
482} 480}
483 481
484static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) 482static void *ac6_seq_start(struct seq_file *seq, loff_t *pos)
485 __acquires(dev_base_lock) 483 __acquires(RCU)
486{ 484{
487 read_lock(&dev_base_lock); 485 rcu_read_lock();
488 return ac6_get_idx(seq, *pos); 486 return ac6_get_idx(seq, *pos);
489} 487}
490 488
491static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) 489static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
492{ 490{
493 struct ifacaddr6 *im; 491 struct ifacaddr6 *im = ac6_get_next(seq, v);
494 im = ac6_get_next(seq, v); 492
495 ++*pos; 493 ++*pos;
496 return im; 494 return im;
497} 495}
498 496
499static void ac6_seq_stop(struct seq_file *seq, void *v) 497static void ac6_seq_stop(struct seq_file *seq, void *v)
500 __releases(dev_base_lock) 498 __releases(RCU)
501{ 499{
502 struct ac6_iter_state *state = ac6_seq_private(seq); 500 struct ac6_iter_state *state = ac6_seq_private(seq);
501
503 if (likely(state->idev != NULL)) { 502 if (likely(state->idev != NULL)) {
504 read_unlock_bh(&state->idev->lock); 503 read_unlock_bh(&state->idev->lock);
505 in6_dev_put(state->idev); 504 state->idev = NULL;
506 } 505 }
507 read_unlock(&dev_base_lock); 506 rcu_read_unlock();
508} 507}
509 508
510static int ac6_seq_show(struct seq_file *seq, void *v) 509static int ac6_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e2bdc6d83a4..e6f9cdf780f 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -98,17 +98,15 @@ ipv4_connected:
98 if (err) 98 if (err)
99 goto out; 99 goto out;
100 100
101 ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr); 101 ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
102 102
103 if (ipv6_addr_any(&np->saddr)) { 103 if (ipv6_addr_any(&np->saddr))
104 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff), 104 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
105 inet->saddr); 105
106 } 106 if (ipv6_addr_any(&np->rcv_saddr))
107 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
108 &np->rcv_saddr);
107 109
108 if (ipv6_addr_any(&np->rcv_saddr)) {
109 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
110 inet->rcv_saddr);
111 }
112 goto out; 110 goto out;
113 } 111 }
114 112
@@ -136,7 +134,7 @@ ipv4_connected:
136 ipv6_addr_copy(&np->daddr, daddr); 134 ipv6_addr_copy(&np->daddr, daddr);
137 np->flow_label = fl.fl6_flowlabel; 135 np->flow_label = fl.fl6_flowlabel;
138 136
139 inet->dport = usin->sin6_port; 137 inet->inet_dport = usin->sin6_port;
140 138
141 /* 139 /*
142 * Check for a route to destination an obtain the 140 * Check for a route to destination an obtain the
@@ -147,8 +145,9 @@ ipv4_connected:
147 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 145 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
148 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 146 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
149 fl.oif = sk->sk_bound_dev_if; 147 fl.oif = sk->sk_bound_dev_if;
150 fl.fl_ip_dport = inet->dport; 148 fl.mark = sk->sk_mark;
151 fl.fl_ip_sport = inet->sport; 149 fl.fl_ip_dport = inet->inet_dport;
150 fl.fl_ip_sport = inet->inet_sport;
152 151
153 if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST)) 152 if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
154 fl.oif = np->mcast_oif; 153 fl.oif = np->mcast_oif;
@@ -190,7 +189,7 @@ ipv4_connected:
190 189
191 if (ipv6_addr_any(&np->rcv_saddr)) { 190 if (ipv6_addr_any(&np->rcv_saddr)) {
192 ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); 191 ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
193 inet->rcv_saddr = LOOPBACK4_IPV6; 192 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
194 } 193 }
195 194
196 ip6_dst_store(sk, dst, 195 ip6_dst_store(sk, dst,
@@ -329,9 +328,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
329 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 328 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
330 sin->sin6_scope_id = IP6CB(skb)->iif; 329 sin->sin6_scope_id = IP6CB(skb)->iif;
331 } else { 330 } else {
332 ipv6_addr_set(&sin->sin6_addr, 0, 0, 331 ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
333 htonl(0xffff), 332 &sin->sin6_addr);
334 *(__be32 *)(nh + serr->addr_offset));
335 } 333 }
336 } 334 }
337 335
@@ -351,8 +349,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
351 } else { 349 } else {
352 struct inet_sock *inet = inet_sk(sk); 350 struct inet_sock *inet = inet_sk(sk);
353 351
354 ipv6_addr_set(&sin->sin6_addr, 0, 0, 352 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
355 htonl(0xffff), ip_hdr(skb)->saddr); 353 &sin->sin6_addr);
356 if (inet->cmsg_flags) 354 if (inet->cmsg_flags)
357 ip_cmsg_recv(msg, skb); 355 ip_cmsg_recv(msg, skb);
358 } 356 }
@@ -539,12 +537,17 @@ int datagram_send_ctl(struct net *net,
539 537
540 addr_type = __ipv6_addr_type(&src_info->ipi6_addr); 538 addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
541 539
540 rcu_read_lock();
542 if (fl->oif) { 541 if (fl->oif) {
543 dev = dev_get_by_index(net, fl->oif); 542 dev = dev_get_by_index_rcu(net, fl->oif);
544 if (!dev) 543 if (!dev) {
544 rcu_read_unlock();
545 return -ENODEV; 545 return -ENODEV;
546 } else if (addr_type & IPV6_ADDR_LINKLOCAL) 546 }
547 } else if (addr_type & IPV6_ADDR_LINKLOCAL) {
548 rcu_read_unlock();
547 return -EINVAL; 549 return -EINVAL;
550 }
548 551
549 if (addr_type != IPV6_ADDR_ANY) { 552 if (addr_type != IPV6_ADDR_ANY) {
550 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 553 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
@@ -555,8 +558,7 @@ int datagram_send_ctl(struct net *net,
555 ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); 558 ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
556 } 559 }
557 560
558 if (dev) 561 rcu_read_unlock();
559 dev_put(dev);
560 562
561 if (err) 563 if (err)
562 goto exit_f; 564 goto exit_f;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index af597c73ebe..668a46b655e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -473,7 +473,7 @@ static int esp_init_authenc(struct xfrm_state *x)
473 } 473 }
474 474
475 err = crypto_aead_setauthsize( 475 err = crypto_aead_setauthsize(
476 aead, aalg_desc->uinfo.auth.icv_truncbits / 8); 476 aead, x->aalg->alg_trunc_len / 8);
477 if (err) 477 if (err)
478 goto free_key; 478 goto free_key;
479 } 479 }
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 00a7a5e4ac9..b7aa7c64cc4 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -264,44 +264,36 @@ static struct fib_rules_ops fib6_rules_ops_template = {
264 264
265static int fib6_rules_net_init(struct net *net) 265static int fib6_rules_net_init(struct net *net)
266{ 266{
267 struct fib_rules_ops *ops;
267 int err = -ENOMEM; 268 int err = -ENOMEM;
268 269
269 net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template, 270 ops = fib_rules_register(&fib6_rules_ops_template, net);
270 sizeof(*net->ipv6.fib6_rules_ops), 271 if (IS_ERR(ops))
271 GFP_KERNEL); 272 return PTR_ERR(ops);
272 if (!net->ipv6.fib6_rules_ops) 273 net->ipv6.fib6_rules_ops = ops;
273 goto out;
274 274
275 net->ipv6.fib6_rules_ops->fro_net = net;
276 INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list);
277 275
278 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0, 276 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
279 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); 277 RT6_TABLE_LOCAL, 0);
280 if (err) 278 if (err)
281 goto out_fib6_rules_ops; 279 goto out_fib6_rules_ops;
282 280
283 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 281 err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
284 0x7FFE, RT6_TABLE_MAIN, 0); 282 0x7FFE, RT6_TABLE_MAIN, 0);
285 if (err) 283 if (err)
286 goto out_fib6_default_rule_add; 284 goto out_fib6_rules_ops;
287 285
288 err = fib_rules_register(net->ipv6.fib6_rules_ops);
289 if (err)
290 goto out_fib6_default_rule_add;
291out: 286out:
292 return err; 287 return err;
293 288
294out_fib6_default_rule_add:
295 fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops);
296out_fib6_rules_ops: 289out_fib6_rules_ops:
297 kfree(net->ipv6.fib6_rules_ops); 290 fib_rules_unregister(ops);
298 goto out; 291 goto out;
299} 292}
300 293
301static void fib6_rules_net_exit(struct net *net) 294static void fib6_rules_net_exit(struct net *net)
302{ 295{
303 fib_rules_unregister(net->ipv6.fib6_rules_ops); 296 fib_rules_unregister(net->ipv6.fib6_rules_ops);
304 kfree(net->ipv6.fib6_rules_ops);
305} 297}
306 298
307static struct pernet_operations fib6_rules_net_ops = { 299static struct pernet_operations fib6_rules_net_ops = {
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index cc4797dd832..3516e6fe2e5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -132,7 +132,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
132 132
133 sin6->sin6_family = AF_INET6; 133 sin6->sin6_family = AF_INET6;
134 ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); 134 ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
135 sin6->sin6_port = inet_sk(sk)->dport; 135 sin6->sin6_port = inet_sk(sk)->inet_dport;
136 /* We do not store received flowlabel for TCP */ 136 /* We do not store received flowlabel for TCP */
137 sin6->sin6_flowinfo = 0; 137 sin6->sin6_flowinfo = 0;
138 sin6->sin6_scope_id = 0; 138 sin6->sin6_scope_id = 0;
@@ -168,8 +168,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
168 if (dst) { 168 if (dst) {
169 struct rt6_info *rt = (struct rt6_info *)dst; 169 struct rt6_info *rt = (struct rt6_info *)dst;
170 if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { 170 if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
171 sk->sk_dst_cache = NULL; 171 __sk_dst_reset(sk);
172 dst_release(dst);
173 dst = NULL; 172 dst = NULL;
174 } 173 }
175 } 174 }
@@ -194,8 +193,9 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
194 fl.fl6_flowlabel = np->flow_label; 193 fl.fl6_flowlabel = np->flow_label;
195 IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel); 194 IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
196 fl.oif = sk->sk_bound_dev_if; 195 fl.oif = sk->sk_bound_dev_if;
197 fl.fl_ip_sport = inet->sport; 196 fl.mark = sk->sk_mark;
198 fl.fl_ip_dport = inet->dport; 197 fl.fl_ip_sport = inet->inet_sport;
198 fl.fl_ip_dport = inet->inet_dport;
199 security_sk_classify_flow(sk, &fl); 199 security_sk_classify_flow(sk, &fl);
200 200
201 if (np->opt && np->opt->srcrt) { 201 if (np->opt && np->opt->srcrt) {
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 1bcc3431859..c813e294ec0 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -73,7 +73,7 @@ struct sock *__inet6_lookup_established(struct net *net,
73 * have wildcards anyways. 73 * have wildcards anyways.
74 */ 74 */
75 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); 75 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
76 unsigned int slot = hash & (hashinfo->ehash_size - 1); 76 unsigned int slot = hash & hashinfo->ehash_mask;
77 struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; 77 struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
78 78
79 79
@@ -125,7 +125,7 @@ static int inline compute_score(struct sock *sk, struct net *net,
125{ 125{
126 int score = -1; 126 int score = -1;
127 127
128 if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum && 128 if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
129 sk->sk_family == PF_INET6) { 129 sk->sk_family == PF_INET6) {
130 const struct ipv6_pinfo *np = inet6_sk(sk); 130 const struct ipv6_pinfo *np = inet6_sk(sk);
131 131
@@ -214,15 +214,16 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
214 const struct in6_addr *daddr = &np->rcv_saddr; 214 const struct in6_addr *daddr = &np->rcv_saddr;
215 const struct in6_addr *saddr = &np->daddr; 215 const struct in6_addr *saddr = &np->daddr;
216 const int dif = sk->sk_bound_dev_if; 216 const int dif = sk->sk_bound_dev_if;
217 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); 217 const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
218 struct net *net = sock_net(sk); 218 struct net *net = sock_net(sk);
219 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, 219 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
220 inet->dport); 220 inet->inet_dport);
221 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 221 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
222 spinlock_t *lock = inet_ehash_lockp(hinfo, hash); 222 spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
223 struct sock *sk2; 223 struct sock *sk2;
224 const struct hlist_nulls_node *node; 224 const struct hlist_nulls_node *node;
225 struct inet_timewait_sock *tw; 225 struct inet_timewait_sock *tw;
226 int twrefcnt = 0;
226 227
227 spin_lock(lock); 228 spin_lock(lock);
228 229
@@ -248,21 +249,25 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
248unique: 249unique:
249 /* Must record num and sport now. Otherwise we will see 250 /* Must record num and sport now. Otherwise we will see
250 * in hash table socket with a funny identity. */ 251 * in hash table socket with a funny identity. */
251 inet->num = lport; 252 inet->inet_num = lport;
252 inet->sport = htons(lport); 253 inet->inet_sport = htons(lport);
254 sk->sk_hash = hash;
253 WARN_ON(!sk_unhashed(sk)); 255 WARN_ON(!sk_unhashed(sk));
254 __sk_nulls_add_node_rcu(sk, &head->chain); 256 __sk_nulls_add_node_rcu(sk, &head->chain);
255 sk->sk_hash = hash; 257 if (tw) {
258 twrefcnt = inet_twsk_unhash(tw);
259 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
260 }
256 spin_unlock(lock); 261 spin_unlock(lock);
262 if (twrefcnt)
263 inet_twsk_put(tw);
257 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 264 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
258 265
259 if (twp != NULL) { 266 if (twp) {
260 *twp = tw; 267 *twp = tw;
261 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); 268 } else if (tw) {
262 } else if (tw != NULL) {
263 /* Silly. Should hash-dance instead... */ 269 /* Silly. Should hash-dance instead... */
264 inet_twsk_deschedule(tw, death_row); 270 inet_twsk_deschedule(tw, death_row);
265 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
266 271
267 inet_twsk_put(tw); 272 inet_twsk_put(tw);
268 } 273 }
@@ -279,7 +284,7 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk)
279 const struct ipv6_pinfo *np = inet6_sk(sk); 284 const struct ipv6_pinfo *np = inet6_sk(sk);
280 return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32, 285 return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32,
281 np->daddr.s6_addr32, 286 np->daddr.s6_addr32,
282 inet->dport); 287 inet->inet_dport);
283} 288}
284 289
285int inet6_hash_connect(struct inet_timewait_death_row *death_row, 290int inet6_hash_connect(struct inet_timewait_death_row *death_row,
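The ehash_mask hunk above replaces the per-lookup "hash & (ehash_size - 1)" computation with a cached mask. A minimal standalone sketch (plain userspace C, not kernel code) of why that works, assuming the table size is a power of two:

/*
 * Sketch of the bucket selection used above: when the table size is a
 * power of two, "hash & mask" with a precomputed mask == size - 1 picks
 * the same slot as "hash % size" while avoiding both the division and the
 * repeated "size - 1" arithmetic on every lookup.
 */
#include <assert.h>
#include <stdio.h>

#define TABLE_SIZE 8u                    /* must be a power of two */
static const unsigned int table_mask = TABLE_SIZE - 1;

static unsigned int slot_of(unsigned int hash)
{
        return hash & table_mask;        /* stands in for hash & ehash_mask */
}

int main(void)
{
        unsigned int hash;

        for (hash = 0; hash < 64; hash++) {
                assert(slot_of(hash) == hash % TABLE_SIZE);
                printf("hash %2u -> slot %u\n", hash, slot_of(hash));
        }
        return 0;
}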
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 7712578bdc6..6e7bffa2205 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -67,7 +67,7 @@ static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
67 struct ip6_flowlabel *fl; 67 struct ip6_flowlabel *fl;
68 68
69 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) { 69 for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
70 if (fl->label == label && fl->fl_net == net) 70 if (fl->label == label && net_eq(fl->fl_net, net))
71 return fl; 71 return fl;
72 } 72 }
73 return NULL; 73 return NULL;
@@ -163,7 +163,8 @@ static void ip6_fl_purge(struct net *net)
163 struct ip6_flowlabel *fl, **flp; 163 struct ip6_flowlabel *fl, **flp;
164 flp = &fl_ht[i]; 164 flp = &fl_ht[i];
165 while ((fl = *flp) != NULL) { 165 while ((fl = *flp) != NULL) {
166 if (fl->fl_net == net && atomic_read(&fl->users) == 0) { 166 if (net_eq(fl->fl_net, net) &&
167 atomic_read(&fl->users) == 0) {
167 *flp = fl->next; 168 *flp = fl->next;
168 fl_free(fl); 169 fl_free(fl);
169 atomic_dec(&fl_size); 170 atomic_dec(&fl_size);
@@ -377,8 +378,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
377 goto done; 378 goto done;
378 fl->share = freq->flr_share; 379 fl->share = freq->flr_share;
379 addr_type = ipv6_addr_type(&freq->flr_dst); 380 addr_type = ipv6_addr_type(&freq->flr_dst);
380 if ((addr_type&IPV6_ADDR_MAPPED) 381 if ((addr_type & IPV6_ADDR_MAPPED) ||
381 || addr_type == IPV6_ADDR_ANY) { 382 addr_type == IPV6_ADDR_ANY) {
382 err = -EINVAL; 383 err = -EINVAL;
383 goto done; 384 goto done;
384 } 385 }
@@ -421,8 +422,8 @@ static int mem_check(struct sock *sk)
421 422
422 if (room <= 0 || 423 if (room <= 0 ||
423 ((count >= FL_MAX_PER_SOCK || 424 ((count >= FL_MAX_PER_SOCK ||
424 (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) 425 (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
425 && !capable(CAP_NET_ADMIN))) 426 !capable(CAP_NET_ADMIN)))
426 return -ENOBUFS; 427 return -ENOBUFS;
427 428
428 return 0; 429 return 0;
@@ -630,7 +631,7 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
630 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { 631 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
631 fl = fl_ht[state->bucket]; 632 fl = fl_ht[state->bucket];
632 633
633 while (fl && fl->fl_net != net) 634 while (fl && !net_eq(fl->fl_net, net))
634 fl = fl->next; 635 fl = fl->next;
635 if (fl) 636 if (fl)
636 break; 637 break;
@@ -645,7 +646,7 @@ static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flo
645 646
646 fl = fl->next; 647 fl = fl->next;
647try_again: 648try_again:
648 while (fl && fl->fl_net != net) 649 while (fl && !net_eq(fl->fl_net, net))
649 fl = fl->next; 650 fl = fl->next;
650 651
651 while (!fl) { 652 while (!fl) {
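The ip6_flowlabel hunks above switch raw "fl->fl_net == net" pointer comparisons to the net_eq() helper. A simplified userspace mock of that helper follows, only to illustrate the shape; the real definition lives in include/net/net_namespace.h, and the benefit assumed here is that a build without namespace support can fold the check to a constant:

/*
 * Simplified mock of net_eq() (illustration only, not the kernel header).
 * With namespaces enabled it compares the pointers; with namespaces
 * compiled out it is always true, which a raw pointer comparison in the
 * callers could not express.
 */
#include <stdio.h>

struct net { int dummy; };

#ifdef CONFIG_NET_NS
static inline int net_eq(const struct net *net1, const struct net *net2)
{
        return net1 == net2;             /* namespaces enabled: compare them */
}
#else
static inline int net_eq(const struct net *net1, const struct net *net2)
{
        (void)net1;
        (void)net2;
        return 1;                        /* single namespace: always equal */
}
#endif

int main(void)
{
        struct net init_net, other_net;

        printf("same net:  %d\n", net_eq(&init_net, &init_net));
        printf("other net: %d\n", net_eq(&init_net, &other_net));
        return 0;
}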
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c595bbe1ed9..d453d07b0df 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -78,7 +78,7 @@ static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 78static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 79static void ip6_tnl_dev_setup(struct net_device *dev);
80 80
81static int ip6_tnl_net_id; 81static int ip6_tnl_net_id __read_mostly;
82struct ip6_tnl_net { 82struct ip6_tnl_net {
83 /* the IPv6 tunnel fallback device */ 83 /* the IPv6 tunnel fallback device */
84 struct net_device *fb_tnl_dev; 84 struct net_device *fb_tnl_dev;
@@ -88,8 +88,10 @@ struct ip6_tnl_net {
88 struct ip6_tnl **tnls[2]; 88 struct ip6_tnl **tnls[2];
89}; 89};
90 90
91/* lock for the tunnel lists */ 91/*
92static DEFINE_RWLOCK(ip6_tnl_lock); 92 * Locking : hash tables are protected by RCU and a spinlock
93 */
94static DEFINE_SPINLOCK(ip6_tnl_lock);
93 95
94static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) 96static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
95{ 97{
@@ -130,6 +132,9 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
130 * else %NULL 132 * else %NULL
131 **/ 133 **/
132 134
135#define for_each_ip6_tunnel_rcu(start) \
136 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
137
133static struct ip6_tnl * 138static struct ip6_tnl *
134ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) 139ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
135{ 140{
@@ -138,13 +143,14 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
138 struct ip6_tnl *t; 143 struct ip6_tnl *t;
139 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 144 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
140 145
141 for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) { 146 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
142 if (ipv6_addr_equal(local, &t->parms.laddr) && 147 if (ipv6_addr_equal(local, &t->parms.laddr) &&
143 ipv6_addr_equal(remote, &t->parms.raddr) && 148 ipv6_addr_equal(remote, &t->parms.raddr) &&
144 (t->dev->flags & IFF_UP)) 149 (t->dev->flags & IFF_UP))
145 return t; 150 return t;
146 } 151 }
147 if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP)) 152 t = rcu_dereference(ip6n->tnls_wc[0]);
153 if (t && (t->dev->flags & IFF_UP))
148 return t; 154 return t;
149 155
150 return NULL; 156 return NULL;
@@ -186,10 +192,10 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
186{ 192{
187 struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); 193 struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
188 194
195 spin_lock_bh(&ip6_tnl_lock);
189 t->next = *tp; 196 t->next = *tp;
190 write_lock_bh(&ip6_tnl_lock); 197 rcu_assign_pointer(*tp, t);
191 *tp = t; 198 spin_unlock_bh(&ip6_tnl_lock);
192 write_unlock_bh(&ip6_tnl_lock);
193} 199}
194 200
195/** 201/**
@@ -204,9 +210,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
204 210
205 for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { 211 for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
206 if (t == *tp) { 212 if (t == *tp) {
207 write_lock_bh(&ip6_tnl_lock); 213 spin_lock_bh(&ip6_tnl_lock);
208 *tp = t->next; 214 *tp = t->next;
209 write_unlock_bh(&ip6_tnl_lock); 215 spin_unlock_bh(&ip6_tnl_lock);
210 break; 216 break;
211 } 217 }
212 } 218 }
@@ -313,9 +319,9 @@ ip6_tnl_dev_uninit(struct net_device *dev)
313 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 319 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
314 320
315 if (dev == ip6n->fb_tnl_dev) { 321 if (dev == ip6n->fb_tnl_dev) {
316 write_lock_bh(&ip6_tnl_lock); 322 spin_lock_bh(&ip6_tnl_lock);
317 ip6n->tnls_wc[0] = NULL; 323 ip6n->tnls_wc[0] = NULL;
318 write_unlock_bh(&ip6_tnl_lock); 324 spin_unlock_bh(&ip6_tnl_lock);
319 } else { 325 } else {
320 ip6_tnl_unlink(ip6n, t); 326 ip6_tnl_unlink(ip6n, t);
321 } 327 }
@@ -409,7 +415,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
409 in trouble since we might need the source address for further 415 in trouble since we might need the source address for further
410 processing of the error. */ 416 processing of the error. */
411 417
412 read_lock(&ip6_tnl_lock); 418 rcu_read_lock();
413 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, 419 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
414 &ipv6h->saddr)) == NULL) 420 &ipv6h->saddr)) == NULL)
415 goto out; 421 goto out;
@@ -482,7 +488,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
482 *msg = rel_msg; 488 *msg = rel_msg;
483 489
484out: 490out:
485 read_unlock(&ip6_tnl_lock); 491 rcu_read_unlock();
486 return err; 492 return err;
487} 493}
488 494
@@ -652,6 +658,7 @@ static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
652 IP6_ECN_set_ce(ipv6_hdr(skb)); 658 IP6_ECN_set_ce(ipv6_hdr(skb));
653} 659}
654 660
661/* called with rcu_read_lock() */
655static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) 662static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
656{ 663{
657 struct ip6_tnl_parm *p = &t->parms; 664 struct ip6_tnl_parm *p = &t->parms;
@@ -662,15 +669,13 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
662 struct net_device *ldev = NULL; 669 struct net_device *ldev = NULL;
663 670
664 if (p->link) 671 if (p->link)
665 ldev = dev_get_by_index(net, p->link); 672 ldev = dev_get_by_index_rcu(net, p->link);
666 673
667 if ((ipv6_addr_is_multicast(&p->laddr) || 674 if ((ipv6_addr_is_multicast(&p->laddr) ||
668 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) && 675 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
669 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0))) 676 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
670 ret = 1; 677 ret = 1;
671 678
672 if (ldev)
673 dev_put(ldev);
674 } 679 }
675 return ret; 680 return ret;
676} 681}
@@ -693,23 +698,23 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
693 struct ip6_tnl *t; 698 struct ip6_tnl *t;
694 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 699 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
695 700
696 read_lock(&ip6_tnl_lock); 701 rcu_read_lock();
697 702
698 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, 703 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
699 &ipv6h->daddr)) != NULL) { 704 &ipv6h->daddr)) != NULL) {
700 if (t->parms.proto != ipproto && t->parms.proto != 0) { 705 if (t->parms.proto != ipproto && t->parms.proto != 0) {
701 read_unlock(&ip6_tnl_lock); 706 rcu_read_unlock();
702 goto discard; 707 goto discard;
703 } 708 }
704 709
705 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 710 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
706 read_unlock(&ip6_tnl_lock); 711 rcu_read_unlock();
707 goto discard; 712 goto discard;
708 } 713 }
709 714
710 if (!ip6_tnl_rcv_ctl(t)) { 715 if (!ip6_tnl_rcv_ctl(t)) {
711 t->dev->stats.rx_dropped++; 716 t->dev->stats.rx_dropped++;
712 read_unlock(&ip6_tnl_lock); 717 rcu_read_unlock();
713 goto discard; 718 goto discard;
714 } 719 }
715 secpath_reset(skb); 720 secpath_reset(skb);
@@ -727,10 +732,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
727 t->dev->stats.rx_packets++; 732 t->dev->stats.rx_packets++;
728 t->dev->stats.rx_bytes += skb->len; 733 t->dev->stats.rx_bytes += skb->len;
729 netif_rx(skb); 734 netif_rx(skb);
730 read_unlock(&ip6_tnl_lock); 735 rcu_read_unlock();
731 return 0; 736 return 0;
732 } 737 }
733 read_unlock(&ip6_tnl_lock); 738 rcu_read_unlock();
734 return 1; 739 return 1;
735 740
736discard: 741discard:
@@ -798,8 +803,9 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
798 if (p->flags & IP6_TNL_F_CAP_XMIT) { 803 if (p->flags & IP6_TNL_F_CAP_XMIT) {
799 struct net_device *ldev = NULL; 804 struct net_device *ldev = NULL;
800 805
806 rcu_read_lock();
801 if (p->link) 807 if (p->link)
802 ldev = dev_get_by_index(net, p->link); 808 ldev = dev_get_by_index_rcu(net, p->link);
803 809
804 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) 810 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
805 printk(KERN_WARNING 811 printk(KERN_WARNING
@@ -813,8 +819,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
813 p->name); 819 p->name);
814 else 820 else
815 ret = 1; 821 ret = 1;
816 if (ldev) 822 rcu_read_unlock();
817 dev_put(ldev);
818 } 823 }
819 return ret; 824 return ret;
820} 825}
@@ -1387,29 +1392,25 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1387{ 1392{
1388 int h; 1393 int h;
1389 struct ip6_tnl *t; 1394 struct ip6_tnl *t;
1395 LIST_HEAD(list);
1390 1396
1391 for (h = 0; h < HASH_SIZE; h++) { 1397 for (h = 0; h < HASH_SIZE; h++) {
1392 while ((t = ip6n->tnls_r_l[h]) != NULL) 1398 t = ip6n->tnls_r_l[h];
1393 unregister_netdevice(t->dev); 1399 while (t != NULL) {
1400 unregister_netdevice_queue(t->dev, &list);
1401 t = t->next;
1402 }
1394 } 1403 }
1395 1404
1396 t = ip6n->tnls_wc[0]; 1405 t = ip6n->tnls_wc[0];
1397 unregister_netdevice(t->dev); 1406 unregister_netdevice_queue(t->dev, &list);
1407 unregister_netdevice_many(&list);
1398} 1408}
1399 1409
1400static int ip6_tnl_init_net(struct net *net) 1410static int ip6_tnl_init_net(struct net *net)
1401{ 1411{
1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1402 int err; 1413 int err;
1403 struct ip6_tnl_net *ip6n;
1404
1405 err = -ENOMEM;
1406 ip6n = kzalloc(sizeof(struct ip6_tnl_net), GFP_KERNEL);
1407 if (ip6n == NULL)
1408 goto err_alloc;
1409
1410 err = net_assign_generic(net, ip6_tnl_net_id, ip6n);
1411 if (err < 0)
1412 goto err_assign;
1413 1414
1414 ip6n->tnls[0] = ip6n->tnls_wc; 1415 ip6n->tnls[0] = ip6n->tnls_wc;
1415 ip6n->tnls[1] = ip6n->tnls_r_l; 1416 ip6n->tnls[1] = ip6n->tnls_r_l;
@@ -1432,27 +1433,23 @@ static int ip6_tnl_init_net(struct net *net)
1432err_register: 1433err_register:
1433 free_netdev(ip6n->fb_tnl_dev); 1434 free_netdev(ip6n->fb_tnl_dev);
1434err_alloc_dev: 1435err_alloc_dev:
1435 /* nothing */
1436err_assign:
1437 kfree(ip6n);
1438err_alloc:
1439 return err; 1436 return err;
1440} 1437}
1441 1438
1442static void ip6_tnl_exit_net(struct net *net) 1439static void ip6_tnl_exit_net(struct net *net)
1443{ 1440{
1444 struct ip6_tnl_net *ip6n; 1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1445 1442
1446 ip6n = net_generic(net, ip6_tnl_net_id);
1447 rtnl_lock(); 1443 rtnl_lock();
1448 ip6_tnl_destroy_tunnels(ip6n); 1444 ip6_tnl_destroy_tunnels(ip6n);
1449 rtnl_unlock(); 1445 rtnl_unlock();
1450 kfree(ip6n);
1451} 1446}
1452 1447
1453static struct pernet_operations ip6_tnl_net_ops = { 1448static struct pernet_operations ip6_tnl_net_ops = {
1454 .init = ip6_tnl_init_net, 1449 .init = ip6_tnl_init_net,
1455 .exit = ip6_tnl_exit_net, 1450 .exit = ip6_tnl_exit_net,
1451 .id = &ip6_tnl_net_id,
1452 .size = sizeof(struct ip6_tnl_net),
1456}; 1453};
1457 1454
1458/** 1455/**
@@ -1477,7 +1474,7 @@ static int __init ip6_tunnel_init(void)
1477 goto unreg_ip4ip6; 1474 goto unreg_ip4ip6;
1478 } 1475 }
1479 1476
1480 err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops); 1477 err = register_pernet_device(&ip6_tnl_net_ops);
1481 if (err < 0) 1478 if (err < 0)
1482 goto err_pernet; 1479 goto err_pernet;
1483 return 0; 1480 return 0;
@@ -1501,7 +1498,7 @@ static void __exit ip6_tunnel_cleanup(void)
1501 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6)) 1498 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1502 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n"); 1499 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
1503 1500
1504 unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops); 1501 unregister_pernet_device(&ip6_tnl_net_ops);
1505} 1502}
1506 1503
1507module_init(ip6_tunnel_init); 1504module_init(ip6_tunnel_init);
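The ip6_tunnel changes above convert the tunnel hash tables from a reader/writer lock to RCU lookups plus a spinlock for writers. Below is a single-threaded sketch of that locking shape with the RCU and lock primitives stubbed out so it compiles standalone; the stubs provide none of the real ordering or grace-period guarantees and only show how the lookup and link paths are structured:

/*
 * Readers walk the hash chain under rcu_read_lock(); writers serialise on
 * a spinlock and publish new nodes with rcu_assign_pointer().  All of the
 * primitives below are no-op stand-ins for the kernel ones.
 */
#include <stdio.h>
#include <string.h>

#define rcu_read_lock()            do { } while (0)
#define rcu_read_unlock()          do { } while (0)
#define rcu_dereference(p)         (p)
#define rcu_assign_pointer(p, v)   ((p) = (v))
#define spin_lock_bh(l)            do { (void)(l); } while (0)
#define spin_unlock_bh(l)          do { (void)(l); } while (0)

struct tnl {
        const char *name;
        struct tnl *next;
};

#define HASH_SIZE 16
static struct tnl *tnls[HASH_SIZE];
static int tnl_lock;                     /* stands in for ip6_tnl_lock */

#define for_each_tnl_rcu(t, start) \
        for ((t) = rcu_dereference(start); (t); (t) = rcu_dereference((t)->next))

static struct tnl *tnl_lookup(unsigned int h, const char *name)
{
        struct tnl *t;

        rcu_read_lock();
        for_each_tnl_rcu(t, tnls[h & (HASH_SIZE - 1)])
                if (strcmp(t->name, name) == 0)
                        break;
        rcu_read_unlock();
        return t;
}

static void tnl_link(unsigned int h, struct tnl *t)
{
        struct tnl **tp = &tnls[h & (HASH_SIZE - 1)];

        spin_lock_bh(&tnl_lock);
        t->next = *tp;                   /* let the new node see the chain */
        rcu_assign_pointer(*tp, t);      /* ...then publish it to readers */
        spin_unlock_bh(&tnl_lock);
}

int main(void)
{
        struct tnl a = { .name = "ip6tnl1", .next = NULL };

        tnl_link(3, &a);
        printf("found: %s\n", tnl_lookup(3, "ip6tnl1") ? "yes" : "no");
        return 0;
}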
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 716153941fc..52e0f74fdfe 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -477,7 +477,7 @@ failure:
477 * Delete a VIF entry 477 * Delete a VIF entry
478 */ 478 */
479 479
480static int mif6_delete(struct net *net, int vifi) 480static int mif6_delete(struct net *net, int vifi, struct list_head *head)
481{ 481{
482 struct mif_device *v; 482 struct mif_device *v;
483 struct net_device *dev; 483 struct net_device *dev;
@@ -519,7 +519,7 @@ static int mif6_delete(struct net *net, int vifi)
519 in6_dev->cnf.mc_forwarding--; 519 in6_dev->cnf.mc_forwarding--;
520 520
521 if (v->flags & MIFF_REGISTER) 521 if (v->flags & MIFF_REGISTER)
522 unregister_netdevice(dev); 522 unregister_netdevice_queue(dev, head);
523 523
524 dev_put(dev); 524 dev_put(dev);
525 return 0; 525 return 0;
@@ -976,6 +976,7 @@ static int ip6mr_device_event(struct notifier_block *this,
976 struct net *net = dev_net(dev); 976 struct net *net = dev_net(dev);
977 struct mif_device *v; 977 struct mif_device *v;
978 int ct; 978 int ct;
979 LIST_HEAD(list);
979 980
980 if (event != NETDEV_UNREGISTER) 981 if (event != NETDEV_UNREGISTER)
981 return NOTIFY_DONE; 982 return NOTIFY_DONE;
@@ -983,8 +984,10 @@ static int ip6mr_device_event(struct notifier_block *this,
983 v = &net->ipv6.vif6_table[0]; 984 v = &net->ipv6.vif6_table[0];
984 for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) { 985 for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
985 if (v->dev == dev) 986 if (v->dev == dev)
986 mif6_delete(net, ct); 987 mif6_delete(net, ct, &list);
987 } 988 }
989 unregister_netdevice_many(&list);
990
988 return NOTIFY_DONE; 991 return NOTIFY_DONE;
989} 992}
990 993
@@ -1188,14 +1191,16 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
1188static void mroute_clean_tables(struct net *net) 1191static void mroute_clean_tables(struct net *net)
1189{ 1192{
1190 int i; 1193 int i;
1194 LIST_HEAD(list);
1191 1195
1192 /* 1196 /*
1193 * Shut down all active vif entries 1197 * Shut down all active vif entries
1194 */ 1198 */
1195 for (i = 0; i < net->ipv6.maxvif; i++) { 1199 for (i = 0; i < net->ipv6.maxvif; i++) {
1196 if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC)) 1200 if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
1197 mif6_delete(net, i); 1201 mif6_delete(net, i, &list);
1198 } 1202 }
1203 unregister_netdevice_many(&list);
1199 1204
1200 /* 1205 /*
1201 * Wipe the cache 1206 * Wipe the cache
@@ -1297,7 +1302,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1297 switch (optname) { 1302 switch (optname) {
1298 case MRT6_INIT: 1303 case MRT6_INIT:
1299 if (sk->sk_type != SOCK_RAW || 1304 if (sk->sk_type != SOCK_RAW ||
1300 inet_sk(sk)->num != IPPROTO_ICMPV6) 1305 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1301 return -EOPNOTSUPP; 1306 return -EOPNOTSUPP;
1302 if (optlen < sizeof(int)) 1307 if (optlen < sizeof(int))
1303 return -EINVAL; 1308 return -EINVAL;
@@ -1325,7 +1330,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1325 if (copy_from_user(&mifi, optval, sizeof(mifi_t))) 1330 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1326 return -EFAULT; 1331 return -EFAULT;
1327 rtnl_lock(); 1332 rtnl_lock();
1328 ret = mif6_delete(net, mifi); 1333 ret = mif6_delete(net, mifi, NULL);
1329 rtnl_unlock(); 1334 rtnl_unlock();
1330 return ret; 1335 return ret;
1331 1336
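The ip6mr hunks above queue devices with unregister_netdevice_queue() and flush them with one unregister_netdevice_many() call. The sketch below (plain userspace C with a hand-rolled singly linked list, not the kernel list API) illustrates the batching idea: the expensive synchronisation step is paid once per batch rather than once per device:

/*
 * Callers queue devices on a local list; a single flush then pays the
 * synchronisation cost (modelled by barrier_calls) once for the whole
 * batch instead of once per device.
 */
#include <stdio.h>

struct dev {
        const char *name;
        struct dev *unreg_next;          /* link while queued for teardown */
};

static int barrier_calls;                /* stands in for the rtnl/rcu cost */

static void queue_unregister(struct dev *d, struct dev **head)
{
        d->unreg_next = *head;
        *head = d;
}

static void unregister_many(struct dev *head)
{
        barrier_calls++;                 /* one barrier for the whole batch */
        for (; head; head = head->unreg_next)
                printf("tearing down %s\n", head->name);
}

int main(void)
{
        struct dev mif0 = { .name = "mif0" }, mif1 = { .name = "mif1" };
        struct dev *batch = NULL;

        queue_unregister(&mif0, &batch);
        queue_unregister(&mif1, &batch);
        unregister_many(batch);
        printf("barrier calls: %d\n", barrier_calls);
        return 0;
}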
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4f7aaf6996a..430454ee5ea 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -64,7 +64,7 @@ int ip6_ra_control(struct sock *sk, int sel)
64 struct ip6_ra_chain *ra, *new_ra, **rap; 64 struct ip6_ra_chain *ra, *new_ra, **rap;
65 65
66 /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ 66 /* RA packet may be delivered ONLY to IPPROTO_RAW socket */
67 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW) 67 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW)
68 return -ENOPROTOOPT; 68 return -ENOPROTOOPT;
69 69
70 new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; 70 new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
@@ -106,7 +106,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
106 if (inet_sk(sk)->is_icsk) { 106 if (inet_sk(sk)->is_icsk) {
107 if (opt && 107 if (opt &&
108 !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) && 108 !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
109 inet_sk(sk)->daddr != LOOPBACK4_IPV6) { 109 inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) {
110 struct inet_connection_sock *icsk = inet_csk(sk); 110 struct inet_connection_sock *icsk = inet_csk(sk);
111 icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; 111 icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
112 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); 112 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -234,7 +234,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
234 234
235 case IPV6_V6ONLY: 235 case IPV6_V6ONLY:
236 if (optlen < sizeof(int) || 236 if (optlen < sizeof(int) ||
237 inet_sk(sk)->num) 237 inet_sk(sk)->inet_num)
238 goto e_inval; 238 goto e_inval;
239 np->ipv6only = valbool; 239 np->ipv6only = valbool;
240 retv = 0; 240 retv = 0;
@@ -424,6 +424,7 @@ sticky_done:
424 424
425 fl.fl6_flowlabel = 0; 425 fl.fl6_flowlabel = 0;
426 fl.oif = sk->sk_bound_dev_if; 426 fl.oif = sk->sk_bound_dev_if;
427 fl.mark = sk->sk_mark;
427 428
428 if (optlen == 0) 429 if (optlen == 0)
429 goto update; 430 goto update;
@@ -665,7 +666,7 @@ done:
665 case IPV6_MTU_DISCOVER: 666 case IPV6_MTU_DISCOVER:
666 if (optlen < sizeof(int)) 667 if (optlen < sizeof(int))
667 goto e_inval; 668 goto e_inval;
668 if (val<0 || val>3) 669 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
669 goto e_inval; 670 goto e_inval;
670 np->pmtudisc = val; 671 np->pmtudisc = val;
671 retv = 0; 672 retv = 0;
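The IPV6_MTU_DISCOVER hunk above replaces the bare "0..3" range with the named IP_PMTUDISC_* bounds. A tiny standalone illustration of that check follows; the constant values are re-defined locally (mirroring the kernel's values of the time, DONT=0 through PROBE=3) so the example compiles on its own:

#include <stdio.h>

#define IP_PMTUDISC_DONT   0    /* never set DF */
#define IP_PMTUDISC_WANT   1    /* use per-route hints */
#define IP_PMTUDISC_DO     2    /* always set DF */
#define IP_PMTUDISC_PROBE  3    /* set DF but ignore the cached path MTU */

static int pmtudisc_valid(int val)
{
        return val >= IP_PMTUDISC_DONT && val <= IP_PMTUDISC_PROBE;
}

int main(void)
{
        int val;

        for (val = -1; val <= 4; val++)
                printf("val %2d -> %s\n", val,
                       pmtudisc_valid(val) ? "accepted" : "EINVAL");
        return 0;
}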
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f9fcf690bd5..1f9c44442e6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2375,9 +2375,9 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2375 struct net *net = seq_file_net(seq); 2375 struct net *net = seq_file_net(seq);
2376 2376
2377 state->idev = NULL; 2377 state->idev = NULL;
2378 for_each_netdev(net, state->dev) { 2378 for_each_netdev_rcu(net, state->dev) {
2379 struct inet6_dev *idev; 2379 struct inet6_dev *idev;
2380 idev = in6_dev_get(state->dev); 2380 idev = __in6_dev_get(state->dev);
2381 if (!idev) 2381 if (!idev)
2382 continue; 2382 continue;
2383 read_lock_bh(&idev->lock); 2383 read_lock_bh(&idev->lock);
@@ -2387,7 +2387,6 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2387 break; 2387 break;
2388 } 2388 }
2389 read_unlock_bh(&idev->lock); 2389 read_unlock_bh(&idev->lock);
2390 in6_dev_put(idev);
2391 } 2390 }
2392 return im; 2391 return im;
2393} 2392}
@@ -2398,16 +2397,15 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
2398 2397
2399 im = im->next; 2398 im = im->next;
2400 while (!im) { 2399 while (!im) {
2401 if (likely(state->idev != NULL)) { 2400 if (likely(state->idev != NULL))
2402 read_unlock_bh(&state->idev->lock); 2401 read_unlock_bh(&state->idev->lock);
2403 in6_dev_put(state->idev); 2402
2404 } 2403 state->dev = next_net_device_rcu(state->dev);
2405 state->dev = next_net_device(state->dev);
2406 if (!state->dev) { 2404 if (!state->dev) {
2407 state->idev = NULL; 2405 state->idev = NULL;
2408 break; 2406 break;
2409 } 2407 }
2410 state->idev = in6_dev_get(state->dev); 2408 state->idev = __in6_dev_get(state->dev);
2411 if (!state->idev) 2409 if (!state->idev)
2412 continue; 2410 continue;
2413 read_lock_bh(&state->idev->lock); 2411 read_lock_bh(&state->idev->lock);
@@ -2426,31 +2424,31 @@ static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2426} 2424}
2427 2425
2428static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) 2426static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2429 __acquires(dev_base_lock) 2427 __acquires(RCU)
2430{ 2428{
2431 read_lock(&dev_base_lock); 2429 rcu_read_lock();
2432 return igmp6_mc_get_idx(seq, *pos); 2430 return igmp6_mc_get_idx(seq, *pos);
2433} 2431}
2434 2432
2435static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2433static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2436{ 2434{
2437 struct ifmcaddr6 *im; 2435 struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2438 im = igmp6_mc_get_next(seq, v); 2436
2439 ++*pos; 2437 ++*pos;
2440 return im; 2438 return im;
2441} 2439}
2442 2440
2443static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) 2441static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2444 __releases(dev_base_lock) 2442 __releases(RCU)
2445{ 2443{
2446 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); 2444 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2445
2447 if (likely(state->idev != NULL)) { 2446 if (likely(state->idev != NULL)) {
2448 read_unlock_bh(&state->idev->lock); 2447 read_unlock_bh(&state->idev->lock);
2449 in6_dev_put(state->idev);
2450 state->idev = NULL; 2448 state->idev = NULL;
2451 } 2449 }
2452 state->dev = NULL; 2450 state->dev = NULL;
2453 read_unlock(&dev_base_lock); 2451 rcu_read_unlock();
2454} 2452}
2455 2453
2456static int igmp6_mc_seq_show(struct seq_file *seq, void *v) 2454static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
@@ -2507,9 +2505,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2507 2505
2508 state->idev = NULL; 2506 state->idev = NULL;
2509 state->im = NULL; 2507 state->im = NULL;
2510 for_each_netdev(net, state->dev) { 2508 for_each_netdev_rcu(net, state->dev) {
2511 struct inet6_dev *idev; 2509 struct inet6_dev *idev;
2512 idev = in6_dev_get(state->dev); 2510 idev = __in6_dev_get(state->dev);
2513 if (unlikely(idev == NULL)) 2511 if (unlikely(idev == NULL))
2514 continue; 2512 continue;
2515 read_lock_bh(&idev->lock); 2513 read_lock_bh(&idev->lock);
@@ -2525,7 +2523,6 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2525 spin_unlock_bh(&im->mca_lock); 2523 spin_unlock_bh(&im->mca_lock);
2526 } 2524 }
2527 read_unlock_bh(&idev->lock); 2525 read_unlock_bh(&idev->lock);
2528 in6_dev_put(idev);
2529 } 2526 }
2530 return psf; 2527 return psf;
2531} 2528}
@@ -2539,16 +2536,15 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
2539 spin_unlock_bh(&state->im->mca_lock); 2536 spin_unlock_bh(&state->im->mca_lock);
2540 state->im = state->im->next; 2537 state->im = state->im->next;
2541 while (!state->im) { 2538 while (!state->im) {
2542 if (likely(state->idev != NULL)) { 2539 if (likely(state->idev != NULL))
2543 read_unlock_bh(&state->idev->lock); 2540 read_unlock_bh(&state->idev->lock);
2544 in6_dev_put(state->idev); 2541
2545 } 2542 state->dev = next_net_device_rcu(state->dev);
2546 state->dev = next_net_device(state->dev);
2547 if (!state->dev) { 2543 if (!state->dev) {
2548 state->idev = NULL; 2544 state->idev = NULL;
2549 goto out; 2545 goto out;
2550 } 2546 }
2551 state->idev = in6_dev_get(state->dev); 2547 state->idev = __in6_dev_get(state->dev);
2552 if (!state->idev) 2548 if (!state->idev)
2553 continue; 2549 continue;
2554 read_lock_bh(&state->idev->lock); 2550 read_lock_bh(&state->idev->lock);
@@ -2573,9 +2569,9 @@ static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
2573} 2569}
2574 2570
2575static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) 2571static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2576 __acquires(dev_base_lock) 2572 __acquires(RCU)
2577{ 2573{
2578 read_lock(&dev_base_lock); 2574 rcu_read_lock();
2579 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2575 return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2580} 2576}
2581 2577
@@ -2591,7 +2587,7 @@ static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2591} 2587}
2592 2588
2593static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) 2589static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2594 __releases(dev_base_lock) 2590 __releases(RCU)
2595{ 2591{
2596 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); 2592 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2597 if (likely(state->im != NULL)) { 2593 if (likely(state->im != NULL)) {
@@ -2600,11 +2596,10 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
2600 } 2596 }
2601 if (likely(state->idev != NULL)) { 2597 if (likely(state->idev != NULL)) {
2602 read_unlock_bh(&state->idev->lock); 2598 read_unlock_bh(&state->idev->lock);
2603 in6_dev_put(state->idev);
2604 state->idev = NULL; 2599 state->idev = NULL;
2605 } 2600 }
2606 state->dev = NULL; 2601 state->dev = NULL;
2607 read_unlock(&dev_base_lock); 2602 rcu_read_unlock();
2608} 2603}
2609 2604
2610static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) 2605static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3d0520e455d..c4585279809 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -598,6 +598,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
598 icmp6h.icmp6_solicited = solicited; 598 icmp6h.icmp6_solicited = solicited;
599 icmp6h.icmp6_override = override; 599 icmp6h.icmp6_override = override;
600 600
601 inc_opt |= ifp->idev->cnf.force_tllao;
601 __ndisc_send(dev, neigh, daddr, src_addr, 602 __ndisc_send(dev, neigh, daddr, src_addr,
602 &icmp6h, solicited_addr, 603 &icmp6h, solicited_addr,
603 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); 604 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 14e52aa624c..7854052be60 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -498,10 +498,9 @@ ipq_rcv_nl_event(struct notifier_block *this,
498{ 498{
499 struct netlink_notify *n = ptr; 499 struct netlink_notify *n = ptr;
500 500
501 if (event == NETLINK_URELEASE && 501 if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
502 n->protocol == NETLINK_IP6_FW && n->pid) {
503 write_lock_bh(&queue_lock); 502 write_lock_bh(&queue_lock);
504 if ((n->net == &init_net) && (n->pid == peer_pid)) 503 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
505 __ipq_reset(); 504 __ipq_reset();
506 write_unlock_bh(&queue_lock); 505 write_unlock_bh(&queue_lock);
507 } 506 }
@@ -623,7 +622,7 @@ cleanup_netlink_notifier:
623static void __exit ip6_queue_fini(void) 622static void __exit ip6_queue_fini(void)
624{ 623{
625 nf_unregister_queue_handlers(&nfqh); 624 nf_unregister_queue_handlers(&nfqh);
626 synchronize_net(); 625
627 ipq_flush(NULL, 0); 626 ipq_flush(NULL, 0);
628 627
629#ifdef CONFIG_SYSCTL 628#ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index cc9f8ef303f..480d7f8c980 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -105,9 +105,9 @@ ip6_packet_match(const struct sk_buff *skb,
105#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) 105#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
106 106
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, 107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP) 108 &ip6info->src), IP6T_INV_SRCIP) ||
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, 109 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) { 110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n"); 111 dprintf("Source or dest mismatch.\n");
112/* 112/*
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, 113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
@@ -277,11 +277,11 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
277 } else if (s == e) { 277 } else if (s == e) {
278 (*rulenum)++; 278 (*rulenum)++;
279 279
280 if (s->target_offset == sizeof(struct ip6t_entry) 280 if (s->target_offset == sizeof(struct ip6t_entry) &&
281 && strcmp(t->target.u.kernel.target->name, 281 strcmp(t->target.u.kernel.target->name,
282 IP6T_STANDARD_TARGET) == 0 282 IP6T_STANDARD_TARGET) == 0 &&
283 && t->verdict < 0 283 t->verdict < 0 &&
284 && unconditional(&s->ipv6)) { 284 unconditional(&s->ipv6)) {
285 /* Tail of chains: STANDARD target (return/policy) */ 285 /* Tail of chains: STANDARD target (return/policy) */
286 *comment = *chainname == hookname 286 *comment = *chainname == hookname
287 ? comments[NF_IP6_TRACE_COMMENT_POLICY] 287 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -418,8 +418,8 @@ ip6t_do_table(struct sk_buff *skb,
418 back = get_entry(table_base, back->comefrom); 418 back = get_entry(table_base, back->comefrom);
419 continue; 419 continue;
420 } 420 }
421 if (table_base + v != ip6t_next_entry(e) 421 if (table_base + v != ip6t_next_entry(e) &&
422 && !(e->ipv6.flags & IP6T_F_GOTO)) { 422 !(e->ipv6.flags & IP6T_F_GOTO)) {
423 /* Save old back ptr in next entry */ 423 /* Save old back ptr in next entry */
424 struct ip6t_entry *next = ip6t_next_entry(e); 424 struct ip6t_entry *next = ip6t_next_entry(e);
425 next->comefrom = (void *)back - table_base; 425 next->comefrom = (void *)back - table_base;
@@ -505,11 +505,11 @@ mark_source_chains(struct xt_table_info *newinfo,
505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); 505 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
506 506
507 /* Unconditional return/END. */ 507 /* Unconditional return/END. */
508 if ((e->target_offset == sizeof(struct ip6t_entry) 508 if ((e->target_offset == sizeof(struct ip6t_entry) &&
509 && (strcmp(t->target.u.user.name, 509 (strcmp(t->target.u.user.name,
510 IP6T_STANDARD_TARGET) == 0) 510 IP6T_STANDARD_TARGET) == 0) &&
511 && t->verdict < 0 511 t->verdict < 0 &&
512 && unconditional(&e->ipv6)) || visited) { 512 unconditional(&e->ipv6)) || visited) {
513 unsigned int oldpos, size; 513 unsigned int oldpos, size;
514 514
515 if ((strcmp(t->target.u.user.name, 515 if ((strcmp(t->target.u.user.name,
@@ -556,8 +556,8 @@ mark_source_chains(struct xt_table_info *newinfo,
556 int newpos = t->verdict; 556 int newpos = t->verdict;
557 557
558 if (strcmp(t->target.u.user.name, 558 if (strcmp(t->target.u.user.name,
559 IP6T_STANDARD_TARGET) == 0 559 IP6T_STANDARD_TARGET) == 0 &&
560 && newpos >= 0) { 560 newpos >= 0) {
561 if (newpos > newinfo->size - 561 if (newpos > newinfo->size -
562 sizeof(struct ip6t_entry)) { 562 sizeof(struct ip6t_entry)) {
563 duprintf("mark_source_chains: " 563 duprintf("mark_source_chains: "
@@ -767,8 +767,8 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
767{ 767{
768 unsigned int h; 768 unsigned int h;
769 769
770 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 770 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
771 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { 771 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
772 duprintf("Bad offset %p\n", e); 772 duprintf("Bad offset %p\n", e);
773 return -EINVAL; 773 return -EINVAL;
774 } 774 }
@@ -1584,8 +1584,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1584 int ret, off, h; 1584 int ret, off, h;
1585 1585
1586 duprintf("check_compat_entry_size_and_hooks %p\n", e); 1586 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1587 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 1587 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1588 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { 1588 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1589 duprintf("Bad offset %p, limit = %p\n", e, limit); 1589 duprintf("Bad offset %p, limit = %p\n", e, limit);
1590 return -EINVAL; 1590 return -EINVAL;
1591 } 1591 }
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 7018cac4fdd..b285fdf1905 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -249,8 +249,8 @@ static void dump_packet(const struct nf_loginfo *info,
249 /* Max length: 11 "URGP=65535 " */ 249 /* Max length: 11 "URGP=65535 " */
250 printk("URGP=%u ", ntohs(th->urg_ptr)); 250 printk("URGP=%u ", ntohs(th->urg_ptr));
251 251
252 if ((logflags & IP6T_LOG_TCPOPT) 252 if ((logflags & IP6T_LOG_TCPOPT) &&
253 && th->doff * 4 > sizeof(struct tcphdr)) { 253 th->doff * 4 > sizeof(struct tcphdr)) {
254 u_int8_t _opt[60 - sizeof(struct tcphdr)]; 254 u_int8_t _opt[60 - sizeof(struct tcphdr)];
255 const u_int8_t *op; 255 const u_int8_t *op;
256 unsigned int i; 256 unsigned int i;
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 5a7f00cd15c..8311ca31816 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -223,8 +223,8 @@ static bool reject_tg6_check(const struct xt_tgchk_param *par)
223 return false; 223 return false;
224 } else if (rejinfo->with == IP6T_TCP_RESET) { 224 } else if (rejinfo->with == IP6T_TCP_RESET) {
225 /* Must specify that it's a TCP packet */ 225 /* Must specify that it's a TCP packet */
226 if (e->ipv6.proto != IPPROTO_TCP 226 if (e->ipv6.proto != IPPROTO_TCP ||
227 || (e->ipv6.invflags & XT_INV_PROTO)) { 227 (e->ipv6.invflags & XT_INV_PROTO)) {
228 printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n"); 228 printk("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
229 return false; 229 return false;
230 } 230 }
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 3a82f24746b..ac0b7c629d7 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -77,17 +77,14 @@ static bool ah_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
77 ahinfo->hdrres, ah->reserved, 77 ahinfo->hdrres, ah->reserved,
78 !(ahinfo->hdrres && ah->reserved)); 78 !(ahinfo->hdrres && ah->reserved));
79 79
80 return (ah != NULL) 80 return (ah != NULL) &&
81 && 81 spi_match(ahinfo->spis[0], ahinfo->spis[1],
82 spi_match(ahinfo->spis[0], ahinfo->spis[1], 82 ntohl(ah->spi),
83 ntohl(ah->spi), 83 !!(ahinfo->invflags & IP6T_AH_INV_SPI)) &&
84 !!(ahinfo->invflags & IP6T_AH_INV_SPI)) 84 (!ahinfo->hdrlen ||
85 && 85 (ahinfo->hdrlen == hdrlen) ^
86 (!ahinfo->hdrlen || 86 !!(ahinfo->invflags & IP6T_AH_INV_LEN)) &&
87 (ahinfo->hdrlen == hdrlen) ^ 87 !(ahinfo->hdrres && ah->reserved);
88 !!(ahinfo->invflags & IP6T_AH_INV_LEN))
89 &&
90 !(ahinfo->hdrres && ah->reserved);
91} 88}
92 89
93static bool ah_mt6_check(const struct xt_mtchk_param *par) 90static bool ah_mt6_check(const struct xt_mtchk_param *par)
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index 673aa0a5084..7b91c2598ed 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -70,41 +70,36 @@ frag_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
70 pr_debug("res %02X %02X%04X %02X ", 70 pr_debug("res %02X %02X%04X %02X ",
71 fraginfo->flags & IP6T_FRAG_RES, fh->reserved, 71 fraginfo->flags & IP6T_FRAG_RES, fh->reserved,
72 ntohs(fh->frag_off) & 0x6, 72 ntohs(fh->frag_off) & 0x6,
73 !((fraginfo->flags & IP6T_FRAG_RES) 73 !((fraginfo->flags & IP6T_FRAG_RES) &&
74 && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); 74 (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
75 pr_debug("first %02X %02X %02X ", 75 pr_debug("first %02X %02X %02X ",
76 fraginfo->flags & IP6T_FRAG_FST, 76 fraginfo->flags & IP6T_FRAG_FST,
77 ntohs(fh->frag_off) & ~0x7, 77 ntohs(fh->frag_off) & ~0x7,
78 !((fraginfo->flags & IP6T_FRAG_FST) 78 !((fraginfo->flags & IP6T_FRAG_FST) &&
79 && (ntohs(fh->frag_off) & ~0x7))); 79 (ntohs(fh->frag_off) & ~0x7)));
80 pr_debug("mf %02X %02X %02X ", 80 pr_debug("mf %02X %02X %02X ",
81 fraginfo->flags & IP6T_FRAG_MF, 81 fraginfo->flags & IP6T_FRAG_MF,
82 ntohs(fh->frag_off) & IP6_MF, 82 ntohs(fh->frag_off) & IP6_MF,
83 !((fraginfo->flags & IP6T_FRAG_MF) 83 !((fraginfo->flags & IP6T_FRAG_MF) &&
84 && !((ntohs(fh->frag_off) & IP6_MF)))); 84 !((ntohs(fh->frag_off) & IP6_MF))));
85 pr_debug("last %02X %02X %02X\n", 85 pr_debug("last %02X %02X %02X\n",
86 fraginfo->flags & IP6T_FRAG_NMF, 86 fraginfo->flags & IP6T_FRAG_NMF,
87 ntohs(fh->frag_off) & IP6_MF, 87 ntohs(fh->frag_off) & IP6_MF,
88 !((fraginfo->flags & IP6T_FRAG_NMF) 88 !((fraginfo->flags & IP6T_FRAG_NMF) &&
89 && (ntohs(fh->frag_off) & IP6_MF))); 89 (ntohs(fh->frag_off) & IP6_MF)));
90 90
91 return (fh != NULL) 91 return (fh != NULL) &&
92 && 92 id_match(fraginfo->ids[0], fraginfo->ids[1],
93 id_match(fraginfo->ids[0], fraginfo->ids[1], 93 ntohl(fh->identification),
94 ntohl(fh->identification), 94 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)) &&
95 !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)) 95 !((fraginfo->flags & IP6T_FRAG_RES) &&
96 && 96 (fh->reserved || (ntohs(fh->frag_off) & 0x6))) &&
97 !((fraginfo->flags & IP6T_FRAG_RES) 97 !((fraginfo->flags & IP6T_FRAG_FST) &&
98 && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) 98 (ntohs(fh->frag_off) & ~0x7)) &&
99 && 99 !((fraginfo->flags & IP6T_FRAG_MF) &&
100 !((fraginfo->flags & IP6T_FRAG_FST) 100 !(ntohs(fh->frag_off) & IP6_MF)) &&
101 && (ntohs(fh->frag_off) & ~0x7)) 101 !((fraginfo->flags & IP6T_FRAG_NMF) &&
102 && 102 (ntohs(fh->frag_off) & IP6_MF));
103 !((fraginfo->flags & IP6T_FRAG_MF)
104 && !(ntohs(fh->frag_off) & IP6_MF))
105 &&
106 !((fraginfo->flags & IP6T_FRAG_NMF)
107 && (ntohs(fh->frag_off) & IP6_MF));
108} 103}
109 104
110static bool frag_mt6_check(const struct xt_mtchk_param *par) 105static bool frag_mt6_check(const struct xt_mtchk_param *par)
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 356b8d6f6ba..b77307fc874 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -92,16 +92,13 @@ static bool rt_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
92 !((rtinfo->flags & IP6T_RT_RES) && 92 !((rtinfo->flags & IP6T_RT_RES) &&
93 (((const struct rt0_hdr *)rh)->reserved))); 93 (((const struct rt0_hdr *)rh)->reserved)));
94 94
95 ret = (rh != NULL) 95 ret = (rh != NULL) &&
96 &&
97 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], 96 (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
98 rh->segments_left, 97 rh->segments_left,
99 !!(rtinfo->invflags & IP6T_RT_INV_SGS))) 98 !!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
100 &&
101 (!(rtinfo->flags & IP6T_RT_LEN) || 99 (!(rtinfo->flags & IP6T_RT_LEN) ||
102 ((rtinfo->hdrlen == hdrlen) ^ 100 ((rtinfo->hdrlen == hdrlen) ^
103 !!(rtinfo->invflags & IP6T_RT_INV_LEN))) 101 !!(rtinfo->invflags & IP6T_RT_INV_LEN))) &&
104 &&
105 (!(rtinfo->flags & IP6T_RT_TYP) || 102 (!(rtinfo->flags & IP6T_RT_TYP) ||
106 ((rtinfo->rt_type == rh->type) ^ 103 ((rtinfo->rt_type == rh->type) ^
107 !!(rtinfo->invflags & IP6T_RT_INV_TYP))); 104 !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 6f4383ad86f..ad378efd0eb 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -79,8 +79,8 @@ ip6t_local_out_hook(unsigned int hook,
79{ 79{
80#if 0 80#if 0
81 /* root is playing with raw sockets. */ 81 /* root is playing with raw sockets. */
82 if (skb->len < sizeof(struct iphdr) 82 if (skb->len < sizeof(struct iphdr) ||
83 || ip_hdrlen(skb) < sizeof(struct iphdr)) { 83 ip_hdrlen(skb) < sizeof(struct iphdr)) {
84 if (net_ratelimit()) 84 if (net_ratelimit())
85 printk("ip6t_hook: happy cracking.\n"); 85 printk("ip6t_hook: happy cracking.\n");
86 return NF_ACCEPT; 86 return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 0ad91433ed6..a929c19d30e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -102,8 +102,8 @@ ip6t_local_out_hook(unsigned int hook,
102 102
103#if 0 103#if 0
104 /* root is playing with raw sockets. */ 104 /* root is playing with raw sockets. */
105 if (skb->len < sizeof(struct iphdr) 105 if (skb->len < sizeof(struct iphdr) ||
106 || ip_hdrlen(skb) < sizeof(struct iphdr)) { 106 ip_hdrlen(skb) < sizeof(struct iphdr)) {
107 if (net_ratelimit()) 107 if (net_ratelimit())
108 printk("ip6t_hook: happy cracking.\n"); 108 printk("ip6t_hook: happy cracking.\n");
109 return NF_ACCEPT; 109 return NF_ACCEPT;
@@ -122,11 +122,11 @@ ip6t_local_out_hook(unsigned int hook,
122 ret = ip6t_do_table(skb, hook, in, out, 122 ret = ip6t_do_table(skb, hook, in, out,
123 dev_net(out)->ipv6.ip6table_mangle); 123 dev_net(out)->ipv6.ip6table_mangle);
124 124
125 if (ret != NF_DROP && ret != NF_STOLEN 125 if (ret != NF_DROP && ret != NF_STOLEN &&
126 && (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) 126 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
127 || memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) 127 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
128 || skb->mark != mark 128 skb->mark != mark ||
129 || ipv6_hdr(skb)->hop_limit != hop_limit)) 129 ipv6_hdr(skb)->hop_limit != hop_limit))
130 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; 130 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
131 131
132 return ret; 132 return ret;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 2acadc8c788..c7b8bd1d798 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -244,18 +244,18 @@ static const struct nla_policy icmpv6_nla_policy[CTA_PROTO_MAX+1] = {
244static int icmpv6_nlattr_to_tuple(struct nlattr *tb[], 244static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
245 struct nf_conntrack_tuple *tuple) 245 struct nf_conntrack_tuple *tuple)
246{ 246{
247 if (!tb[CTA_PROTO_ICMPV6_TYPE] 247 if (!tb[CTA_PROTO_ICMPV6_TYPE] ||
248 || !tb[CTA_PROTO_ICMPV6_CODE] 248 !tb[CTA_PROTO_ICMPV6_CODE] ||
249 || !tb[CTA_PROTO_ICMPV6_ID]) 249 !tb[CTA_PROTO_ICMPV6_ID])
250 return -EINVAL; 250 return -EINVAL;
251 251
252 tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]); 252 tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]);
253 tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]); 253 tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]);
254 tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]); 254 tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]);
255 255
256 if (tuple->dst.u.icmp.type < 128 256 if (tuple->dst.u.icmp.type < 128 ||
257 || tuple->dst.u.icmp.type - 128 >= sizeof(invmap) 257 tuple->dst.u.icmp.type - 128 >= sizeof(invmap) ||
258 || !invmap[tuple->dst.u.icmp.type - 128]) 258 !invmap[tuple->dst.u.icmp.type - 128])
259 return -EINVAL; 259 return -EINVAL;
260 260
261 return 0; 261 return 0;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4f24570b086..926ce8eeffa 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -72,7 +72,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
72 int is_multicast = ipv6_addr_is_multicast(loc_addr); 72 int is_multicast = ipv6_addr_is_multicast(loc_addr);
73 73
74 sk_for_each_from(sk, node) 74 sk_for_each_from(sk, node)
75 if (inet_sk(sk)->num == num) { 75 if (inet_sk(sk)->inet_num == num) {
76 struct ipv6_pinfo *np = inet6_sk(sk); 76 struct ipv6_pinfo *np = inet6_sk(sk);
77 77
78 if (!net_eq(sock_net(sk), net)) 78 if (!net_eq(sock_net(sk), net))
@@ -249,7 +249,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
249 249
250 /* Raw sockets are IPv6 only */ 250 /* Raw sockets are IPv6 only */
251 if (addr_type == IPV6_ADDR_MAPPED) 251 if (addr_type == IPV6_ADDR_MAPPED)
252 return(-EADDRNOTAVAIL); 252 return -EADDRNOTAVAIL;
253 253
254 lock_sock(sk); 254 lock_sock(sk);
255 255
@@ -257,6 +257,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
257 if (sk->sk_state != TCP_CLOSE) 257 if (sk->sk_state != TCP_CLOSE)
258 goto out; 258 goto out;
259 259
260 rcu_read_lock();
260 /* Check if the address belongs to the host. */ 261 /* Check if the address belongs to the host. */
261 if (addr_type != IPV6_ADDR_ANY) { 262 if (addr_type != IPV6_ADDR_ANY) {
262 struct net_device *dev = NULL; 263 struct net_device *dev = NULL;
@@ -272,13 +273,13 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
272 273
273 /* Binding to link-local address requires an interface */ 274 /* Binding to link-local address requires an interface */
274 if (!sk->sk_bound_dev_if) 275 if (!sk->sk_bound_dev_if)
275 goto out; 276 goto out_unlock;
276 277
277 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); 278 err = -ENODEV;
278 if (!dev) { 279 dev = dev_get_by_index_rcu(sock_net(sk),
279 err = -ENODEV; 280 sk->sk_bound_dev_if);
280 goto out; 281 if (!dev)
281 } 282 goto out_unlock;
282 } 283 }
283 284
284 /* ipv4 addr of the socket is invalid. Only the 285 /* ipv4 addr of the socket is invalid. Only the
@@ -289,20 +290,18 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
289 err = -EADDRNOTAVAIL; 290 err = -EADDRNOTAVAIL;
290 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, 291 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
291 dev, 0)) { 292 dev, 0)) {
292 if (dev) 293 goto out_unlock;
293 dev_put(dev);
294 goto out;
295 } 294 }
296 } 295 }
297 if (dev)
298 dev_put(dev);
299 } 296 }
300 297
301 inet->rcv_saddr = inet->saddr = v4addr; 298 inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
302 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); 299 ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
303 if (!(addr_type & IPV6_ADDR_MULTICAST)) 300 if (!(addr_type & IPV6_ADDR_MULTICAST))
304 ipv6_addr_copy(&np->saddr, &addr->sin6_addr); 301 ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
305 err = 0; 302 err = 0;
303out_unlock:
304 rcu_read_unlock();
306out: 305out:
307 release_sock(sk); 306 release_sock(sk);
308 return err; 307 return err;
@@ -381,8 +380,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
381 } 380 }
382 381
383 /* Charge it to the socket. */ 382 /* Charge it to the socket. */
384 if (sock_queue_rcv_skb(sk,skb)<0) { 383 if (sock_queue_rcv_skb(sk, skb) < 0) {
385 atomic_inc(&sk->sk_drops);
386 kfree_skb(skb); 384 kfree_skb(skb);
387 return NET_RX_DROP; 385 return NET_RX_DROP;
388 } 386 }
@@ -416,14 +414,14 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
416 skb_network_header_len(skb)); 414 skb_network_header_len(skb));
417 if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 415 if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
418 &ipv6_hdr(skb)->daddr, 416 &ipv6_hdr(skb)->daddr,
419 skb->len, inet->num, skb->csum)) 417 skb->len, inet->inet_num, skb->csum))
420 skb->ip_summed = CHECKSUM_UNNECESSARY; 418 skb->ip_summed = CHECKSUM_UNNECESSARY;
421 } 419 }
422 if (!skb_csum_unnecessary(skb)) 420 if (!skb_csum_unnecessary(skb))
423 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 421 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
424 &ipv6_hdr(skb)->daddr, 422 &ipv6_hdr(skb)->daddr,
425 skb->len, 423 skb->len,
426 inet->num, 0)); 424 inet->inet_num, 0));
427 425
428 if (inet->hdrincl) { 426 if (inet->hdrincl) {
429 if (skb_checksum_complete(skb)) { 427 if (skb_checksum_complete(skb)) {
@@ -497,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
497 sin6->sin6_scope_id = IP6CB(skb)->iif; 495 sin6->sin6_scope_id = IP6CB(skb)->iif;
498 } 496 }
499 497
500 sock_recv_timestamp(msg, sk, skb); 498 sock_recv_ts_and_drops(msg, sk, skb);
501 499
502 if (np->rxopt.all) 500 if (np->rxopt.all)
503 datagram_recv_ctl(sk, msg, skb); 501 datagram_recv_ctl(sk, msg, skb);
@@ -518,7 +516,6 @@ csum_copy_err:
518 as some normal condition. 516 as some normal condition.
519 */ 517 */
520 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; 518 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
521 atomic_inc(&sk->sk_drops);
522 goto out; 519 goto out;
523} 520}
524 521
@@ -766,8 +763,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
766 proto = ntohs(sin6->sin6_port); 763 proto = ntohs(sin6->sin6_port);
767 764
768 if (!proto) 765 if (!proto)
769 proto = inet->num; 766 proto = inet->inet_num;
770 else if (proto != inet->num) 767 else if (proto != inet->inet_num)
771 return(-EINVAL); 768 return(-EINVAL);
772 769
773 if (proto > 255) 770 if (proto > 255)
@@ -800,7 +797,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
800 if (sk->sk_state != TCP_ESTABLISHED) 797 if (sk->sk_state != TCP_ESTABLISHED)
801 return -EDESTADDRREQ; 798 return -EDESTADDRREQ;
802 799
803 proto = inet->num; 800 proto = inet->inet_num;
804 daddr = &np->daddr; 801 daddr = &np->daddr;
805 fl.fl6_flowlabel = np->flow_label; 802 fl.fl6_flowlabel = np->flow_label;
806 } 803 }
@@ -967,7 +964,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
967 964
968 switch (optname) { 965 switch (optname) {
969 case IPV6_CHECKSUM: 966 case IPV6_CHECKSUM:
970 if (inet_sk(sk)->num == IPPROTO_ICMPV6 && 967 if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
971 level == IPPROTO_IPV6) { 968 level == IPPROTO_IPV6) {
972 /* 969 /*
973 * RFC3542 tells that IPV6_CHECKSUM socket 970 * RFC3542 tells that IPV6_CHECKSUM socket
@@ -1007,7 +1004,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
1007 break; 1004 break;
1008 1005
1009 case SOL_ICMPV6: 1006 case SOL_ICMPV6:
1010 if (inet_sk(sk)->num != IPPROTO_ICMPV6) 1007 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1011 return -EOPNOTSUPP; 1008 return -EOPNOTSUPP;
1012 return rawv6_seticmpfilter(sk, level, optname, optval, 1009 return rawv6_seticmpfilter(sk, level, optname, optval,
1013 optlen); 1010 optlen);
@@ -1030,7 +1027,7 @@ static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
1030 case SOL_RAW: 1027 case SOL_RAW:
1031 break; 1028 break;
1032 case SOL_ICMPV6: 1029 case SOL_ICMPV6:
1033 if (inet_sk(sk)->num != IPPROTO_ICMPV6) 1030 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1034 return -EOPNOTSUPP; 1031 return -EOPNOTSUPP;
1035 return rawv6_seticmpfilter(sk, level, optname, optval, optlen); 1032 return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
1036 case SOL_IPV6: 1033 case SOL_IPV6:
@@ -1087,7 +1084,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
1087 break; 1084 break;
1088 1085
1089 case SOL_ICMPV6: 1086 case SOL_ICMPV6:
1090 if (inet_sk(sk)->num != IPPROTO_ICMPV6) 1087 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1091 return -EOPNOTSUPP; 1088 return -EOPNOTSUPP;
1092 return rawv6_geticmpfilter(sk, level, optname, optval, 1089 return rawv6_geticmpfilter(sk, level, optname, optval,
1093 optlen); 1090 optlen);
@@ -1110,7 +1107,7 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
1110 case SOL_RAW: 1107 case SOL_RAW:
1111 break; 1108 break;
1112 case SOL_ICMPV6: 1109 case SOL_ICMPV6:
1113 if (inet_sk(sk)->num != IPPROTO_ICMPV6) 1110 if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1114 return -EOPNOTSUPP; 1111 return -EOPNOTSUPP;
1115 return rawv6_geticmpfilter(sk, level, optname, optval, optlen); 1112 return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
1116 case SOL_IPV6: 1113 case SOL_IPV6:
@@ -1157,7 +1154,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1157 1154
1158static void rawv6_close(struct sock *sk, long timeout) 1155static void rawv6_close(struct sock *sk, long timeout)
1159{ 1156{
1160 if (inet_sk(sk)->num == IPPROTO_RAW) 1157 if (inet_sk(sk)->inet_num == IPPROTO_RAW)
1161 ip6_ra_control(sk, -1); 1158 ip6_ra_control(sk, -1);
1162 ip6mr_sk_done(sk); 1159 ip6mr_sk_done(sk);
1163 sk_common_release(sk); 1160 sk_common_release(sk);
@@ -1176,7 +1173,7 @@ static int rawv6_init_sk(struct sock *sk)
1176{ 1173{
1177 struct raw6_sock *rp = raw6_sk(sk); 1174 struct raw6_sock *rp = raw6_sk(sk);
1178 1175
1179 switch (inet_sk(sk)->num) { 1176 switch (inet_sk(sk)->inet_num) {
1180 case IPPROTO_ICMPV6: 1177 case IPPROTO_ICMPV6:
1181 rp->checksum = 1; 1178 rp->checksum = 1;
1182 rp->offset = 2; 1179 rp->offset = 2;
@@ -1226,7 +1223,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1226 dest = &np->daddr; 1223 dest = &np->daddr;
1227 src = &np->rcv_saddr; 1224 src = &np->rcv_saddr;
1228 destp = 0; 1225 destp = 0;
1229 srcp = inet_sk(sp)->num; 1226 srcp = inet_sk(sp)->inet_num;
1230 seq_printf(seq, 1227 seq_printf(seq,
1231 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1228 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1232 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", 1229 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
@@ -1338,7 +1335,6 @@ static struct inet_protosw rawv6_protosw = {
1338 .protocol = IPPROTO_IP, /* wild card */ 1335 .protocol = IPPROTO_IP, /* wild card */
1339 .prot = &rawv6_prot, 1336 .prot = &rawv6_prot,
1340 .ops = &inet6_sockraw_ops, 1337 .ops = &inet6_sockraw_ops,
1341 .capability = CAP_NET_RAW,
1342 .no_check = UDP_CSUM_DEFAULT, 1338 .no_check = UDP_CSUM_DEFAULT,
1343 .flags = INET_PROTOSW_REUSE, 1339 .flags = INET_PROTOSW_REUSE,
1344}; 1340};
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2499e971203..4d98549a686 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -208,18 +208,17 @@ static void ip6_frag_expire(unsigned long data)
208 fq_kill(fq); 208 fq_kill(fq);
209 209
210 net = container_of(fq->q.net, struct net, ipv6.frags); 210 net = container_of(fq->q.net, struct net, ipv6.frags);
211 dev = dev_get_by_index(net, fq->iif); 211 rcu_read_lock();
212 dev = dev_get_by_index_rcu(net, fq->iif);
212 if (!dev) 213 if (!dev)
213 goto out; 214 goto out_rcu_unlock;
214 215
215 rcu_read_lock();
216 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); 216 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
217 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 217 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
218 rcu_read_unlock();
219 218
220 /* Don't send error if the first segment did not arrive. */ 219 /* Don't send error if the first segment did not arrive. */
221 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments) 220 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
222 goto out; 221 goto out_rcu_unlock;
223 222
224 /* 223 /*
225 But use as source device on which LAST ARRIVED 224 But use as source device on which LAST ARRIVED
@@ -228,9 +227,9 @@ static void ip6_frag_expire(unsigned long data)
228 */ 227 */
229 fq->q.fragments->dev = dev; 228 fq->q.fragments->dev = dev;
230 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); 229 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
230out_rcu_unlock:
231 rcu_read_unlock();
231out: 232out:
232 if (dev)
233 dev_put(dev);
234 spin_unlock(&fq->q.lock); 233 spin_unlock(&fq->q.lock);
235 fq_put(fq); 234 fq_put(fq);
236} 235}
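
The two hunks above move ip6_frag_expire() from dev_get_by_index(), which takes a device reference that must later be dropped with dev_put(), to dev_get_by_index_rcu(), whose result is only a borrowed pointer valid inside the rcu_read_lock()/rcu_read_unlock() section; that is why the new out_rcu_unlock label appears and the trailing dev_put() goes away. A rough userspace sketch of that "borrow under a read-side lock, nothing to release" idiom, with a pthread mutex standing in for the RCU read side (all names invented):

/*
 * Borrowed-pointer lookup: the result is only valid while dev_lock is
 * held, so the caller never takes or drops a reference.  The mutex is a
 * crude stand-in for rcu_read_lock(); real RCU readers do not block
 * writers.
 */
#include <pthread.h>
#include <stdio.h>

struct device { int ifindex; char name[16]; };

static struct device devices[] = { { 1, "lo" }, { 2, "sit0" } };
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold dev_lock; pointer is valid only until unlock */
static struct device *dev_lookup_locked(int ifindex)
{
	for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
		if (devices[i].ifindex == ifindex)
			return &devices[i];
	return NULL;
}

int main(void)
{
	struct device *dev;

	pthread_mutex_lock(&dev_lock);
	dev = dev_lookup_locked(2);
	if (dev)
		printf("timed-out fragment arrived on %s\n", dev->name);
	/* nothing to put: just leave the read-side section */
	pthread_mutex_unlock(&dev_lock);
	return 0;
}
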
@@ -676,7 +675,7 @@ static int ip6_frags_ns_sysctl_register(struct net *net)
676 struct ctl_table_header *hdr; 675 struct ctl_table_header *hdr;
677 676
678 table = ip6_frags_ns_ctl_table; 677 table = ip6_frags_ns_ctl_table;
679 if (net != &init_net) { 678 if (!net_eq(net, &init_net)) {
680 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL); 679 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
681 if (table == NULL) 680 if (table == NULL)
682 goto err_alloc; 681 goto err_alloc;
@@ -694,7 +693,7 @@ static int ip6_frags_ns_sysctl_register(struct net *net)
694 return 0; 693 return 0;
695 694
696err_reg: 695err_reg:
697 if (net != &init_net) 696 if (!net_eq(net, &init_net))
698 kfree(table); 697 kfree(table);
699err_alloc: 698err_alloc:
700 return -ENOMEM; 699 return -ENOMEM;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6aa202e26f9..db3b2730389 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1471,9 +1471,10 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1471 }, 1471 },
1472 }, 1472 },
1473 }, 1473 },
1474 .gateway = *gateway,
1475 }; 1474 };
1476 1475
1476 ipv6_addr_copy(&rdfl.gateway, gateway);
1477
1477 if (rt6_need_strict(dest)) 1478 if (rt6_need_strict(dest))
1478 flags |= RT6_LOOKUP_F_IFACE; 1479 flags |= RT6_LOOKUP_F_IFACE;
1479 1480
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index dbd19a78ca7..976e68244b9 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -66,7 +66,7 @@ static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 66static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 67static void ipip6_tunnel_setup(struct net_device *dev);
68 68
69static int sit_net_id; 69static int sit_net_id __read_mostly;
70struct sit_net { 70struct sit_net {
71 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 71 struct ip_tunnel *tunnels_r_l[HASH_SIZE];
72 struct ip_tunnel *tunnels_r[HASH_SIZE]; 72 struct ip_tunnel *tunnels_r[HASH_SIZE];
@@ -77,8 +77,17 @@ struct sit_net {
77 struct net_device *fb_tunnel_dev; 77 struct net_device *fb_tunnel_dev;
78}; 78};
79 79
80static DEFINE_RWLOCK(ipip6_lock); 80/*
81 * Locking : hash tables are protected by RCU and a spinlock
82 */
83static DEFINE_SPINLOCK(ipip6_lock);
84
85#define for_each_ip_tunnel_rcu(start) \
86 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
81 87
88/*
89 * Must be invoked with rcu_read_lock
90 */
82static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, 91static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
83 struct net_device *dev, __be32 remote, __be32 local) 92 struct net_device *dev, __be32 remote, __be32 local)
84{ 93{
@@ -87,26 +96,26 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
87 struct ip_tunnel *t; 96 struct ip_tunnel *t;
88 struct sit_net *sitn = net_generic(net, sit_net_id); 97 struct sit_net *sitn = net_generic(net, sit_net_id);
89 98
90 for (t = sitn->tunnels_r_l[h0^h1]; t; t = t->next) { 99 for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
91 if (local == t->parms.iph.saddr && 100 if (local == t->parms.iph.saddr &&
92 remote == t->parms.iph.daddr && 101 remote == t->parms.iph.daddr &&
93 (!dev || !t->parms.link || dev->iflink == t->parms.link) && 102 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
94 (t->dev->flags & IFF_UP)) 103 (t->dev->flags & IFF_UP))
95 return t; 104 return t;
96 } 105 }
97 for (t = sitn->tunnels_r[h0]; t; t = t->next) { 106 for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
98 if (remote == t->parms.iph.daddr && 107 if (remote == t->parms.iph.daddr &&
99 (!dev || !t->parms.link || dev->iflink == t->parms.link) && 108 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
100 (t->dev->flags & IFF_UP)) 109 (t->dev->flags & IFF_UP))
101 return t; 110 return t;
102 } 111 }
103 for (t = sitn->tunnels_l[h1]; t; t = t->next) { 112 for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
104 if (local == t->parms.iph.saddr && 113 if (local == t->parms.iph.saddr &&
105 (!dev || !t->parms.link || dev->iflink == t->parms.link) && 114 (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
106 (t->dev->flags & IFF_UP)) 115 (t->dev->flags & IFF_UP))
107 return t; 116 return t;
108 } 117 }
109 t = sitn->tunnels_wc[0]; 118 t = rcu_dereference(sitn->tunnels_wc[0]);
110 if ((t != NULL) && (t->dev->flags & IFF_UP)) 119 if ((t != NULL) && (t->dev->flags & IFF_UP))
111 return t; 120 return t;
112 return NULL; 121 return NULL;
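
The rewritten lookup above walks the sit hash buckets under RCU via the new for_each_ip_tunnel_rcu() helper (writers now serialize on a spinlock instead of the old rwlock) but keeps the original precedence: remote+local first, then remote-only, then local-only, then the wildcard slot. A minimal userspace model of that precedence only, with a toy hash, plain pointers and no RCU (all names invented):

/*
 * Most-specific-first tunnel lookup, modelled on the code above.
 */
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16
#define HASH(a)   (((a) >> 24) & (HASH_SIZE - 1))   /* toy hash */

struct tun {
	uint32_t saddr;          /* local endpoint  */
	uint32_t daddr;          /* remote endpoint */
	int up;
	struct tun *next;
};

static struct tun *tunnels_r_l[HASH_SIZE];   /* keyed by remote ^ local */
static struct tun *tunnels_r[HASH_SIZE];     /* keyed by remote         */
static struct tun *tunnels_l[HASH_SIZE];     /* keyed by local          */
static struct tun *tunnels_wc[1];            /* wildcard                */

static struct tun *tunnel_lookup(uint32_t remote, uint32_t local)
{
	unsigned h0 = HASH(remote), h1 = HASH(local);
	struct tun *t;

	for (t = tunnels_r_l[h0 ^ h1]; t; t = t->next)
		if (t->saddr == local && t->daddr == remote && t->up)
			return t;
	for (t = tunnels_r[h0]; t; t = t->next)
		if (t->daddr == remote && t->up)
			return t;
	for (t = tunnels_l[h1]; t; t = t->next)
		if (t->saddr == local && t->up)
			return t;
	t = tunnels_wc[0];
	return (t && t->up) ? t : NULL;
}

int main(void)
{
	static struct tun wild = { .up = 1 };
	static struct tun by_remote = { .daddr = 0xc0000201, .up = 1 };

	tunnels_wc[0] = &wild;
	tunnels_r[HASH(0xc0000201)] = &by_remote;

	printf("%s\n", tunnel_lookup(0xc0000201, 0x0a000001) == &by_remote ?
	       "matched remote-keyed tunnel" : "fell through to wildcard");
	return 0;
}
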
@@ -143,9 +152,9 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
143 152
144 for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) { 153 for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
145 if (t == *tp) { 154 if (t == *tp) {
146 write_lock_bh(&ipip6_lock); 155 spin_lock_bh(&ipip6_lock);
147 *tp = t->next; 156 *tp = t->next;
148 write_unlock_bh(&ipip6_lock); 157 spin_unlock_bh(&ipip6_lock);
149 break; 158 break;
150 } 159 }
151 } 160 }
@@ -155,10 +164,27 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
155{ 164{
156 struct ip_tunnel **tp = ipip6_bucket(sitn, t); 165 struct ip_tunnel **tp = ipip6_bucket(sitn, t);
157 166
167 spin_lock_bh(&ipip6_lock);
158 t->next = *tp; 168 t->next = *tp;
159 write_lock_bh(&ipip6_lock); 169 rcu_assign_pointer(*tp, t);
160 *tp = t; 170 spin_unlock_bh(&ipip6_lock);
161 write_unlock_bh(&ipip6_lock); 171}
172
173static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
174{
175#ifdef CONFIG_IPV6_SIT_6RD
176 struct ip_tunnel *t = netdev_priv(dev);
177
178 if (t->dev == sitn->fb_tunnel_dev) {
179 ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
180 t->ip6rd.relay_prefix = 0;
181 t->ip6rd.prefixlen = 16;
182 t->ip6rd.relay_prefixlen = 0;
183 } else {
184 struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
185 memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
186 }
187#endif
162} 188}
163 189
164static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, 190static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
@@ -204,6 +230,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
204 230
205 nt->parms = *parms; 231 nt->parms = *parms;
206 ipip6_tunnel_init(dev); 232 ipip6_tunnel_init(dev);
233 ipip6_tunnel_clone_6rd(dev, sitn);
207 234
208 if (parms->i_flags & SIT_ISATAP) 235 if (parms->i_flags & SIT_ISATAP)
209 dev->priv_flags |= IFF_ISATAP; 236 dev->priv_flags |= IFF_ISATAP;
@@ -222,15 +249,22 @@ failed:
222 return NULL; 249 return NULL;
223} 250}
224 251
252static DEFINE_SPINLOCK(ipip6_prl_lock);
253
254#define for_each_prl_rcu(start) \
255 for (prl = rcu_dereference(start); \
256 prl; \
257 prl = rcu_dereference(prl->next))
258
225static struct ip_tunnel_prl_entry * 259static struct ip_tunnel_prl_entry *
226__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) 260__ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
227{ 261{
228 struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *)NULL; 262 struct ip_tunnel_prl_entry *prl;
229 263
230 for (p = t->prl; p; p = p->next) 264 for_each_prl_rcu(t->prl)
231 if (p->addr == addr) 265 if (prl->addr == addr)
232 break; 266 break;
233 return p; 267 return prl;
234 268
235} 269}
236 270
@@ -255,7 +289,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
255 kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : 289 kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
256 NULL; 290 NULL;
257 291
258 read_lock(&ipip6_lock); 292 rcu_read_lock();
259 293
260 ca = t->prl_count < cmax ? t->prl_count : cmax; 294 ca = t->prl_count < cmax ? t->prl_count : cmax;
261 295
@@ -273,7 +307,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
273 } 307 }
274 308
275 c = 0; 309 c = 0;
276 for (prl = t->prl; prl; prl = prl->next) { 310 for_each_prl_rcu(t->prl) {
277 if (c >= cmax) 311 if (c >= cmax)
278 break; 312 break;
279 if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) 313 if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
@@ -285,7 +319,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
285 break; 319 break;
286 } 320 }
287out: 321out:
288 read_unlock(&ipip6_lock); 322 rcu_read_unlock();
289 323
290 len = sizeof(*kp) * c; 324 len = sizeof(*kp) * c;
291 ret = 0; 325 ret = 0;
@@ -306,12 +340,14 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
306 if (a->addr == htonl(INADDR_ANY)) 340 if (a->addr == htonl(INADDR_ANY))
307 return -EINVAL; 341 return -EINVAL;
308 342
309 write_lock(&ipip6_lock); 343 spin_lock(&ipip6_prl_lock);
310 344
311 for (p = t->prl; p; p = p->next) { 345 for (p = t->prl; p; p = p->next) {
312 if (p->addr == a->addr) { 346 if (p->addr == a->addr) {
313 if (chg) 347 if (chg) {
314 goto update; 348 p->flags = a->flags;
349 goto out;
350 }
315 err = -EEXIST; 351 err = -EEXIST;
316 goto out; 352 goto out;
317 } 353 }
@@ -328,46 +364,63 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
328 goto out; 364 goto out;
329 } 365 }
330 366
367 INIT_RCU_HEAD(&p->rcu_head);
331 p->next = t->prl; 368 p->next = t->prl;
332 t->prl = p;
333 t->prl_count++;
334update:
335 p->addr = a->addr; 369 p->addr = a->addr;
336 p->flags = a->flags; 370 p->flags = a->flags;
371 t->prl_count++;
372 rcu_assign_pointer(t->prl, p);
337out: 373out:
338 write_unlock(&ipip6_lock); 374 spin_unlock(&ipip6_prl_lock);
339 return err; 375 return err;
340} 376}
341 377
378static void prl_entry_destroy_rcu(struct rcu_head *head)
379{
380 kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head));
381}
382
383static void prl_list_destroy_rcu(struct rcu_head *head)
384{
385 struct ip_tunnel_prl_entry *p, *n;
386
387 p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
388 do {
389 n = p->next;
390 kfree(p);
391 p = n;
392 } while (p);
393}
394
342static int 395static int
343ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) 396ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
344{ 397{
345 struct ip_tunnel_prl_entry *x, **p; 398 struct ip_tunnel_prl_entry *x, **p;
346 int err = 0; 399 int err = 0;
347 400
348 write_lock(&ipip6_lock); 401 spin_lock(&ipip6_prl_lock);
349 402
350 if (a && a->addr != htonl(INADDR_ANY)) { 403 if (a && a->addr != htonl(INADDR_ANY)) {
351 for (p = &t->prl; *p; p = &(*p)->next) { 404 for (p = &t->prl; *p; p = &(*p)->next) {
352 if ((*p)->addr == a->addr) { 405 if ((*p)->addr == a->addr) {
353 x = *p; 406 x = *p;
354 *p = x->next; 407 *p = x->next;
355 kfree(x); 408 call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
356 t->prl_count--; 409 t->prl_count--;
357 goto out; 410 goto out;
358 } 411 }
359 } 412 }
360 err = -ENXIO; 413 err = -ENXIO;
361 } else { 414 } else {
362 while (t->prl) { 415 if (t->prl) {
416 t->prl_count = 0;
363 x = t->prl; 417 x = t->prl;
364 t->prl = t->prl->next; 418 call_rcu(&x->rcu_head, prl_list_destroy_rcu);
365 kfree(x); 419 t->prl = NULL;
366 t->prl_count--;
367 } 420 }
368 } 421 }
369out: 422out:
370 write_unlock(&ipip6_lock); 423 spin_unlock(&ipip6_prl_lock);
371 return err; 424 return err;
372} 425}
373 426
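
With the PRL now read under RCU, entries can no longer be kfree()d while readers may still be walking the list, so the hunk above defers freeing through call_rcu(): prl_entry_destroy_rcu() releases a single unlinked entry and prl_list_destroy_rcu() releases a whole detached chain. The userspace sketch below mirrors just those two callbacks; call_rcu() is faked by invoking the callback immediately, and the structures are simplified:

/*
 * One callback frees a single detached entry, the other frees a whole
 * detached chain.  A real implementation waits for the RCU grace period
 * before calling either.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head { void (*func)(struct rcu_head *); };   /* placeholder */

struct prl_entry {
	unsigned int addr;
	struct prl_entry *next;
	struct rcu_head rcu_head;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void prl_entry_destroy(struct rcu_head *head)
{
	free(container_of(head, struct prl_entry, rcu_head));
}

static void prl_list_destroy(struct rcu_head *head)
{
	struct prl_entry *p = container_of(head, struct prl_entry, rcu_head);

	while (p) {
		struct prl_entry *n = p->next;

		free(p);
		p = n;
	}
}

static void fake_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	func(head);   /* stand-in: no grace period in this sketch */
}

int main(void)
{
	struct prl_entry *list = NULL, *one;

	for (unsigned int i = 0; i < 3; i++) {
		struct prl_entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->addr = i;
		e->next = list;
		list = e;
	}

	/* single-delete path: unlink one entry, defer its free */
	one = list;
	list = one->next;
	fake_call_rcu(&one->rcu_head, prl_entry_destroy);

	/* delete-all path: detach the remaining chain, free it in one go */
	fake_call_rcu(&list->rcu_head, prl_list_destroy);
	puts("entries queued for deferred destruction");
	return 0;
}
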
@@ -377,7 +430,7 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
377 struct ip_tunnel_prl_entry *p; 430 struct ip_tunnel_prl_entry *p;
378 int ok = 1; 431 int ok = 1;
379 432
380 read_lock(&ipip6_lock); 433 rcu_read_lock();
381 p = __ipip6_tunnel_locate_prl(t, iph->saddr); 434 p = __ipip6_tunnel_locate_prl(t, iph->saddr);
382 if (p) { 435 if (p) {
383 if (p->flags & PRL_DEFAULT) 436 if (p->flags & PRL_DEFAULT)
@@ -393,7 +446,7 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
393 else 446 else
394 ok = 0; 447 ok = 0;
395 } 448 }
396 read_unlock(&ipip6_lock); 449 rcu_read_unlock();
397 return ok; 450 return ok;
398} 451}
399 452
@@ -403,9 +456,9 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
403 struct sit_net *sitn = net_generic(net, sit_net_id); 456 struct sit_net *sitn = net_generic(net, sit_net_id);
404 457
405 if (dev == sitn->fb_tunnel_dev) { 458 if (dev == sitn->fb_tunnel_dev) {
406 write_lock_bh(&ipip6_lock); 459 spin_lock_bh(&ipip6_lock);
407 sitn->tunnels_wc[0] = NULL; 460 sitn->tunnels_wc[0] = NULL;
408 write_unlock_bh(&ipip6_lock); 461 spin_unlock_bh(&ipip6_lock);
409 dev_put(dev); 462 dev_put(dev);
410 } else { 463 } else {
411 ipip6_tunnel_unlink(sitn, netdev_priv(dev)); 464 ipip6_tunnel_unlink(sitn, netdev_priv(dev));
@@ -458,7 +511,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
458 511
459 err = -ENOENT; 512 err = -ENOENT;
460 513
461 read_lock(&ipip6_lock); 514 rcu_read_lock();
462 t = ipip6_tunnel_lookup(dev_net(skb->dev), 515 t = ipip6_tunnel_lookup(dev_net(skb->dev),
463 skb->dev, 516 skb->dev,
464 iph->daddr, 517 iph->daddr,
@@ -476,7 +529,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
476 t->err_count = 1; 529 t->err_count = 1;
477 t->err_time = jiffies; 530 t->err_time = jiffies;
478out: 531out:
479 read_unlock(&ipip6_lock); 532 rcu_read_unlock();
480 return err; 533 return err;
481} 534}
482 535
@@ -496,7 +549,7 @@ static int ipip6_rcv(struct sk_buff *skb)
496 549
497 iph = ip_hdr(skb); 550 iph = ip_hdr(skb);
498 551
499 read_lock(&ipip6_lock); 552 rcu_read_lock();
500 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 553 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
501 iph->saddr, iph->daddr); 554 iph->saddr, iph->daddr);
502 if (tunnel != NULL) { 555 if (tunnel != NULL) {
@@ -510,7 +563,7 @@ static int ipip6_rcv(struct sk_buff *skb)
510 if ((tunnel->dev->priv_flags & IFF_ISATAP) && 563 if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
511 !isatap_chksrc(skb, iph, tunnel)) { 564 !isatap_chksrc(skb, iph, tunnel)) {
512 tunnel->dev->stats.rx_errors++; 565 tunnel->dev->stats.rx_errors++;
513 read_unlock(&ipip6_lock); 566 rcu_read_unlock();
514 kfree_skb(skb); 567 kfree_skb(skb);
515 return 0; 568 return 0;
516 } 569 }
@@ -521,28 +574,52 @@ static int ipip6_rcv(struct sk_buff *skb)
521 nf_reset(skb); 574 nf_reset(skb);
522 ipip6_ecn_decapsulate(iph, skb); 575 ipip6_ecn_decapsulate(iph, skb);
523 netif_rx(skb); 576 netif_rx(skb);
524 read_unlock(&ipip6_lock); 577 rcu_read_unlock();
525 return 0; 578 return 0;
526 } 579 }
527 580
528 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 581 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
529 read_unlock(&ipip6_lock); 582 rcu_read_unlock();
530out: 583out:
531 kfree_skb(skb); 584 kfree_skb(skb);
532 return 0; 585 return 0;
533} 586}
534 587
535/* Returns the embedded IPv4 address if the IPv6 address 588/*
536 comes from 6to4 (RFC 3056) addr space */ 589 * Returns the embedded IPv4 address if the IPv6 address
537 590 * comes from 6rd / 6to4 (RFC 3056) addr space.
538static inline __be32 try_6to4(struct in6_addr *v6dst) 591 */
592static inline
593__be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
539{ 594{
540 __be32 dst = 0; 595 __be32 dst = 0;
541 596
597#ifdef CONFIG_IPV6_SIT_6RD
598 if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
599 tunnel->ip6rd.prefixlen)) {
600 unsigned pbw0, pbi0;
601 int pbi1;
602 u32 d;
603
604 pbw0 = tunnel->ip6rd.prefixlen >> 5;
605 pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
606
607 d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
608 tunnel->ip6rd.relay_prefixlen;
609
610 pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
611 if (pbi1 > 0)
612 d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
613 (32 - pbi1);
614
615 dst = tunnel->ip6rd.relay_prefix | htonl(d);
616 }
617#else
542 if (v6dst->s6_addr16[0] == htons(0x2002)) { 618 if (v6dst->s6_addr16[0] == htons(0x2002)) {
543 /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ 619 /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */
544 memcpy(&dst, &v6dst->s6_addr16[1], 4); 620 memcpy(&dst, &v6dst->s6_addr16[1], 4);
545 } 621 }
622#endif
546 return dst; 623 return dst;
547} 624}
548 625
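
try_6rd() above generalizes the old 6to4 shortcut: the IPv4 tunnel endpoint is rebuilt from the address bits that follow the configured 6rd prefix, prepended with the relay prefix. With the defaults installed by ipip6_tunnel_clone_6rd() (2002::/16, empty relay prefix) this reduces to classic 6to4. A standalone userspace version of the same bit manipulation (only the wrapper and the example address are invented):

/*
 * Rebuild the embedded IPv4 destination from a 6rd IPv6 destination.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* IPv6 address as four 32-bit words in host byte order */
static void ipv6_words(const struct in6_addr *a, uint32_t w[4])
{
	for (int i = 0; i < 4; i++)
		w[i] = ((uint32_t)a->s6_addr[4 * i] << 24) |
		       ((uint32_t)a->s6_addr[4 * i + 1] << 16) |
		       ((uint32_t)a->s6_addr[4 * i + 2] << 8) |
		        (uint32_t)a->s6_addr[4 * i + 3];
}

static uint32_t sixrd_embedded_v4(const struct in6_addr *v6dst,
				  unsigned prefixlen,       /* 6rd prefix length        */
				  unsigned relay_prefixlen, /* IPv4 relay prefix length */
				  uint32_t relay_prefix)    /* host byte order          */
{
	uint32_t w[4], d;
	unsigned pbw0 = prefixlen >> 5;    /* word holding the delegated bits */
	unsigned pbi0 = prefixlen & 0x1f;  /* bit offset inside that word     */
	int pbi1 = (int)pbi0 - (int)relay_prefixlen;

	ipv6_words(v6dst, w);
	d = (w[pbw0] << pbi0) >> relay_prefixlen;
	if (pbi1 > 0)
		d |= w[pbw0 + 1] >> (32 - pbi1);
	return relay_prefix | d;
}

int main(void)
{
	struct in6_addr dst;
	uint32_t v4;

	/* defaults from ipip6_tunnel_clone_6rd(): 2002::/16, no relay
	 * prefix -- i.e. classic 6to4; prints 192.0.2.4 */
	inet_pton(AF_INET6, "2002:c000:0204::1", &dst);
	v4 = sixrd_embedded_v4(&dst, 16, 0, 0);
	printf("embedded IPv4: %u.%u.%u.%u\n",
	       (unsigned)(v4 >> 24), (unsigned)((v4 >> 16) & 0xff),
	       (unsigned)((v4 >> 8) & 0xff), (unsigned)(v4 & 0xff));
	return 0;
}
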
@@ -555,10 +632,12 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
555 struct net_device *dev) 632 struct net_device *dev)
556{ 633{
557 struct ip_tunnel *tunnel = netdev_priv(dev); 634 struct ip_tunnel *tunnel = netdev_priv(dev);
558 struct net_device_stats *stats = &tunnel->dev->stats; 635 struct net_device_stats *stats = &dev->stats;
636 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
559 struct iphdr *tiph = &tunnel->parms.iph; 637 struct iphdr *tiph = &tunnel->parms.iph;
560 struct ipv6hdr *iph6 = ipv6_hdr(skb); 638 struct ipv6hdr *iph6 = ipv6_hdr(skb);
561 u8 tos = tunnel->parms.iph.tos; 639 u8 tos = tunnel->parms.iph.tos;
640 __be16 df = tiph->frag_off;
562 struct rtable *rt; /* Route to the other host */ 641 struct rtable *rt; /* Route to the other host */
563 struct net_device *tdev; /* Device to other host */ 642 struct net_device *tdev; /* Device to other host */
564 struct iphdr *iph; /* Our new IP header */ 643 struct iphdr *iph; /* Our new IP header */
@@ -595,7 +674,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
595 } 674 }
596 675
597 if (!dst) 676 if (!dst)
598 dst = try_6to4(&iph6->daddr); 677 dst = try_6rd(&iph6->daddr, tunnel);
599 678
600 if (!dst) { 679 if (!dst) {
601 struct neighbour *neigh = NULL; 680 struct neighbour *neigh = NULL;
@@ -648,25 +727,28 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
648 goto tx_error; 727 goto tx_error;
649 } 728 }
650 729
651 if (tiph->frag_off) 730 if (df) {
652 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 731 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
653 else
654 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
655 732
656 if (mtu < 68) { 733 if (mtu < 68) {
657 stats->collisions++; 734 stats->collisions++;
658 ip_rt_put(rt); 735 ip_rt_put(rt);
659 goto tx_error; 736 goto tx_error;
660 } 737 }
661 if (mtu < IPV6_MIN_MTU)
662 mtu = IPV6_MIN_MTU;
663 if (tunnel->parms.iph.daddr && skb_dst(skb))
664 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
665 738
666 if (skb->len > mtu) { 739 if (mtu < IPV6_MIN_MTU) {
667 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 740 mtu = IPV6_MIN_MTU;
668 ip_rt_put(rt); 741 df = 0;
669 goto tx_error; 742 }
743
744 if (tunnel->parms.iph.daddr && skb_dst(skb))
745 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
746
747 if (skb->len > mtu) {
748 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
749 ip_rt_put(rt);
750 goto tx_error;
751 }
670 } 752 }
671 753
672 if (tunnel->err_count > 0) { 754 if (tunnel->err_count > 0) {
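
In the rewritten transmit path above, the outer DF bit is latched into a local df up front and the MTU handling runs only when it is set: the path MTU minus the outer IPv4 header is enforced, anything below 68 is an error, anything below IPV6_MIN_MTU is clamped to 1280 with DF cleared so the outer packet may be fragmented, and packets still larger than the MTU trigger ICMPV6_PKT_TOOBIG. A compact userspace rendering of just that decision (enum and function names invented):

/*
 * DF/MTU policy for the encapsulated IPv6 packet, mirroring the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

#define IPV6_MIN_MTU   1280
#define IPV4_MIN_MTU   68
#define OUTER_IPV4_HDR 20             /* sizeof(struct iphdr) */

enum tx_verdict { TX_OK, TX_ERROR, TX_TOO_BIG };

static enum tx_verdict check_mtu(bool *df, unsigned path_mtu,
				 unsigned pkt_len, unsigned *mtu_out)
{
	unsigned mtu;

	if (!*df)
		return TX_OK;             /* no DF: let IPv4 fragment the tunnel packet */

	mtu = path_mtu - OUTER_IPV4_HDR;
	if (mtu < IPV4_MIN_MTU)
		return TX_ERROR;

	if (mtu < IPV6_MIN_MTU) {
		mtu = IPV6_MIN_MTU;       /* IPv6 guarantees 1280 ...                 */
		*df = false;              /* ... so allow outer fragmentation instead */
	}
	*mtu_out = mtu;                   /* the kernel also updates the path MTU here */
	return pkt_len > mtu ? TX_TOO_BIG : TX_OK;  /* TOO_BIG -> ICMPV6_PKT_TOOBIG */
}

int main(void)
{
	bool df = true;
	unsigned mtu = 0;
	enum tx_verdict v = check_mtu(&df, 1000, 1200, &mtu);

	/* expected: TX_OK with DF cleared and mtu clamped to 1280 */
	printf("verdict=%d df=%d mtu=%u\n", v, (int)df, mtu);
	return 0;
}
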
@@ -688,7 +770,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
688 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 770 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
689 if (!new_skb) { 771 if (!new_skb) {
690 ip_rt_put(rt); 772 ip_rt_put(rt);
691 stats->tx_dropped++; 773 txq->tx_dropped++;
692 dev_kfree_skb(skb); 774 dev_kfree_skb(skb);
693 return NETDEV_TX_OK; 775 return NETDEV_TX_OK;
694 } 776 }
@@ -714,11 +796,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
714 iph = ip_hdr(skb); 796 iph = ip_hdr(skb);
715 iph->version = 4; 797 iph->version = 4;
716 iph->ihl = sizeof(struct iphdr)>>2; 798 iph->ihl = sizeof(struct iphdr)>>2;
717 if (mtu > IPV6_MIN_MTU) 799 iph->frag_off = df;
718 iph->frag_off = tiph->frag_off;
719 else
720 iph->frag_off = 0;
721
722 iph->protocol = IPPROTO_IPV6; 800 iph->protocol = IPPROTO_IPV6;
723 iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 801 iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
724 iph->daddr = rt->rt_dst; 802 iph->daddr = rt->rt_dst;
@@ -785,9 +863,15 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
785 struct ip_tunnel *t; 863 struct ip_tunnel *t;
786 struct net *net = dev_net(dev); 864 struct net *net = dev_net(dev);
787 struct sit_net *sitn = net_generic(net, sit_net_id); 865 struct sit_net *sitn = net_generic(net, sit_net_id);
866#ifdef CONFIG_IPV6_SIT_6RD
867 struct ip_tunnel_6rd ip6rd;
868#endif
788 869
789 switch (cmd) { 870 switch (cmd) {
790 case SIOCGETTUNNEL: 871 case SIOCGETTUNNEL:
872#ifdef CONFIG_IPV6_SIT_6RD
873 case SIOCGET6RD:
874#endif
791 t = NULL; 875 t = NULL;
792 if (dev == sitn->fb_tunnel_dev) { 876 if (dev == sitn->fb_tunnel_dev) {
793 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { 877 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
@@ -798,9 +882,25 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
798 } 882 }
799 if (t == NULL) 883 if (t == NULL)
800 t = netdev_priv(dev); 884 t = netdev_priv(dev);
801 memcpy(&p, &t->parms, sizeof(p)); 885
802 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 886 err = -EFAULT;
803 err = -EFAULT; 887 if (cmd == SIOCGETTUNNEL) {
888 memcpy(&p, &t->parms, sizeof(p));
889 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
890 sizeof(p)))
891 goto done;
892#ifdef CONFIG_IPV6_SIT_6RD
893 } else {
894 ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
895 ip6rd.relay_prefix = t->ip6rd.relay_prefix;
896 ip6rd.prefixlen = t->ip6rd.prefixlen;
897 ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
898 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
899 sizeof(ip6rd)))
900 goto done;
901#endif
902 }
903 err = 0;
804 break; 904 break;
805 905
806 case SIOCADDTUNNEL: 906 case SIOCADDTUNNEL:
@@ -921,6 +1021,54 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
921 netdev_state_change(dev); 1021 netdev_state_change(dev);
922 break; 1022 break;
923 1023
1024#ifdef CONFIG_IPV6_SIT_6RD
1025 case SIOCADD6RD:
1026 case SIOCCHG6RD:
1027 case SIOCDEL6RD:
1028 err = -EPERM;
1029 if (!capable(CAP_NET_ADMIN))
1030 goto done;
1031
1032 err = -EFAULT;
1033 if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
1034 sizeof(ip6rd)))
1035 goto done;
1036
1037 t = netdev_priv(dev);
1038
1039 if (cmd != SIOCDEL6RD) {
1040 struct in6_addr prefix;
1041 __be32 relay_prefix;
1042
1043 err = -EINVAL;
1044 if (ip6rd.relay_prefixlen > 32 ||
1045 ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
1046 goto done;
1047
1048 ipv6_addr_prefix(&prefix, &ip6rd.prefix,
1049 ip6rd.prefixlen);
1050 if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
1051 goto done;
1052 if (ip6rd.relay_prefixlen)
1053 relay_prefix = ip6rd.relay_prefix &
1054 htonl(0xffffffffUL <<
1055 (32 - ip6rd.relay_prefixlen));
1056 else
1057 relay_prefix = 0;
1058 if (relay_prefix != ip6rd.relay_prefix)
1059 goto done;
1060
1061 ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
1062 t->ip6rd.relay_prefix = relay_prefix;
1063 t->ip6rd.prefixlen = ip6rd.prefixlen;
1064 t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
1065 } else
1066 ipip6_tunnel_clone_6rd(dev, sitn);
1067
1068 err = 0;
1069 break;
1070#endif
1071
924 default: 1072 default:
925 err = -EINVAL; 1073 err = -EINVAL;
926 } 1074 }
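
The SIOCADD6RD/SIOCCHG6RD branch above accepts a 6rd configuration only if the relay prefix covers at most 32 bits, the 6rd prefix length plus the delegated IPv4 bits fit within 64 bits, and neither prefix carries bits beyond its stated length. A hedged userspace rendering of those checks (helper names invented; the kernel uses ipv6_addr_prefix() and compares network-byte-order prefixes):

/*
 * Validate a 6rd (prefix/prefixlen, relay_prefix/relay_prefixlen) tuple.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool sixrd_config_valid(const struct in6_addr *prefix, unsigned prefixlen,
			       uint32_t relay_prefix, unsigned relay_prefixlen)
{
	struct in6_addr masked;
	uint32_t relay_mask;

	if (relay_prefixlen > 32 ||
	    prefixlen + (32 - relay_prefixlen) > 64)
		return false;

	/* the IPv6 prefix must have no bits set beyond prefixlen */
	memset(&masked, 0, sizeof(masked));
	for (unsigned i = 0; i < prefixlen; i++)
		masked.s6_addr[i / 8] |= prefix->s6_addr[i / 8] & (0x80 >> (i % 8));
	if (memcmp(&masked, prefix, sizeof(masked)) != 0)
		return false;

	/* same for the IPv4 relay prefix (host byte order in this sketch) */
	relay_mask = relay_prefixlen ? 0xffffffffu << (32 - relay_prefixlen) : 0;
	return (relay_prefix & relay_mask) == relay_prefix;
}

int main(void)
{
	struct in6_addr p;

	inet_pton(AF_INET6, "2001:db8::", &p);
	printf("2001:db8::/32 + 192.0.2.0/24: %s\n",
	       sixrd_config_valid(&p, 32, 0xc0000200, 24) ? "ok" : "rejected");
	printf("2001:db8::/16 (dirty prefix): %s\n",
	       sixrd_config_valid(&p, 16, 0, 0) ? "ok" : "rejected");
	return 0;
}
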
@@ -997,33 +1145,27 @@ static struct xfrm_tunnel sit_handler = {
997 .priority = 1, 1145 .priority = 1,
998}; 1146};
999 1147
1000static void sit_destroy_tunnels(struct sit_net *sitn) 1148static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1001{ 1149{
1002 int prio; 1150 int prio;
1003 1151
1004 for (prio = 1; prio < 4; prio++) { 1152 for (prio = 1; prio < 4; prio++) {
1005 int h; 1153 int h;
1006 for (h = 0; h < HASH_SIZE; h++) { 1154 for (h = 0; h < HASH_SIZE; h++) {
1007 struct ip_tunnel *t; 1155 struct ip_tunnel *t = sitn->tunnels[prio][h];
1008 while ((t = sitn->tunnels[prio][h]) != NULL) 1156
1009 unregister_netdevice(t->dev); 1157 while (t != NULL) {
1158 unregister_netdevice_queue(t->dev, head);
1159 t = t->next;
1160 }
1010 } 1161 }
1011 } 1162 }
1012} 1163}
1013 1164
1014static int sit_init_net(struct net *net) 1165static int sit_init_net(struct net *net)
1015{ 1166{
1167 struct sit_net *sitn = net_generic(net, sit_net_id);
1016 int err; 1168 int err;
1017 struct sit_net *sitn;
1018
1019 err = -ENOMEM;
1020 sitn = kzalloc(sizeof(struct sit_net), GFP_KERNEL);
1021 if (sitn == NULL)
1022 goto err_alloc;
1023
1024 err = net_assign_generic(net, sit_net_id, sitn);
1025 if (err < 0)
1026 goto err_assign;
1027 1169
1028 sitn->tunnels[0] = sitn->tunnels_wc; 1170 sitn->tunnels[0] = sitn->tunnels_wc;
1029 sitn->tunnels[1] = sitn->tunnels_l; 1171 sitn->tunnels[1] = sitn->tunnels_l;
@@ -1039,6 +1181,7 @@ static int sit_init_net(struct net *net)
1039 dev_net_set(sitn->fb_tunnel_dev, net); 1181 dev_net_set(sitn->fb_tunnel_dev, net);
1040 1182
1041 ipip6_fb_tunnel_init(sitn->fb_tunnel_dev); 1183 ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
1184 ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
1042 1185
1043 if ((err = register_netdev(sitn->fb_tunnel_dev))) 1186 if ((err = register_netdev(sitn->fb_tunnel_dev)))
1044 goto err_reg_dev; 1187 goto err_reg_dev;
@@ -1049,35 +1192,34 @@ err_reg_dev:
1049 dev_put(sitn->fb_tunnel_dev); 1192 dev_put(sitn->fb_tunnel_dev);
1050 free_netdev(sitn->fb_tunnel_dev); 1193 free_netdev(sitn->fb_tunnel_dev);
1051err_alloc_dev: 1194err_alloc_dev:
1052 /* nothing */
1053err_assign:
1054 kfree(sitn);
1055err_alloc:
1056 return err; 1195 return err;
1057} 1196}
1058 1197
1059static void sit_exit_net(struct net *net) 1198static void sit_exit_net(struct net *net)
1060{ 1199{
1061 struct sit_net *sitn; 1200 struct sit_net *sitn = net_generic(net, sit_net_id);
1201 LIST_HEAD(list);
1062 1202
1063 sitn = net_generic(net, sit_net_id);
1064 rtnl_lock(); 1203 rtnl_lock();
1065 sit_destroy_tunnels(sitn); 1204 sit_destroy_tunnels(sitn, &list);
1066 unregister_netdevice(sitn->fb_tunnel_dev); 1205 unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1206 unregister_netdevice_many(&list);
1067 rtnl_unlock(); 1207 rtnl_unlock();
1068 kfree(sitn);
1069} 1208}
1070 1209
1071static struct pernet_operations sit_net_ops = { 1210static struct pernet_operations sit_net_ops = {
1072 .init = sit_init_net, 1211 .init = sit_init_net,
1073 .exit = sit_exit_net, 1212 .exit = sit_exit_net,
1213 .id = &sit_net_id,
1214 .size = sizeof(struct sit_net),
1074}; 1215};
1075 1216
1076static void __exit sit_cleanup(void) 1217static void __exit sit_cleanup(void)
1077{ 1218{
1078 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1219 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1079 1220
1080 unregister_pernet_gen_device(sit_net_id, &sit_net_ops); 1221 unregister_pernet_device(&sit_net_ops);
1222 rcu_barrier(); /* Wait for completion of call_rcu()'s */
1081} 1223}
1082 1224
1083static int __init sit_init(void) 1225static int __init sit_init(void)
@@ -1091,7 +1233,7 @@ static int __init sit_init(void)
1091 return -EAGAIN; 1233 return -EAGAIN;
1092 } 1234 }
1093 1235
1094 err = register_pernet_gen_device(&sit_net_id, &sit_net_ops); 1236 err = register_pernet_device(&sit_net_ops);
1095 if (err < 0) 1237 if (err < 0)
1096 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1238 xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1097 1239
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 6b6ae913b5d..5b9af508b8f 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -159,6 +159,8 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
159 159
160struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) 160struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
161{ 161{
162 struct tcp_options_received tcp_opt;
163 u8 *hash_location;
162 struct inet_request_sock *ireq; 164 struct inet_request_sock *ireq;
163 struct inet6_request_sock *ireq6; 165 struct inet6_request_sock *ireq6;
164 struct tcp_request_sock *treq; 166 struct tcp_request_sock *treq;
@@ -171,7 +173,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
171 int mss; 173 int mss;
172 struct dst_entry *dst; 174 struct dst_entry *dst;
173 __u8 rcv_wscale; 175 __u8 rcv_wscale;
174 struct tcp_options_received tcp_opt;
175 176
176 if (!sysctl_tcp_syncookies || !th->ack) 177 if (!sysctl_tcp_syncookies || !th->ack)
177 goto out; 178 goto out;
@@ -184,13 +185,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
184 185
185 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
186 187
187 /* check for timestamp cookie support */
188 memset(&tcp_opt, 0, sizeof(tcp_opt));
189 tcp_parse_options(skb, &tcp_opt, 0);
190
191 if (tcp_opt.saw_tstamp)
192 cookie_check_timestamp(&tcp_opt);
193
194 ret = NULL; 188 ret = NULL;
195 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 189 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
196 if (!req) 190 if (!req)
@@ -224,12 +218,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
224 req->expires = 0UL; 218 req->expires = 0UL;
225 req->retrans = 0; 219 req->retrans = 0;
226 ireq->ecn_ok = 0; 220 ireq->ecn_ok = 0;
227 ireq->snd_wscale = tcp_opt.snd_wscale;
228 ireq->rcv_wscale = tcp_opt.rcv_wscale;
229 ireq->sack_ok = tcp_opt.sack_ok;
230 ireq->wscale_ok = tcp_opt.wscale_ok;
231 ireq->tstamp_ok = tcp_opt.saw_tstamp;
232 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
233 treq->rcv_isn = ntohl(th->seq) - 1; 221 treq->rcv_isn = ntohl(th->seq) - 1;
234 treq->snt_isn = cookie; 222 treq->snt_isn = cookie;
235 223
@@ -252,8 +240,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
252 } 240 }
253 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); 241 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
254 fl.oif = sk->sk_bound_dev_if; 242 fl.oif = sk->sk_bound_dev_if;
243 fl.mark = sk->sk_mark;
255 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 244 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
256 fl.fl_ip_sport = inet_sk(sk)->sport; 245 fl.fl_ip_sport = inet_sk(sk)->inet_sport;
257 security_req_classify_flow(req, &fl); 246 security_req_classify_flow(req, &fl);
258 if (ip6_dst_lookup(sk, &dst, &fl)) 247 if (ip6_dst_lookup(sk, &dst, &fl))
259 goto out_free; 248 goto out_free;
@@ -264,6 +253,21 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
264 goto out_free; 253 goto out_free;
265 } 254 }
266 255
256 /* check for timestamp cookie support */
257 memset(&tcp_opt, 0, sizeof(tcp_opt));
258 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
259
260 if (tcp_opt.saw_tstamp)
261 cookie_check_timestamp(&tcp_opt);
262
263 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
264
265 ireq->snd_wscale = tcp_opt.snd_wscale;
266 ireq->rcv_wscale = tcp_opt.rcv_wscale;
267 ireq->sack_ok = tcp_opt.sack_ok;
268 ireq->wscale_ok = tcp_opt.wscale_ok;
269 ireq->tstamp_ok = tcp_opt.saw_tstamp;
270
267 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 271 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
268 tcp_select_initial_window(tcp_full_space(sk), req->mss, 272 tcp_select_initial_window(tcp_full_space(sk), req->mss,
269 &req->rcv_wnd, &req->window_clamp, 273 &req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 21d100b68b1..aadd7cef73b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -226,10 +226,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
226#endif 226#endif
227 goto failure; 227 goto failure;
228 } else { 228 } else {
229 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), 229 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
230 inet->saddr); 230 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
231 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), 231 &np->rcv_saddr);
232 inet->rcv_saddr);
233 } 232 }
234 233
235 return err; 234 return err;
@@ -243,8 +242,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
243 ipv6_addr_copy(&fl.fl6_src, 242 ipv6_addr_copy(&fl.fl6_src,
244 (saddr ? saddr : &np->saddr)); 243 (saddr ? saddr : &np->saddr));
245 fl.oif = sk->sk_bound_dev_if; 244 fl.oif = sk->sk_bound_dev_if;
245 fl.mark = sk->sk_mark;
246 fl.fl_ip_dport = usin->sin6_port; 246 fl.fl_ip_dport = usin->sin6_port;
247 fl.fl_ip_sport = inet->sport; 247 fl.fl_ip_sport = inet->inet_sport;
248 248
249 if (np->opt && np->opt->srcrt) { 249 if (np->opt && np->opt->srcrt) {
250 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; 250 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
@@ -276,7 +276,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
276 276
277 /* set the source address */ 277 /* set the source address */
278 ipv6_addr_copy(&np->saddr, saddr); 278 ipv6_addr_copy(&np->saddr, saddr);
279 inet->rcv_saddr = LOOPBACK4_IPV6; 279 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
280 280
281 sk->sk_gso_type = SKB_GSO_TCPV6; 281 sk->sk_gso_type = SKB_GSO_TCPV6;
282 __ip6_dst_store(sk, dst, NULL, NULL); 282 __ip6_dst_store(sk, dst, NULL, NULL);
@@ -288,7 +288,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
288 288
289 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 289 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
290 290
291 inet->dport = usin->sin6_port; 291 inet->inet_dport = usin->sin6_port;
292 292
293 tcp_set_state(sk, TCP_SYN_SENT); 293 tcp_set_state(sk, TCP_SYN_SENT);
294 err = inet6_hash_connect(&tcp_death_row, sk); 294 err = inet6_hash_connect(&tcp_death_row, sk);
@@ -298,8 +298,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
298 if (!tp->write_seq) 298 if (!tp->write_seq)
299 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, 299 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
300 np->daddr.s6_addr32, 300 np->daddr.s6_addr32,
301 inet->sport, 301 inet->inet_sport,
302 inet->dport); 302 inet->inet_dport);
303 303
304 err = tcp_connect(sk); 304 err = tcp_connect(sk);
305 if (err) 305 if (err)
@@ -311,7 +311,7 @@ late_failure:
311 tcp_set_state(sk, TCP_CLOSE); 311 tcp_set_state(sk, TCP_CLOSE);
312 __sk_dst_reset(sk); 312 __sk_dst_reset(sk);
313failure: 313failure:
314 inet->dport = 0; 314 inet->inet_dport = 0;
315 sk->sk_route_caps = 0; 315 sk->sk_route_caps = 0;
316 return err; 316 return err;
317} 317}
@@ -383,8 +383,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
383 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 383 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
384 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 384 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
385 fl.oif = sk->sk_bound_dev_if; 385 fl.oif = sk->sk_bound_dev_if;
386 fl.fl_ip_dport = inet->dport; 386 fl.mark = sk->sk_mark;
387 fl.fl_ip_sport = inet->sport; 387 fl.fl_ip_dport = inet->inet_dport;
388 fl.fl_ip_sport = inet->inet_sport;
388 security_skb_classify_flow(skb, &fl); 389 security_skb_classify_flow(skb, &fl);
389 390
390 if ((err = ip6_dst_lookup(sk, &dst, &fl))) { 391 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
@@ -460,7 +461,8 @@ out:
460} 461}
461 462
462 463
463static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req) 464static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
465 struct request_values *rvp)
464{ 466{
465 struct inet6_request_sock *treq = inet6_rsk(req); 467 struct inet6_request_sock *treq = inet6_rsk(req);
466 struct ipv6_pinfo *np = inet6_sk(sk); 468 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -477,6 +479,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
477 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); 479 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
478 fl.fl6_flowlabel = 0; 480 fl.fl6_flowlabel = 0;
479 fl.oif = treq->iif; 481 fl.oif = treq->iif;
482 fl.mark = sk->sk_mark;
480 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 483 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
481 fl.fl_ip_sport = inet_rsk(req)->loc_port; 484 fl.fl_ip_sport = inet_rsk(req)->loc_port;
482 security_req_classify_flow(req, &fl); 485 security_req_classify_flow(req, &fl);
@@ -497,7 +500,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
497 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0) 500 if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
498 goto done; 501 goto done;
499 502
500 skb = tcp_make_synack(sk, dst, req); 503 skb = tcp_make_synack(sk, dst, req, rvp);
501 if (skb) { 504 if (skb) {
502 struct tcphdr *th = tcp_hdr(skb); 505 struct tcphdr *th = tcp_hdr(skb);
503 506
@@ -1159,11 +1162,14 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1159 */ 1162 */
1160static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) 1163static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1161{ 1164{
1165 struct tcp_extend_values tmp_ext;
1166 struct tcp_options_received tmp_opt;
1167 u8 *hash_location;
1168 struct request_sock *req;
1162 struct inet6_request_sock *treq; 1169 struct inet6_request_sock *treq;
1163 struct ipv6_pinfo *np = inet6_sk(sk); 1170 struct ipv6_pinfo *np = inet6_sk(sk);
1164 struct tcp_options_received tmp_opt;
1165 struct tcp_sock *tp = tcp_sk(sk); 1171 struct tcp_sock *tp = tcp_sk(sk);
1166 struct request_sock *req = NULL; 1172 struct dst_entry *dst = __sk_dst_get(sk);
1167 __u32 isn = TCP_SKB_CB(skb)->when; 1173 __u32 isn = TCP_SKB_CB(skb)->when;
1168#ifdef CONFIG_SYN_COOKIES 1174#ifdef CONFIG_SYN_COOKIES
1169 int want_cookie = 0; 1175 int want_cookie = 0;
@@ -1202,8 +1208,52 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1202 tcp_clear_options(&tmp_opt); 1208 tcp_clear_options(&tmp_opt);
1203 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1209 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1204 tmp_opt.user_mss = tp->rx_opt.user_mss; 1210 tmp_opt.user_mss = tp->rx_opt.user_mss;
1211 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
1212
1213 if (tmp_opt.cookie_plus > 0 &&
1214 tmp_opt.saw_tstamp &&
1215 !tp->rx_opt.cookie_out_never &&
1216 (sysctl_tcp_cookie_size > 0 ||
1217 (tp->cookie_values != NULL &&
1218 tp->cookie_values->cookie_desired > 0))) {
1219 u8 *c;
1220 u32 *d;
1221 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1222 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1223
1224 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1225 goto drop_and_free;
1226
1227 /* Secret recipe starts with IP addresses */
1228 d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
1229 *mess++ ^= *d++;
1230 *mess++ ^= *d++;
1231 *mess++ ^= *d++;
1232 *mess++ ^= *d++;
1233 d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
1234 *mess++ ^= *d++;
1235 *mess++ ^= *d++;
1236 *mess++ ^= *d++;
1237 *mess++ ^= *d++;
1238
1239 /* plus variable length Initiator Cookie */
1240 c = (u8 *)mess;
1241 while (l-- > 0)
1242 *c++ ^= *hash_location++;
1205 1243
1206 tcp_parse_options(skb, &tmp_opt, 0); 1244#ifdef CONFIG_SYN_COOKIES
1245 want_cookie = 0; /* not our kind of cookie */
1246#endif
1247 tmp_ext.cookie_out_never = 0; /* false */
1248 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1249 } else if (!tp->rx_opt.cookie_in_always) {
1250 /* redundant indications, but ensure initialization. */
1251 tmp_ext.cookie_out_never = 1; /* true */
1252 tmp_ext.cookie_plus = 0;
1253 } else {
1254 goto drop_and_free;
1255 }
1256 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1207 1257
1208 if (want_cookie && !tmp_opt.saw_tstamp) 1258 if (want_cookie && !tmp_opt.saw_tstamp)
1209 tcp_clear_options(&tmp_opt); 1259 tcp_clear_options(&tmp_opt);
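
The new TCP cookie transactions block above folds connection-specific material into the cookie workspace before the digest is taken: the IPv6 destination and source addresses are XORed in word by word, followed by the variable-length initiator cookie taken from the received option. The self-contained sketch below reproduces only that XOR mixing; the workspace size is made up and the surrounding tcp_cookie_generator()/digest machinery is omitted:

/*
 * "Secret recipe starts with IP addresses", then the initiator cookie.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ADDR_WORDS   8                    /* 4 words of daddr + 4 of saddr   */
#define BAKERY_WORDS (ADDR_WORDS + 4)     /* room for the cookie bytes after */

static void mix_cookie_material(uint32_t bakery[BAKERY_WORDS],
				const struct in6_addr *daddr,
				const struct in6_addr *saddr,
				const uint8_t *init_cookie, size_t cookie_len)
{
	uint32_t words[ADDR_WORDS];
	uint8_t *c = (uint8_t *)&bakery[ADDR_WORDS];

	/* fold both addresses into the first eight words */
	memcpy(&words[0], daddr, 16);
	memcpy(&words[4], saddr, 16);
	for (int i = 0; i < ADDR_WORDS; i++)
		bakery[i] ^= words[i];

	/* plus the variable length initiator cookie, one byte at a time */
	if (cookie_len > (BAKERY_WORDS - ADDR_WORDS) * sizeof(uint32_t))
		cookie_len = (BAKERY_WORDS - ADDR_WORDS) * sizeof(uint32_t);
	for (size_t i = 0; i < cookie_len; i++)
		c[i] ^= init_cookie[i];
}

int main(void)
{
	uint32_t bakery[BAKERY_WORDS] = { 0 };      /* pretend digest workspace */
	uint8_t cookie[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	struct in6_addr d, s;

	inet_pton(AF_INET6, "2001:db8::1", &d);
	inet_pton(AF_INET6, "2001:db8::2", &s);
	mix_cookie_material(bakery, &d, &s, cookie, sizeof(cookie));

	for (int i = 0; i < BAKERY_WORDS; i++)
		printf("%08x%c", (unsigned)bakery[i],
		       i == BAKERY_WORDS - 1 ? '\n' : ' ');
	return 0;
}
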
@@ -1236,23 +1286,21 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1236 1286
1237 isn = tcp_v6_init_sequence(skb); 1287 isn = tcp_v6_init_sequence(skb);
1238 } 1288 }
1239
1240 tcp_rsk(req)->snt_isn = isn; 1289 tcp_rsk(req)->snt_isn = isn;
1241 1290
1242 security_inet_conn_request(sk, skb, req); 1291 security_inet_conn_request(sk, skb, req);
1243 1292
1244 if (tcp_v6_send_synack(sk, req)) 1293 if (tcp_v6_send_synack(sk, req,
1245 goto drop; 1294 (struct request_values *)&tmp_ext) ||
1295 want_cookie)
1296 goto drop_and_free;
1246 1297
1247 if (!want_cookie) { 1298 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1248 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1299 return 0;
1249 return 0;
1250 }
1251 1300
1301drop_and_free:
1302 reqsk_free(req);
1252drop: 1303drop:
1253 if (req)
1254 reqsk_free(req);
1255
1256 return 0; /* don't send reset */ 1304 return 0; /* don't send reset */
1257} 1305}
1258 1306
@@ -1290,11 +1338,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1290 1338
1291 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1339 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1292 1340
1293 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF), 1341 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1294 newinet->daddr);
1295 1342
1296 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF), 1343 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1297 newinet->saddr);
1298 1344
1299 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); 1345 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1300 1346
@@ -1345,6 +1391,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1345 } 1391 }
1346 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); 1392 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1347 fl.oif = sk->sk_bound_dev_if; 1393 fl.oif = sk->sk_bound_dev_if;
1394 fl.mark = sk->sk_mark;
1348 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 1395 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1349 fl.fl_ip_sport = inet_rsk(req)->loc_port; 1396 fl.fl_ip_sport = inet_rsk(req)->loc_port;
1350 security_req_classify_flow(req, &fl); 1397 security_req_classify_flow(req, &fl);
@@ -1431,7 +1478,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1431 newtp->advmss = dst_metric(dst, RTAX_ADVMSS); 1478 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1432 tcp_initialize_rcv_mss(newsk); 1479 tcp_initialize_rcv_mss(newsk);
1433 1480
1434 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; 1481 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1482 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1435 1483
1436#ifdef CONFIG_TCP_MD5SIG 1484#ifdef CONFIG_TCP_MD5SIG
1437 /* Copy over the MD5 key from the original socket */ 1485 /* Copy over the MD5 key from the original socket */
@@ -1848,7 +1896,7 @@ static int tcp_v6_init_sock(struct sock *sk)
1848 */ 1896 */
1849 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 1897 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1850 tp->snd_cwnd_clamp = ~0; 1898 tp->snd_cwnd_clamp = ~0;
1851 tp->mss_cache = 536; 1899 tp->mss_cache = TCP_MSS_DEFAULT;
1852 1900
1853 tp->reordering = sysctl_tcp_reordering; 1901 tp->reordering = sysctl_tcp_reordering;
1854 1902
@@ -1864,6 +1912,19 @@ static int tcp_v6_init_sock(struct sock *sk)
1864 tp->af_specific = &tcp_sock_ipv6_specific; 1912 tp->af_specific = &tcp_sock_ipv6_specific;
1865#endif 1913#endif
1866 1914
1915 /* TCP Cookie Transactions */
1916 if (sysctl_tcp_cookie_size > 0) {
1917 /* Default, cookies without s_data_payload. */
1918 tp->cookie_values =
1919 kzalloc(sizeof(*tp->cookie_values),
1920 sk->sk_allocation);
1921 if (tp->cookie_values != NULL)
1922 kref_init(&tp->cookie_values->kref);
1923 }
1924 /* Presumed zeroed, in order of appearance:
1925 * cookie_in_always, cookie_out_never,
1926 * s_data_constant, s_data_in, s_data_out
1927 */
1867 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 1928 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1868 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 1929 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1869 1930
@@ -1931,8 +1992,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1931 1992
1932 dest = &np->daddr; 1993 dest = &np->daddr;
1933 src = &np->rcv_saddr; 1994 src = &np->rcv_saddr;
1934 destp = ntohs(inet->dport); 1995 destp = ntohs(inet->inet_dport);
1935 srcp = ntohs(inet->sport); 1996 srcp = ntohs(inet->inet_sport);
1936 1997
1937 if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 1998 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1938 timer_active = 1; 1999 timer_active = 1;
@@ -2109,7 +2170,6 @@ static struct inet_protosw tcpv6_protosw = {
2109 .protocol = IPPROTO_TCP, 2170 .protocol = IPPROTO_TCP,
2110 .prot = &tcpv6_prot, 2171 .prot = &tcpv6_prot,
2111 .ops = &inet6_stream_ops, 2172 .ops = &inet6_stream_ops,
2112 .capability = -1,
2113 .no_check = 0, 2173 .no_check = 0,
2114 .flags = INET_PROTOSW_PERMANENT | 2174 .flags = INET_PROTOSW_PERMANENT |
2115 INET_PROTOSW_ICSK, 2175 INET_PROTOSW_ICSK,
@@ -2124,12 +2184,17 @@ static int tcpv6_net_init(struct net *net)
2124static void tcpv6_net_exit(struct net *net) 2184static void tcpv6_net_exit(struct net *net)
2125{ 2185{
2126 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2186 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2127 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6); 2187}
2188
2189static void tcpv6_net_exit_batch(struct list_head *net_exit_list)
2190{
2191 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2128} 2192}
2129 2193
2130static struct pernet_operations tcpv6_net_ops = { 2194static struct pernet_operations tcpv6_net_ops = {
2131 .init = tcpv6_net_init, 2195 .init = tcpv6_net_init,
2132 .exit = tcpv6_net_exit, 2196 .exit = tcpv6_net_exit,
2197 .exit_batch = tcpv6_net_exit_batch,
2133}; 2198};
2134 2199
2135int __init tcpv6_init(void) 2200int __init tcpv6_init(void)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index cf538ed5ef6..69ebdbe78c4 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,7 +53,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
53{ 53{
54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); 55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
56 __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; 56 __be32 sk1_rcv_saddr = inet_sk(sk)->inet_rcv_saddr;
57 __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 57 __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
58 int sk_ipv6only = ipv6_only_sock(sk); 58 int sk_ipv6only = ipv6_only_sock(sk);
59 int sk2_ipv6only = inet_v6_ipv6only(sk2); 59 int sk2_ipv6only = inet_v6_ipv6only(sk2);
@@ -63,8 +63,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
63 /* if both are mapped, treat as IPv4 */ 63 /* if both are mapped, treat as IPv4 */
64 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) 64 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
65 return (!sk2_ipv6only && 65 return (!sk2_ipv6only &&
66 (!sk_rcv_saddr || !sk2_rcv_saddr || 66 (!sk1_rcv_saddr || !sk2_rcv_saddr ||
67 sk_rcv_saddr == sk2_rcv_saddr)); 67 sk1_rcv_saddr == sk2_rcv_saddr));
68 68
69 if (addr_type2 == IPV6_ADDR_ANY && 69 if (addr_type2 == IPV6_ADDR_ANY &&
70 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) 70 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
@@ -81,9 +81,33 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
81 return 0; 81 return 0;
82} 82}
83 83
84static unsigned int udp6_portaddr_hash(struct net *net,
85 const struct in6_addr *addr6,
86 unsigned int port)
87{
88 unsigned int hash, mix = net_hash_mix(net);
89
90 if (ipv6_addr_any(addr6))
91 hash = jhash_1word(0, mix);
92 else if (ipv6_addr_v4mapped(addr6))
93 hash = jhash_1word(addr6->s6_addr32[3], mix);
94 else
95 hash = jhash2(addr6->s6_addr32, 4, mix);
96
97 return hash ^ port;
98}
99
100
84int udp_v6_get_port(struct sock *sk, unsigned short snum) 101int udp_v6_get_port(struct sock *sk, unsigned short snum)
85{ 102{
86 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal); 103 unsigned int hash2_nulladdr =
104 udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
105 unsigned int hash2_partial =
106 udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
107
108 /* precompute partial secondary hash */
109 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
110 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
87} 111}
88 112
89static inline int compute_score(struct sock *sk, struct net *net, 113static inline int compute_score(struct sock *sk, struct net *net,
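
udp6_portaddr_hash() above reduces the bound address to a single hash input: the unspecified address hashes as zero, a v4-mapped address hashes only its embedded IPv4 word, any other address hashes all four words, and the port is XORed in last; udp_v6_get_port() precomputes the port-less half so the socket can be rehashed cheaply. The sketch below keeps that structure but substitutes a trivial mixer for jhash/net_hash_mix, so the values are not the kernel's (all names invented):

/*
 * Port+address secondary hash, structured like udp6_portaddr_hash().
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t mix32(uint32_t x, uint32_t seed)      /* stand-in for jhash */
{
	x ^= seed;
	x *= 0x9e3779b1u;
	return x ^ (x >> 16);
}

static bool addr_any(const struct in6_addr *a)
{
	static const struct in6_addr zero;
	return memcmp(a, &zero, sizeof(zero)) == 0;
}

static bool addr_v4mapped(const struct in6_addr *a)
{
	static const uint8_t pfx[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
	return memcmp(a->s6_addr, pfx, sizeof(pfx)) == 0;
}

static uint32_t portaddr_hash(const struct in6_addr *addr6, uint16_t port,
			      uint32_t net_seed)
{
	uint32_t hash;

	if (addr_any(addr6)) {
		hash = mix32(0, net_seed);
	} else if (addr_v4mapped(addr6)) {
		uint32_t v4;

		memcpy(&v4, &addr6->s6_addr[12], 4);   /* embedded IPv4 word only */
		hash = mix32(v4, net_seed);
	} else {
		uint32_t w[4];

		memcpy(w, addr6->s6_addr, 16);         /* full 128-bit address */
		hash = net_seed;
		for (int i = 0; i < 4; i++)
			hash = mix32(w[i], hash);
	}
	return hash ^ port;
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
	printf("v4-mapped hash: %08x\n", (unsigned)portaddr_hash(&a, 53, 0x12345678));
	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("native v6 hash: %08x\n", (unsigned)portaddr_hash(&a, 53, 0x12345678));
	return 0;
}
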
@@ -94,14 +118,14 @@ static inline int compute_score(struct sock *sk, struct net *net,
94{ 118{
95 int score = -1; 119 int score = -1;
96 120
97 if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum && 121 if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
98 sk->sk_family == PF_INET6) { 122 sk->sk_family == PF_INET6) {
99 struct ipv6_pinfo *np = inet6_sk(sk); 123 struct ipv6_pinfo *np = inet6_sk(sk);
100 struct inet_sock *inet = inet_sk(sk); 124 struct inet_sock *inet = inet_sk(sk);
101 125
102 score = 0; 126 score = 0;
103 if (inet->dport) { 127 if (inet->inet_dport) {
104 if (inet->dport != sport) 128 if (inet->inet_dport != sport)
105 return -1; 129 return -1;
106 score++; 130 score++;
107 } 131 }
@@ -124,6 +148,86 @@ static inline int compute_score(struct sock *sk, struct net *net,
124 return score; 148 return score;
125} 149}
126 150
151#define SCORE2_MAX (1 + 1 + 1)
152static inline int compute_score2(struct sock *sk, struct net *net,
153 const struct in6_addr *saddr, __be16 sport,
154 const struct in6_addr *daddr, unsigned short hnum,
155 int dif)
156{
157 int score = -1;
158
159 if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
160 sk->sk_family == PF_INET6) {
161 struct ipv6_pinfo *np = inet6_sk(sk);
162 struct inet_sock *inet = inet_sk(sk);
163
164 if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
165 return -1;
166 score = 0;
167 if (inet->inet_dport) {
168 if (inet->inet_dport != sport)
169 return -1;
170 score++;
171 }
172 if (!ipv6_addr_any(&np->daddr)) {
173 if (!ipv6_addr_equal(&np->daddr, saddr))
174 return -1;
175 score++;
176 }
177 if (sk->sk_bound_dev_if) {
178 if (sk->sk_bound_dev_if != dif)
179 return -1;
180 score++;
181 }
182 }
183 return score;
184}
185
186
187/* called with read_rcu_lock() */
187/* called with rcu_read_lock() */
188static struct sock *udp6_lib_lookup2(struct net *net,
189 const struct in6_addr *saddr, __be16 sport,
190 const struct in6_addr *daddr, unsigned int hnum, int dif,
191 struct udp_hslot *hslot2, unsigned int slot2)
192{
193 struct sock *sk, *result;
194 struct hlist_nulls_node *node;
195 int score, badness;
196
197begin:
198 result = NULL;
199 badness = -1;
200 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
201 score = compute_score2(sk, net, saddr, sport,
202 daddr, hnum, dif);
203 if (score > badness) {
204 result = sk;
205 badness = score;
206 if (score == SCORE2_MAX)
207 goto exact_match;
208 }
209 }
210 /*
211 * if the nulls value we got at the end of this lookup is
212 * not the expected one, we must restart lookup.
213 * We probably met an item that was moved to another chain.
214 */
215 if (get_nulls_value(node) != slot2)
216 goto begin;
217
218 if (result) {
219exact_match:
220 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
221 result = NULL;
222 else if (unlikely(compute_score2(result, net, saddr, sport,
223 daddr, hnum, dif) < badness)) {
224 sock_put(result);
225 goto begin;
226 }
227 }
228 return result;
229}
230
127static struct sock *__udp6_lib_lookup(struct net *net, 231static struct sock *__udp6_lib_lookup(struct net *net,
128 struct in6_addr *saddr, __be16 sport, 232 struct in6_addr *saddr, __be16 sport,
129 struct in6_addr *daddr, __be16 dport, 233 struct in6_addr *daddr, __be16 dport,
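
compute_score2() above only considers sockets whose bound address equals the packet's destination, then awards one point each for a matching connected peer port, connected peer address and bound interface; udp6_lib_lookup2() keeps the best-scoring socket and stops early once SCORE2_MAX is reached. The userspace model below covers just that scoring and selection, not the RCU hlist_nulls walk or the restart-on-rehash logic (fields reduced to 32-bit addresses, names invented):

/*
 * Score-based best-match selection over a candidate table.
 */
#include <stdint.h>
#include <stdio.h>

#define SCORE_MAX 3

struct fake_sock {
	uint32_t bound_addr;   /* must equal packet daddr */
	uint32_t peer_addr;    /* 0 = not connected       */
	uint16_t peer_port;    /* 0 = not connected       */
	int      bound_if;     /* 0 = any interface       */
};

static int score(const struct fake_sock *sk, uint32_t saddr, uint16_t sport,
		 uint32_t daddr, int dif)
{
	int s = 0;

	if (sk->bound_addr != daddr)
		return -1;
	if (sk->peer_port) {
		if (sk->peer_port != sport)
			return -1;
		s++;
	}
	if (sk->peer_addr) {
		if (sk->peer_addr != saddr)
			return -1;
		s++;
	}
	if (sk->bound_if) {
		if (sk->bound_if != dif)
			return -1;
		s++;
	}
	return s;
}

static const struct fake_sock *best_match(const struct fake_sock *tbl, int n,
					  uint32_t saddr, uint16_t sport,
					  uint32_t daddr, int dif)
{
	const struct fake_sock *result = NULL;
	int badness = -1;

	for (int i = 0; i < n; i++) {
		int s = score(&tbl[i], saddr, sport, daddr, dif);

		if (s > badness) {
			result = &tbl[i];
			badness = s;
			if (s == SCORE_MAX)
				break;          /* exact match, stop searching */
		}
	}
	return result;
}

int main(void)
{
	struct fake_sock tbl[] = {
		{ .bound_addr = 0x0a000001 },                           /* bound only      */
		{ .bound_addr = 0x0a000001, .peer_addr = 0xc0000201,
		  .peer_port = 4000, .bound_if = 2 },                   /* fully connected */
	};
	const struct fake_sock *hit =
		best_match(tbl, 2, 0xc0000201, 4000, 0x0a000001, 2);

	printf("picked socket #%d (best score wins)\n", (int)(hit - tbl));
	return 0;
}
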
@@ -132,11 +236,35 @@ static struct sock *__udp6_lib_lookup(struct net *net,
132 struct sock *sk, *result; 236 struct sock *sk, *result;
133 struct hlist_nulls_node *node; 237 struct hlist_nulls_node *node;
134 unsigned short hnum = ntohs(dport); 238 unsigned short hnum = ntohs(dport);
135 unsigned int hash = udp_hashfn(net, hnum); 239 unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
136 struct udp_hslot *hslot = &udptable->hash[hash]; 240 struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
137 int score, badness; 241 int score, badness;
138 242
139 rcu_read_lock(); 243 rcu_read_lock();
244 if (hslot->count > 10) {
245 hash2 = udp6_portaddr_hash(net, daddr, hnum);
246 slot2 = hash2 & udptable->mask;
247 hslot2 = &udptable->hash2[slot2];
248 if (hslot->count < hslot2->count)
249 goto begin;
250
251 result = udp6_lib_lookup2(net, saddr, sport,
252 daddr, hnum, dif,
253 hslot2, slot2);
254 if (!result) {
255 hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
256 slot2 = hash2 & udptable->mask;
257 hslot2 = &udptable->hash2[slot2];
258 if (hslot->count < hslot2->count)
259 goto begin;
260
261 result = udp6_lib_lookup2(net, &in6addr_any, sport,
262 daddr, hnum, dif,
263 hslot2, slot2);
264 }
265 rcu_read_unlock();
266 return result;
267 }
140begin: 268begin:
141 result = NULL; 269 result = NULL;
142 badness = -1; 270 badness = -1;
@@ -152,7 +280,7 @@ begin:
152 * not the expected one, we must restart lookup. 280 * not the expected one, we must restart lookup.
153 * We probably met an item that was moved to another chain. 281 * We probably met an item that was moved to another chain.
154 */ 282 */
155 if (get_nulls_value(node) != hash) 283 if (get_nulls_value(node) != slot)
156 goto begin; 284 goto begin;
157 285
158 if (result) { 286 if (result) {
@@ -252,7 +380,7 @@ try_again:
252 UDP_MIB_INDATAGRAMS, is_udplite); 380 UDP_MIB_INDATAGRAMS, is_udplite);
253 } 381 }
254 382
255 sock_recv_timestamp(msg, sk, skb); 383 sock_recv_ts_and_drops(msg, sk, skb);
256 384
257 /* Copy the address. */ 385 /* Copy the address. */
258 if (msg->msg_name) { 386 if (msg->msg_name) {
@@ -265,8 +393,8 @@ try_again:
265 sin6->sin6_scope_id = 0; 393 sin6->sin6_scope_id = 0;
266 394
267 if (is_udp4) 395 if (is_udp4)
268 ipv6_addr_set(&sin6->sin6_addr, 0, 0, 396 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
269 htonl(0xffff), ip_hdr(skb)->saddr); 397 &sin6->sin6_addr);
270 else { 398 else {
271 ipv6_addr_copy(&sin6->sin6_addr, 399 ipv6_addr_copy(&sin6->sin6_addr,
272 &ipv6_hdr(skb)->saddr); 400 &ipv6_hdr(skb)->saddr);
@@ -383,18 +511,18 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
383 goto drop; 511 goto drop;
384 } 512 }
385 513
386 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { 514 if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
387 /* Note that an ENOMEM error is charged twice */ 515 /* Note that an ENOMEM error is charged twice */
388 if (rc == -ENOMEM) { 516 if (rc == -ENOMEM)
389 UDP6_INC_STATS_BH(sock_net(sk), 517 UDP6_INC_STATS_BH(sock_net(sk),
390 UDP_MIB_RCVBUFERRORS, is_udplite); 518 UDP_MIB_RCVBUFERRORS, is_udplite);
391 atomic_inc(&sk->sk_drops); 519 goto drop_no_sk_drops_inc;
392 }
393 goto drop;
394 } 520 }
395 521
396 return 0; 522 return 0;
397drop: 523drop:
524 atomic_inc(&sk->sk_drops);
525drop_no_sk_drops_inc:
398 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 526 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
399 kfree_skb(skb); 527 kfree_skb(skb);
400 return -1; 528 return -1;
@@ -415,10 +543,11 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
415 if (!net_eq(sock_net(s), net)) 543 if (!net_eq(sock_net(s), net))
416 continue; 544 continue;
417 545
418 if (s->sk_hash == num && s->sk_family == PF_INET6) { 546 if (udp_sk(s)->udp_port_hash == num &&
547 s->sk_family == PF_INET6) {
419 struct ipv6_pinfo *np = inet6_sk(s); 548 struct ipv6_pinfo *np = inet6_sk(s);
420 if (inet->dport) { 549 if (inet->inet_dport) {
421 if (inet->dport != rmt_port) 550 if (inet->inet_dport != rmt_port)
422 continue; 551 continue;
423 } 552 }
424 if (!ipv6_addr_any(&np->daddr) && 553 if (!ipv6_addr_any(&np->daddr) &&
@@ -440,6 +569,33 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
440 return NULL; 569 return NULL;
441} 570}
442 571
572static void flush_stack(struct sock **stack, unsigned int count,
573 struct sk_buff *skb, unsigned int final)
574{
575 unsigned int i;
576 struct sock *sk;
577 struct sk_buff *skb1;
578
579 for (i = 0; i < count; i++) {
580 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
581
582 sk = stack[i];
583 if (skb1) {
584 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1);
587 else
588 sk_add_backlog(sk, skb1);
589 bh_unlock_sock(sk);
590 } else {
591 atomic_inc(&sk->sk_drops);
592 UDP6_INC_STATS_BH(sock_net(sk),
593 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
594 UDP6_INC_STATS_BH(sock_net(sk),
595 UDP_MIB_INERRORS, IS_UDPLITE(sk));
596 }
597 }
598}
443/* 599/*
444 * Note: called only from the BH handler context, 600 * Note: called only from the BH handler context,
445 * so we don't need to lock the hashes. 601 * so we don't need to lock the hashes.
@@ -448,41 +604,43 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
448 struct in6_addr *saddr, struct in6_addr *daddr, 604 struct in6_addr *saddr, struct in6_addr *daddr,
449 struct udp_table *udptable) 605 struct udp_table *udptable)
450{ 606{
451 struct sock *sk, *sk2; 607 struct sock *sk, *stack[256 / sizeof(struct sock *)];
452 const struct udphdr *uh = udp_hdr(skb); 608 const struct udphdr *uh = udp_hdr(skb);
453 struct udp_hslot *hslot = &udptable->hash[udp_hashfn(net, ntohs(uh->dest))]; 609 struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
454 int dif; 610 int dif;
611 unsigned int i, count = 0;
455 612
456 spin_lock(&hslot->lock); 613 spin_lock(&hslot->lock);
457 sk = sk_nulls_head(&hslot->head); 614 sk = sk_nulls_head(&hslot->head);
458 dif = inet6_iif(skb); 615 dif = inet6_iif(skb);
459 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); 616 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
460 if (!sk) { 617 while (sk) {
461 kfree_skb(skb); 618 stack[count++] = sk;
462 goto out; 619 sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
463 } 620 uh->source, saddr, dif);
464 621 if (unlikely(count == ARRAY_SIZE(stack))) {
465 sk2 = sk; 622 if (!sk)
466 while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr, 623 break;
467 uh->source, saddr, dif))) { 624 flush_stack(stack, count, skb, ~0);
468 struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC); 625 count = 0;
469 if (buff) {
470 bh_lock_sock(sk2);
471 if (!sock_owned_by_user(sk2))
472 udpv6_queue_rcv_skb(sk2, buff);
473 else
474 sk_add_backlog(sk2, buff);
475 bh_unlock_sock(sk2);
476 } 626 }
477 } 627 }
478 bh_lock_sock(sk); 628 /*
479 if (!sock_owned_by_user(sk)) 629 * before releasing the lock, we must take reference on sockets
480 udpv6_queue_rcv_skb(sk, skb); 630 */
481 else 631 for (i = 0; i < count; i++)
482 sk_add_backlog(sk, skb); 632 sock_hold(stack[i]);
483 bh_unlock_sock(sk); 633
484out:
485 spin_unlock(&hslot->lock); 634 spin_unlock(&hslot->lock);
635
636 if (count) {
637 flush_stack(stack, count, skb, count - 1);
638
639 for (i = 0; i < count; i++)
640 sock_put(stack[i]);
641 } else {
642 kfree_skb(skb);
643 }
486 return 0; 644 return 0;
487} 645}
488 646
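
[Editor's note] The multicast path above now collects every matching socket into a fixed on-stack array while the slot lock is held, takes a reference on each, drops the lock, and only then delivers via flush_stack(); the entry at index count - 1 consumes the original skb, and when the array overflows mid-scan it is flushed early with ~0 as the final index so every entry gets a clone. A user-space sketch of that collect-then-flush pattern, with illustrative names in place of the socket and skb APIs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct receiver { int id; };

static void deliver(struct receiver *r, char *buf, const char *orig)
{
	printf("receiver %d got %s\n", r->id,
	       buf == orig ? "the original buffer" : "a clone");
	free(buf);
}

/* Hand 'buf' out to 'count' receivers; only the one at 'final' gets the
 * original, everyone else gets a private copy. As in the patch, 'final'
 * is either the last index or out of range, so the original is never
 * consumed before the remaining copies are made. */
static void flush_stack(struct receiver **stack, unsigned int count,
			char *buf, unsigned int final)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		char *b = (i == final) ? buf : strdup(buf);

		if (b)	/* a clone failure would be counted as a drop */
			deliver(stack[i], b, buf);
	}
}

int main(void)
{
	struct receiver a = { 1 }, b = { 2 }, c = { 3 };
	struct receiver *stack[] = { &a, &b, &c };
	char *buf = strdup("payload");

	/* receivers collected under the lock; delivered after dropping it */
	flush_stack(stack, 3, buf, 3 - 1);
	return 0;
}
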
@@ -792,7 +950,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
792 if (ipv6_addr_v4mapped(daddr)) { 950 if (ipv6_addr_v4mapped(daddr)) {
793 struct sockaddr_in sin; 951 struct sockaddr_in sin;
794 sin.sin_family = AF_INET; 952 sin.sin_family = AF_INET;
795 sin.sin_port = sin6 ? sin6->sin6_port : inet->dport; 953 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
796 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 954 sin.sin_addr.s_addr = daddr->s6_addr32[3];
797 msg->msg_name = &sin; 955 msg->msg_name = &sin;
798 msg->msg_namelen = sizeof(sin); 956 msg->msg_namelen = sizeof(sin);
@@ -865,7 +1023,7 @@ do_udp_sendmsg:
865 if (sk->sk_state != TCP_ESTABLISHED) 1023 if (sk->sk_state != TCP_ESTABLISHED)
866 return -EDESTADDRREQ; 1024 return -EDESTADDRREQ;
867 1025
868 fl.fl_ip_dport = inet->dport; 1026 fl.fl_ip_dport = inet->inet_dport;
869 daddr = &np->daddr; 1027 daddr = &np->daddr;
870 fl.fl6_flowlabel = np->flow_label; 1028 fl.fl6_flowlabel = np->flow_label;
871 connected = 1; 1029 connected = 1;
@@ -877,6 +1035,8 @@ do_udp_sendmsg:
877 if (!fl.oif) 1035 if (!fl.oif)
878 fl.oif = np->sticky_pktinfo.ipi6_ifindex; 1036 fl.oif = np->sticky_pktinfo.ipi6_ifindex;
879 1037
1038 fl.mark = sk->sk_mark;
1039
880 if (msg->msg_controllen) { 1040 if (msg->msg_controllen) {
881 opt = &opt_space; 1041 opt = &opt_space;
882 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1042 memset(opt, 0, sizeof(struct ipv6_txoptions));
@@ -909,7 +1069,7 @@ do_udp_sendmsg:
909 fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ 1069 fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
910 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) 1070 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
911 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 1071 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
912 fl.fl_ip_sport = inet->sport; 1072 fl.fl_ip_sport = inet->inet_sport;
913 1073
914 /* merge ip6_build_xmit from ip6_output */ 1074 /* merge ip6_build_xmit from ip6_output */
915 if (opt && opt->srcrt) { 1075 if (opt && opt->srcrt) {
@@ -1190,10 +1350,10 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
1190 1350
1191 dest = &np->daddr; 1351 dest = &np->daddr;
1192 src = &np->rcv_saddr; 1352 src = &np->rcv_saddr;
1193 destp = ntohs(inet->dport); 1353 destp = ntohs(inet->inet_dport);
1194 srcp = ntohs(inet->sport); 1354 srcp = ntohs(inet->inet_sport);
1195 seq_printf(seq, 1355 seq_printf(seq,
1196 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1356 "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1197 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", 1357 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
1198 bucket, 1358 bucket,
1199 src->s6_addr32[0], src->s6_addr32[1], 1359 src->s6_addr32[0], src->s6_addr32[1],
@@ -1282,7 +1442,6 @@ static struct inet_protosw udpv6_protosw = {
1282 .protocol = IPPROTO_UDP, 1442 .protocol = IPPROTO_UDP,
1283 .prot = &udpv6_prot, 1443 .prot = &udpv6_prot,
1284 .ops = &inet6_dgram_ops, 1444 .ops = &inet6_dgram_ops,
1285 .capability =-1,
1286 .no_check = UDP_CSUM_DEFAULT, 1445 .no_check = UDP_CSUM_DEFAULT,
1287 .flags = INET_PROTOSW_PERMANENT, 1446 .flags = INET_PROTOSW_PERMANENT,
1288}; 1447};
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index d737a27ee01..6ea6938919e 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -62,7 +62,6 @@ static struct inet_protosw udplite6_protosw = {
62 .protocol = IPPROTO_UDPLITE, 62 .protocol = IPPROTO_UDPLITE,
63 .prot = &udplitev6_prot, 63 .prot = &udplitev6_prot,
64 .ops = &inet6_dgram_ops, 64 .ops = &inet6_dgram_ops,
65 .capability = -1,
66 .no_check = 0, 65 .no_check = 0,
67 .flags = INET_PROTOSW_PERMANENT, 66 .flags = INET_PROTOSW_PERMANENT,
68}; 67};
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 81a95c00e50..438831d3359 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -23,7 +23,7 @@
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/xfrm.h> 25#include <linux/xfrm.h>
26#include <linux/list.h> 26#include <linux/rculist.h>
27#include <net/ip.h> 27#include <net/ip.h>
28#include <net/xfrm.h> 28#include <net/xfrm.h>
29#include <net/ipv6.h> 29#include <net/ipv6.h>
@@ -36,14 +36,15 @@
36 * per xfrm_address_t. 36 * per xfrm_address_t.
37 */ 37 */
38struct xfrm6_tunnel_spi { 38struct xfrm6_tunnel_spi {
39 struct hlist_node list_byaddr; 39 struct hlist_node list_byaddr;
40 struct hlist_node list_byspi; 40 struct hlist_node list_byspi;
41 xfrm_address_t addr; 41 xfrm_address_t addr;
42 u32 spi; 42 u32 spi;
43 atomic_t refcnt; 43 atomic_t refcnt;
44 struct rcu_head rcu_head;
44}; 45};
45 46
46static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); 47static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
47 48
48static u32 xfrm6_tunnel_spi; 49static u32 xfrm6_tunnel_spi;
49 50
@@ -107,6 +108,7 @@ static void xfrm6_tunnel_spi_fini(void)
107 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) 108 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
108 return; 109 return;
109 } 110 }
111 rcu_barrier();
110 kmem_cache_destroy(xfrm6_tunnel_spi_kmem); 112 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
111 xfrm6_tunnel_spi_kmem = NULL; 113 xfrm6_tunnel_spi_kmem = NULL;
112} 114}
@@ -116,7 +118,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
116 struct xfrm6_tunnel_spi *x6spi; 118 struct xfrm6_tunnel_spi *x6spi;
117 struct hlist_node *pos; 119 struct hlist_node *pos;
118 120
119 hlist_for_each_entry(x6spi, pos, 121 hlist_for_each_entry_rcu(x6spi, pos,
120 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 122 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
121 list_byaddr) { 123 list_byaddr) {
122 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) 124 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
@@ -131,10 +133,10 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
131 struct xfrm6_tunnel_spi *x6spi; 133 struct xfrm6_tunnel_spi *x6spi;
132 u32 spi; 134 u32 spi;
133 135
134 read_lock_bh(&xfrm6_tunnel_spi_lock); 136 rcu_read_lock_bh();
135 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 137 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
136 spi = x6spi ? x6spi->spi : 0; 138 spi = x6spi ? x6spi->spi : 0;
137 read_unlock_bh(&xfrm6_tunnel_spi_lock); 139 rcu_read_unlock_bh();
138 return htonl(spi); 140 return htonl(spi);
139} 141}
140 142
@@ -185,14 +187,15 @@ alloc_spi:
185 if (!x6spi) 187 if (!x6spi)
186 goto out; 188 goto out;
187 189
190 INIT_RCU_HEAD(&x6spi->rcu_head);
188 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 191 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
189 x6spi->spi = spi; 192 x6spi->spi = spi;
190 atomic_set(&x6spi->refcnt, 1); 193 atomic_set(&x6spi->refcnt, 1);
191 194
192 hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); 195 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);
193 196
194 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 197 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
195 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 198 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
196out: 199out:
197 return spi; 200 return spi;
198} 201}
@@ -202,26 +205,32 @@ __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
202 struct xfrm6_tunnel_spi *x6spi; 205 struct xfrm6_tunnel_spi *x6spi;
203 u32 spi; 206 u32 spi;
204 207
205 write_lock_bh(&xfrm6_tunnel_spi_lock); 208 spin_lock_bh(&xfrm6_tunnel_spi_lock);
206 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 209 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
207 if (x6spi) { 210 if (x6spi) {
208 atomic_inc(&x6spi->refcnt); 211 atomic_inc(&x6spi->refcnt);
209 spi = x6spi->spi; 212 spi = x6spi->spi;
210 } else 213 } else
211 spi = __xfrm6_tunnel_alloc_spi(saddr); 214 spi = __xfrm6_tunnel_alloc_spi(saddr);
212 write_unlock_bh(&xfrm6_tunnel_spi_lock); 215 spin_unlock_bh(&xfrm6_tunnel_spi_lock);
213 216
214 return htonl(spi); 217 return htonl(spi);
215} 218}
216 219
217EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); 220EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
218 221
222static void x6spi_destroy_rcu(struct rcu_head *head)
223{
224 kmem_cache_free(xfrm6_tunnel_spi_kmem,
225 container_of(head, struct xfrm6_tunnel_spi, rcu_head));
226}
227
219void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) 228void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
220{ 229{
221 struct xfrm6_tunnel_spi *x6spi; 230 struct xfrm6_tunnel_spi *x6spi;
222 struct hlist_node *pos, *n; 231 struct hlist_node *pos, *n;
223 232
224 write_lock_bh(&xfrm6_tunnel_spi_lock); 233 spin_lock_bh(&xfrm6_tunnel_spi_lock);
225 234
226 hlist_for_each_entry_safe(x6spi, pos, n, 235 hlist_for_each_entry_safe(x6spi, pos, n,
227 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 236 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
@@ -229,14 +238,14 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
229 { 238 {
230 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 239 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
231 if (atomic_dec_and_test(&x6spi->refcnt)) { 240 if (atomic_dec_and_test(&x6spi->refcnt)) {
232 hlist_del(&x6spi->list_byaddr); 241 hlist_del_rcu(&x6spi->list_byaddr);
233 hlist_del(&x6spi->list_byspi); 242 hlist_del_rcu(&x6spi->list_byspi);
234 kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi); 243 call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu);
235 break; 244 break;
236 } 245 }
237 } 246 }
238 } 247 }
239 write_unlock_bh(&xfrm6_tunnel_spi_lock); 248 spin_unlock_bh(&xfrm6_tunnel_spi_lock);
240} 249}
241 250
242EXPORT_SYMBOL(xfrm6_tunnel_free_spi); 251EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
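
[Editor's note] The xfrm6_tunnel changes above convert the SPI hash from a reader-writer lock to RCU: lookups run under rcu_read_lock_bh() over _rcu list walks, updates keep a spinlock and use the _rcu hlist helpers, entries are freed through call_rcu(), and rcu_barrier() is added so every pending callback has run before kmem_cache_destroy(). That teardown ordering is the part worth spelling out; below is a user-space analogue with a plain callback queue standing in for RCU, purely illustrative:

#include <stdio.h>
#include <stdlib.h>

/* A queued "free this later" request, standing in for struct rcu_head. */
struct deferred {
	void (*func)(void *);
	void *arg;
	struct deferred *next;
};

static struct deferred *pending;

static void defer_free(void (*func)(void *), void *arg)
{
	struct deferred *d = malloc(sizeof(*d));

	d->func = func;
	d->arg = arg;
	d->next = pending;
	pending = d;
}

/* Analogue of rcu_barrier(): nothing backing the objects may be torn
 * down until every queued callback has actually run. */
static void barrier_and_run(void)
{
	while (pending) {
		struct deferred *d = pending;

		pending = d->next;
		d->func(d->arg);
		free(d);
	}
}

static void destroy_entry(void *p)
{
	printf("freeing entry %p\n", p);
	free(p);
}

int main(void)
{
	defer_free(destroy_entry, malloc(32));
	defer_free(destroy_entry, malloc(32));

	barrier_and_run();
	printf("now safe to destroy the backing cache\n");
	return 0;
}
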