Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig                         |   4
-rw-r--r--  net/ipv4/af_inet.c                       |  27
-rw-r--r--  net/ipv4/devinet.c                       |  21
-rw-r--r--  net/ipv4/esp4.c                          |   2
-rw-r--r--  net/ipv4/icmp.c                          |  22
-rw-r--r--  net/ipv4/igmp.c                          |  71
-rw-r--r--  net/ipv4/inet_connection_sock.c          |  18
-rw-r--r--  net/ipv4/inet_fragment.c                 |   4
-rw-r--r--  net/ipv4/inet_hashtables.c               |   8
-rw-r--r--  net/ipv4/inet_timewait_sock.c            |  37
-rw-r--r--  net/ipv4/ip_fragment.c                   |   4
-rw-r--r--  net/ipv4/ip_output.c                     |   2
-rw-r--r--  net/ipv4/ipcomp.c                        | 319
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c                |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c               |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c                |  27
-rw-r--r--  net/ipv4/ipvs/ip_vs_dh.c                 |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c                | 116
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c               |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c              |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lc.c                 |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_nq.c                 |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c              |   4
-rw-r--r--  net/ipv4/ipvs/ip_vs_rr.c                 |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c              |   4
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c                |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sh.c                 |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c               |   4
-rw-r--r--  net/ipv4/ipvs/ip_vs_wlc.c                |   2
-rw-r--r--  net/ipv4/ipvs/ip_vs_wrr.c                |   2
-rw-r--r--  net/ipv4/netfilter/Kconfig               |   2
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c     |  39
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c       |   5
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c        |   2
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c          |   2
-rw-r--r--  net/ipv4/netfilter/iptable_security.c    |   2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_common.c |   8
-rw-r--r--  net/ipv4/proc.c                          |   2
-rw-r--r--  net/ipv4/route.c                         | 123
-rw-r--r--  net/ipv4/syncookies.c                    |   1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c               |   8
-rw-r--r--  net/ipv4/tcp.c                           |  12
-rw-r--r--  net/ipv4/tcp_input.c                     |  23
-rw-r--r--  net/ipv4/tcp_ipv4.c                      |  21
-rw-r--r--  net/ipv4/tcp_minisocks.c                 | 140
-rw-r--r--  net/ipv4/tcp_output.c                    |   6
-rw-r--r--  net/ipv4/tcp_timer.c                     |   2
-rw-r--r--  net/ipv4/udp.c                           |  60
-rw-r--r--  net/ipv4/xfrm4_mode_beet.c               |   6
49 files changed, 493 insertions(+), 689 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 4670683b4688..591ea23639ca 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -356,10 +356,8 @@ config INET_ESP
 
 config INET_IPCOMP
 	tristate "IP: IPComp transformation"
-	select XFRM
 	select INET_XFRM_TUNNEL
-	select CRYPTO
-	select CRYPTO_DEFLATE
+	select XFRM_IPCOMP
 	---help---
 	  Support for IP Payload Compression Protocol (IPComp) (RFC3173),
 	  typically needed for IPsec.
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd919d84285f..8a3ac1fa71a9 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
 		return;
 	}
 
-	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
-	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
-	BUG_TRAP(!sk->sk_wmem_queued);
-	BUG_TRAP(!sk->sk_forward_alloc);
+	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+	WARN_ON(sk->sk_wmem_queued);
+	WARN_ON(sk->sk_forward_alloc);
 
 	kfree(inet->opt);
 	dst_release(sk->sk_dst_cache);
@@ -264,7 +264,6 @@ static inline int inet_netns_ok(struct net *net, int protocol)
 static int inet_create(struct net *net, struct socket *sock, int protocol)
 {
 	struct sock *sk;
-	struct list_head *p;
 	struct inet_protosw *answer;
 	struct inet_sock *inet;
 	struct proto *answer_prot;
@@ -281,13 +280,12 @@ static int inet_create(struct net *net, struct socket *sock, int protocol)
 	sock->state = SS_UNCONNECTED;
 
 	/* Look for the requested type/protocol pair. */
-	answer = NULL;
 lookup_protocol:
 	err = -ESOCKTNOSUPPORT;
 	rcu_read_lock();
-	list_for_each_rcu(p, &inetsw[sock->type]) {
-		answer = list_entry(p, struct inet_protosw, list);
+	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
 
+		err = 0;
 		/* Check the non-wild match. */
 		if (protocol == answer->protocol) {
 			if (protocol != IPPROTO_IP)
@@ -302,10 +300,9 @@ lookup_protocol:
 				break;
 		}
 		err = -EPROTONOSUPPORT;
-		answer = NULL;
 	}
 
-	if (unlikely(answer == NULL)) {
+	if (unlikely(err)) {
 		if (try_loading_module < 2) {
 			rcu_read_unlock();
 			/*
@@ -341,7 +338,7 @@ lookup_protocol:
 	answer_flags = answer->flags;
 	rcu_read_unlock();
 
-	BUG_TRAP(answer_prot->slab != NULL);
+	WARN_ON(answer_prot->slab == NULL);
 
 	err = -ENOBUFS;
 	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
@@ -661,8 +658,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
 	lock_sock(sk2);
 
-	BUG_TRAP((1 << sk2->sk_state) &
-		 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
+	WARN_ON(!((1 << sk2->sk_state) &
+		  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
 	sock_graft(sk2, newsock);
 
@@ -1442,6 +1439,10 @@ static int __init inet_init(void)
 
 	(void)sock_register(&inet_family_ops);
 
+#ifdef CONFIG_SYSCTL
+	ip_static_sysctl_init();
+#endif
+
 	/*
 	 * Add all the base protocols.
 	 */
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 2e667e2f90df..b12dae2b0b2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -138,8 +138,8 @@ void in_dev_finish_destroy(struct in_device *idev)
 {
 	struct net_device *dev = idev->dev;
 
-	BUG_TRAP(!idev->ifa_list);
-	BUG_TRAP(!idev->mc_list);
+	WARN_ON(idev->ifa_list);
+	WARN_ON(idev->mc_list);
 #ifdef NET_REFCNT_DEBUG
 	printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
 	       idev, dev ? dev->name : "NIL");
@@ -399,7 +399,7 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 	}
 	ipv4_devconf_setall(in_dev);
 	if (ifa->ifa_dev != in_dev) {
-		BUG_TRAP(!ifa->ifa_dev);
+		WARN_ON(ifa->ifa_dev);
 		in_dev_hold(in_dev);
 		ifa->ifa_dev = in_dev;
 	}
@@ -1029,6 +1029,11 @@ skip:
 	}
 }
 
+static inline bool inetdev_valid_mtu(unsigned mtu)
+{
+	return mtu >= 68;
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1048,6 +1053,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
 				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
 			}
+		} else if (event == NETDEV_CHANGEMTU) {
+			/* Re-enabling IP */
+			if (inetdev_valid_mtu(dev->mtu))
+				in_dev = inetdev_init(dev);
 		}
 		goto out;
 	}
@@ -1058,7 +1067,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 		dev->ip_ptr = NULL;
 		break;
 	case NETDEV_UP:
-		if (dev->mtu < 68)
+		if (!inetdev_valid_mtu(dev->mtu))
 			break;
 		if (dev->flags & IFF_LOOPBACK) {
 			struct in_ifaddr *ifa;
@@ -1080,9 +1089,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
 		ip_mc_down(in_dev);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (dev->mtu >= 68)
+		if (inetdev_valid_mtu(dev->mtu))
 			break;
-		/* MTU falled under 68, disable IP */
+		/* disable IP when MTU is not enough */
 	case NETDEV_UNREGISTER:
 		inetdev_destroy(in_dev);
 		break;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4e73e5708e70..21515d4c49eb 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -575,7 +575,7 @@ static int esp_init_state(struct xfrm_state *x)
 			      crypto_aead_ivsize(aead);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
-	else if (x->props.mode == XFRM_MODE_BEET)
+	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
 		x->props.header_len += IPV4_BEET_PHMAXLEN;
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);
 
 	/*
 	 * Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
 
 	/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6203ece53606..f70fac612596 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct rtable *rt;
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
+	struct net *net = dev_net(dev);
 
 	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL)
@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 			    .nl_u = { .ip4_u = {
 			    .daddr = IGMPV3_ALL_MCR } },
 			    .proto = IPPROTO_IGMP };
-	if (ip_route_output_key(&init_net, &rt, &fl)) {
+	if (ip_route_output_key(net, &rt, &fl)) {
 		kfree_skb(skb);
 		return NULL;
 	}
@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	struct igmphdr *ih;
 	struct rtable *rt;
 	struct net_device *dev = in_dev->dev;
+	struct net *net = dev_net(dev);
 	__be32 group = pmc ? pmc->multiaddr : 0;
 	__be32 dst;
 
@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 		struct flowi fl = { .oif = dev->ifindex,
 				    .nl_u = { .ip4_u = { .daddr = dst } },
 				    .proto = IPPROTO_IGMP };
-		if (ip_route_output_key(&init_net, &rt, &fl))
+		if (ip_route_output_key(net, &rt, &fl))
 			return -1;
 	}
 	if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (im=in_dev->mc_list; im; im=im->next) {
 		if (im->multiaddr == addr) {
 			im->users++;
@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
 		if (i->multiaddr==addr) {
 			if (--i->users == 0) {
@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	for (i=in_dev->mc_list; i; i=i->next)
 		igmp_group_dropped(i);
 
@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
 {
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	in_dev->mc_tomb = NULL;
 #ifdef CONFIG_IP_MULTICAST
 	in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
 	for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 
 	ASSERT_RTNL();
 
-	if (!net_eq(dev_net(in_dev->dev), &init_net))
-		return;
-
 	/* Deactivate timers */
 	ip_mc_down(in_dev);
 
@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 	write_unlock_bh(&in_dev->mc_list_lock);
 }
 
-static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
+static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
 	struct flowi fl = { .nl_u = { .ip4_u =
 			    { .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
 	struct in_device *idev = NULL;
 
 	if (imr->imr_ifindex) {
-		idev = inetdev_by_index(&init_net, imr->imr_ifindex);
+		idev = inetdev_by_index(net, imr->imr_ifindex);
 		if (idev)
 			__in_dev_put(idev);
 		return idev;
 	}
 	if (imr->imr_address.s_addr) {
-		dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
+		dev = ip_dev_find(net, imr->imr_address.s_addr);
 		if (!dev)
 			return NULL;
 		dev_put(dev);
 	}
 
-	if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
+	if (!dev && !ip_route_output_key(net, &rt, &fl)) {
 		dev = rt->u.dst.dev;
 		ip_rt_put(rt);
 	}
@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 	struct ip_mc_socklist *iml=NULL, *i;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	int ifindex;
 	int count = 0;
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
 
 	if (!in_dev) {
 		iml = NULL;
@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml, **imlp;
 	struct in_device *in_dev;
+	struct net *net = sock_net(sk);
 	__be32 group = imr->imr_multiaddr.s_addr;
 	u32 ifindex;
 	int ret = -EADDRNOTAVAIL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
-	in_dev = ip_mc_find_dev(imr);
+	in_dev = ip_mc_find_dev(net, imr);
 	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
 		if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	struct in_device *in_dev = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
 	int leavegroup = 0;
 	int i, j, rv;
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
 	imr.imr_address.s_addr = mreqs->imr_interface;
 	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *newpsl, *psl;
+	struct net *net = sock_net(sk);
 	int leavegroup = 0;
 
 	if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 	    msf->imsf_fmode != MCAST_EXCLUDE)
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
 	imr.imr_address.s_addr = msf->imsf_interface;
 	imr.imr_ifindex = ifindex;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
+	struct net *net = sock_net(sk);
 
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
 	imr.imr_address.s_addr = msf->imsf_interface;
 	imr.imr_ifindex = 0;
-	in_dev = ip_mc_find_dev(&imr);
+	in_dev = ip_mc_find_dev(net, &imr);
 
 	if (!in_dev) {
 		err = -ENODEV;
@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
 	if (!ipv4_is_multicast(addr))
 		return -EINVAL;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return -EPROTONOSUPPORT;
-
 	rtnl_lock();
 
 	err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml;
+	struct net *net = sock_net(sk);
 
 	if (inet->mc_list == NULL)
 		return;
 
-	if (!net_eq(sock_net(sk), &init_net))
-		return;
-
 	rtnl_lock();
 	while ((iml = inet->mc_list) != NULL) {
 		struct in_device *in_dev;
 		inet->mc_list = iml->next;
 
-		in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
+		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 		if (in_dev != NULL) {
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bb81c958b744..0c1ae68ee84b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -167,7 +167,7 @@ tb_not_found:
 success:
 	if (!inet_csk(sk)->icsk_bind_hash)
 		inet_bind_hash(sk, tb, snum);
-	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
+	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
 	ret = 0;
 
 fail_unlock:
@@ -260,7 +260,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 	}
 
 	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
+	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
 out:
 	release_sock(sk);
 	return newsk;
@@ -386,7 +386,7 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
 		    ireq->rmt_addr == raddr &&
 		    ireq->loc_addr == laddr &&
 		    AF_INET_FAMILY(req->rsk_ops->family)) {
-			BUG_TRAP(!req->sk);
+			WARN_ON(req->sk);
 			*prevp = prev;
 			break;
 		}
@@ -539,14 +539,14 @@ EXPORT_SYMBOL_GPL(inet_csk_clone);
  */
 void inet_csk_destroy_sock(struct sock *sk)
 {
-	BUG_TRAP(sk->sk_state == TCP_CLOSE);
-	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
+	WARN_ON(sk->sk_state != TCP_CLOSE);
+	WARN_ON(!sock_flag(sk, SOCK_DEAD));
 
 	/* It cannot be in hash table! */
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 
 	/* If it has not 0 inet_sk(sk)->num, it must be bound */
-	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
+	WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);
 
 	sk->sk_prot->destroy(sk);
 
@@ -629,7 +629,7 @@ void inet_csk_listen_stop(struct sock *sk)
 
 		local_bh_disable();
 		bh_lock_sock(child);
-		BUG_TRAP(!sock_owned_by_user(child));
+		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);
 
 		sk->sk_prot->disconnect(child, O_NONBLOCK);
@@ -647,7 +647,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		sk_acceptq_removed(sk);
 		__reqsk_free(req);
 	}
-	BUG_TRAP(!sk->sk_ack_backlog);
+	WARN_ON(sk->sk_ack_backlog);
 }
 
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 0546a0bc97ea..6c52e08f786e 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -134,8 +134,8 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 	struct sk_buff *fp;
 	struct netns_frags *nf;
 
-	BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
-	BUG_TRAP(del_timer(&q->timer) == 0);
+	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+	WARN_ON(del_timer(&q->timer) != 0);
 
 	/* Release all fragment data. */
 	fp = q->fragments;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 115f53722d20..44981906fb91 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -305,7 +305,7 @@ unique:
 	inet->num = lport;
 	inet->sport = htons(lport);
 	sk->sk_hash = hash;
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	write_unlock(lock);
@@ -342,7 +342,7 @@ void __inet_hash_nolisten(struct sock *sk)
 	rwlock_t *lock;
 	struct inet_ehash_bucket *head;
 
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 
 	sk->sk_hash = inet_sk_ehashfn(sk);
 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -367,7 +367,7 @@ static void __inet_hash(struct sock *sk)
 		return;
 	}
 
-	BUG_TRAP(sk_unhashed(sk));
+	WARN_ON(!sk_unhashed(sk));
 	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
 	lock = &hashinfo->lhash_lock;
 
@@ -450,7 +450,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		 */
 		inet_bind_bucket_for_each(tb, node, &head->chain) {
 			if (tb->ib_net == net && tb->port == port) {
-				BUG_TRAP(!hlist_empty(&tb->owners));
+				WARN_ON(hlist_empty(&tb->owners));
 				if (tb->fastreuse >= 0)
 					goto next_port;
 				if (!check_established(death_row, sk,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 75c2def8f9a0..743f011b9a84 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -86,7 +86,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
 	tw->tw_tb = icsk->icsk_bind_hash;
-	BUG_TRAP(icsk->icsk_bind_hash);
+	WARN_ON(!icsk->icsk_bind_hash);
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
 
@@ -409,3 +409,38 @@ out:
 }
 
 EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
+
+void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
+		     struct inet_timewait_death_row *twdr, int family)
+{
+	struct inet_timewait_sock *tw;
+	struct sock *sk;
+	struct hlist_node *node;
+	int h;
+
+	local_bh_disable();
+	for (h = 0; h < (hashinfo->ehash_size); h++) {
+		struct inet_ehash_bucket *head =
+			inet_ehash_bucket(hashinfo, h);
+		rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
+restart:
+		write_lock(lock);
+		sk_for_each(sk, node, &head->twchain) {
+
+			tw = inet_twsk(sk);
+			if (!net_eq(twsk_net(tw), net) ||
+			    tw->tw_family != family)
+				continue;
+
+			atomic_inc(&tw->tw_refcnt);
+			write_unlock(lock);
+			inet_twsk_deschedule(tw, twdr);
+			inet_twsk_put(tw);
+
+			goto restart;
+		}
+		write_unlock(lock);
+	}
+	local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(inet_twsk_purge);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 38d38f058018..2152d222b954 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -488,8 +488,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		qp->q.fragments = head;
 	}
 
-	BUG_TRAP(head != NULL);
-	BUG_TRAP(FRAG_CB(head)->offset == 0);
+	WARN_ON(head == NULL);
+	WARN_ON(FRAG_CB(head)->offset != 0);
 
 	/* Allocate a new buffer for the datagram. */
 	ihlen = ip_hdrlen(head);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 465544f6281a..d533a89e08de 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 	__skb_pull(newskb, skb_network_offset(newskb));
 	newskb->pkt_type = PACKET_LOOPBACK;
 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
-	BUG_TRAP(newskb->dst);
+	WARN_ON(!newskb->dst);
 	netif_rx(newskb);
 	return 0;
 }
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index a75807b971b3..38ccb6dfb02e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -14,153 +14,14 @@
  * - Adaptive compression.
  */
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <linux/err.h>
-#include <linux/pfkeyv2.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/list.h>
-#include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
-#include <linux/mutex.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/icmp.h>
 #include <net/ipcomp.h>
 #include <net/protocol.h>
-
-struct ipcomp_tfms {
-	struct list_head list;
-	struct crypto_comp **tfms;
-	int users;
-};
-
-static DEFINE_MUTEX(ipcomp_resource_mutex);
-static void **ipcomp_scratches;
-static int ipcomp_scratch_users;
-static LIST_HEAD(ipcomp_tfms_list);
-
-static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
-{
-	struct ipcomp_data *ipcd = x->data;
-	const int plen = skb->len;
-	int dlen = IPCOMP_SCRATCH_SIZE;
-	const u8 *start = skb->data;
-	const int cpu = get_cpu();
-	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
-
-	if (err)
-		goto out;
-
-	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
-	if (err)
-		goto out;
-
-	skb->truesize += dlen - plen;
-	__skb_put(skb, dlen - plen);
-	skb_copy_to_linear_data(skb, scratch, dlen);
-out:
-	put_cpu();
-	return err;
-}
-
-static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
-{
-	int nexthdr;
-	int err = -ENOMEM;
-	struct ip_comp_hdr *ipch;
-
-	if (skb_linearize_cow(skb))
-		goto out;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	/* Remove ipcomp header and decompress original payload */
-	ipch = (void *)skb->data;
-	nexthdr = ipch->nexthdr;
-
-	skb->transport_header = skb->network_header + sizeof(*ipch);
-	__skb_pull(skb, sizeof(*ipch));
-	err = ipcomp_decompress(x, skb);
-	if (err)
-		goto out;
-
-	err = nexthdr;
-
-out:
-	return err;
-}
-
-static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
-{
-	struct ipcomp_data *ipcd = x->data;
-	const int plen = skb->len;
-	int dlen = IPCOMP_SCRATCH_SIZE;
-	u8 *start = skb->data;
-	const int cpu = get_cpu();
-	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-	int err;
-
-	local_bh_disable();
-	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-	local_bh_enable();
-	if (err)
-		goto out;
-
-	if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
-		err = -EMSGSIZE;
-		goto out;
-	}
-
-	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-	put_cpu();
-
-	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
-	return 0;
-
-out:
-	put_cpu();
-	return err;
-}
-
-static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
-{
-	int err;
-	struct ip_comp_hdr *ipch;
-	struct ipcomp_data *ipcd = x->data;
-
-	if (skb->len < ipcd->threshold) {
-		/* Don't bother compressing */
-		goto out_ok;
-	}
-
-	if (skb_linearize_cow(skb))
-		goto out_ok;
-
-	err = ipcomp_compress(x, skb);
-
-	if (err) {
-		goto out_ok;
-	}
-
-	/* Install ipcomp header, convert into ipcomp datagram. */
-	ipch = ip_comp_hdr(skb);
-	ipch->nexthdr = *skb_mac_header(skb);
-	ipch->flags = 0;
-	ipch->cpi = htons((u16 )ntohl(x->id.spi));
-	*skb_mac_header(skb) = IPPROTO_COMP;
-out_ok:
-	skb_push(skb, -skb_network_offset(skb));
-	return 0;
-}
+#include <net/sock.h>
 
165static void ipcomp4_err(struct sk_buff *skb, u32 info) 26static void ipcomp4_err(struct sk_buff *skb, u32 info)
166{ 27{
@@ -241,155 +102,9 @@ out:
 	return err;
 }
 
-static void ipcomp_free_scratches(void)
-{
-	int i;
-	void **scratches;
-
-	if (--ipcomp_scratch_users)
-		return;
-
-	scratches = ipcomp_scratches;
-	if (!scratches)
-		return;
-
-	for_each_possible_cpu(i)
-		vfree(*per_cpu_ptr(scratches, i));
-
-	free_percpu(scratches);
-}
-
-static void **ipcomp_alloc_scratches(void)
-{
-	int i;
-	void **scratches;
-
-	if (ipcomp_scratch_users++)
-		return ipcomp_scratches;
-
-	scratches = alloc_percpu(void *);
-	if (!scratches)
-		return NULL;
-
-	ipcomp_scratches = scratches;
-
-	for_each_possible_cpu(i) {
-		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
-		if (!scratch)
-			return NULL;
-		*per_cpu_ptr(scratches, i) = scratch;
-	}
-
-	return scratches;
-}
-
-static void ipcomp_free_tfms(struct crypto_comp **tfms)
-{
-	struct ipcomp_tfms *pos;
-	int cpu;
-
-	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-		if (pos->tfms == tfms)
-			break;
-	}
-
-	BUG_TRAP(pos);
-
-	if (--pos->users)
-		return;
-
-	list_del(&pos->list);
-	kfree(pos);
-
-	if (!tfms)
-		return;
-
-	for_each_possible_cpu(cpu) {
-		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
-		crypto_free_comp(tfm);
-	}
-	free_percpu(tfms);
-}
-
-static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
-{
-	struct ipcomp_tfms *pos;
-	struct crypto_comp **tfms;
-	int cpu;
-
-	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = raw_smp_processor_id();
-
-	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-		struct crypto_comp *tfm;
-
-		tfms = pos->tfms;
-		tfm = *per_cpu_ptr(tfms, cpu);
-
-		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
-			pos->users++;
-			return tfms;
-		}
-	}
-
-	pos = kmalloc(sizeof(*pos), GFP_KERNEL);
-	if (!pos)
-		return NULL;
-
-	pos->users = 1;
-	INIT_LIST_HEAD(&pos->list);
-	list_add(&pos->list, &ipcomp_tfms_list);
-
-	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
-	if (!tfms)
-		goto error;
-
-	for_each_possible_cpu(cpu) {
-		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
-							    CRYPTO_ALG_ASYNC);
-		if (IS_ERR(tfm))
-			goto error;
-		*per_cpu_ptr(tfms, cpu) = tfm;
-	}
-
-	return tfms;
-
-error:
-	ipcomp_free_tfms(tfms);
-	return NULL;
-}
-
-static void ipcomp_free_data(struct ipcomp_data *ipcd)
+static int ipcomp4_init_state(struct xfrm_state *x)
 {
-	if (ipcd->tfms)
-		ipcomp_free_tfms(ipcd->tfms);
-	ipcomp_free_scratches();
-}
-
-static void ipcomp_destroy(struct xfrm_state *x)
-{
-	struct ipcomp_data *ipcd = x->data;
-	if (!ipcd)
-		return;
-	xfrm_state_delete_tunnel(x);
-	mutex_lock(&ipcomp_resource_mutex);
-	ipcomp_free_data(ipcd);
-	mutex_unlock(&ipcomp_resource_mutex);
-	kfree(ipcd);
-}
-
-static int ipcomp_init_state(struct xfrm_state *x)
-{
-	int err;
-	struct ipcomp_data *ipcd;
-	struct xfrm_algo_desc *calg_desc;
-
-	err = -EINVAL;
-	if (!x->calg)
-		goto out;
-
-	if (x->encap)
-		goto out;
+	int err = -EINVAL;
 
 	x->props.header_len = 0;
 	switch (x->props.mode) {
@@ -402,40 +117,22 @@ static int ipcomp_init_state(struct xfrm_state *x)
 		goto out;
 	}
 
-	err = -ENOMEM;
-	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
-	if (!ipcd)
+	err = ipcomp_init_state(x);
+	if (err)
 		goto out;
 
-	mutex_lock(&ipcomp_resource_mutex);
-	if (!ipcomp_alloc_scratches())
-		goto error;
-
-	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
-	if (!ipcd->tfms)
-		goto error;
-	mutex_unlock(&ipcomp_resource_mutex);
-
 	if (x->props.mode == XFRM_MODE_TUNNEL) {
 		err = ipcomp_tunnel_attach(x);
 		if (err)
 			goto error_tunnel;
 	}
 
-	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
-	BUG_ON(!calg_desc);
-	ipcd->threshold = calg_desc->uinfo.comp.threshold;
-	x->data = ipcd;
 	err = 0;
 out:
 	return err;
 
 error_tunnel:
-	mutex_lock(&ipcomp_resource_mutex);
-error:
-	ipcomp_free_data(ipcd);
-	mutex_unlock(&ipcomp_resource_mutex);
-	kfree(ipcd);
+	ipcomp_destroy(x);
 	goto out;
 }
 
@@ -443,7 +140,7 @@ static const struct xfrm_type ipcomp_type = {
 	.description = "IPCOMP4",
 	.owner = THIS_MODULE,
 	.proto = IPPROTO_COMP,
-	.init_state = ipcomp_init_state,
+	.init_state = ipcomp4_init_state,
 	.destructor = ipcomp_destroy,
 	.input = ipcomp_input,
 	.output = ipcomp_output
@@ -481,7 +178,7 @@ module_init(ipcomp4_init);
 module_exit(ipcomp4_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
+MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp/IPv4) - RFC3173");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
 
 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 1f1897a1a702..201b8ea3020d 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
 }
 
 
-int ip_vs_app_init(void)
+int __init ip_vs_app_init(void)
 {
 	/* we will replace it with proc_net_ipvs_create() soon */
 	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index f8bdae47a77f..44a6872dc245 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
 }
 
 
-int ip_vs_conn_init(void)
+int __init ip_vs_conn_init(void)
 {
 	int idx;
 
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 9a5ace0b4dd6..6379705a8dcb 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -683,9 +683,22 @@ static void
 ip_vs_zero_stats(struct ip_vs_stats *stats)
 {
 	spin_lock_bh(&stats->lock);
-	memset(stats, 0, (char *)&stats->lock - (char *)stats);
-	spin_unlock_bh(&stats->lock);
+
+	stats->conns = 0;
+	stats->inpkts = 0;
+	stats->outpkts = 0;
+	stats->inbytes = 0;
+	stats->outbytes = 0;
+
+	stats->cps = 0;
+	stats->inpps = 0;
+	stats->outpps = 0;
+	stats->inbps = 0;
+	stats->outbps = 0;
+
 	ip_vs_zero_estimator(stats);
+
+	spin_unlock_bh(&stats->lock);
 }
 
 /*
@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
 	{ .ctl_name = 0 }
 };
 
-struct ctl_path net_vs_ctl_path[] = {
+const struct ctl_path net_vs_ctl_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
 	{ .procname = "vs", },
@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {
 
 #endif
 
-struct ip_vs_stats ip_vs_stats;
+struct ip_vs_stats ip_vs_stats = {
+	.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
+};
 
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
 };
 
 
-int ip_vs_control_init(void)
+int __init ip_vs_control_init(void)
 {
 	int ret;
 	int idx;
@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
 		INIT_LIST_HEAD(&ip_vs_rtable[idx]);
 	}
 
-	memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
-	spin_lock_init(&ip_vs_stats.lock);
 	ip_vs_new_estimator(&ip_vs_stats);
 
 	/* Hook the defense timer */
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 8afc1503ed20..fa66824d264f 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
 	.name = "dh",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
 	.init_service = ip_vs_dh_init_svc,
 	.done_service = ip_vs_dh_done_svc,
 	.update_service = ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
 
 static int __init ip_vs_dh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index bc04eedd6dbb..5a20f93bd7f9 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
+#include <linux/list.h>
 
 #include <net/ip_vs.h>
 
@@ -44,28 +45,11 @@
  */
 
 
-struct ip_vs_estimator
-{
-	struct ip_vs_estimator *next;
-	struct ip_vs_stats *stats;
-
-	u32 last_conns;
-	u32 last_inpkts;
-	u32 last_outpkts;
-	u64 last_inbytes;
-	u64 last_outbytes;
-
-	u32 cps;
-	u32 inpps;
-	u32 outpps;
-	u32 inbps;
-	u32 outbps;
-};
-
+static void estimation_timer(unsigned long arg);
 
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
 
 static void estimation_timer(unsigned long arg)
 {
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
 	u64 n_inbytes, n_outbytes;
 	u32 rate;
 
-	read_lock(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		s = e->stats;
+	spin_lock(&est_lock);
+	list_for_each_entry(e, &est_list, list) {
+		s = container_of(e, struct ip_vs_stats, est);
 
 		spin_lock(&s->lock);
 		n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
 		s->outbps = (e->outbps+0xF)>>5;
 		spin_unlock(&s->lock);
 	}
-	read_unlock(&est_lock);
+	spin_unlock(&est_lock);
 	mod_timer(&est_timer, jiffies + 2*HZ);
 }
 
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est;
+	struct ip_vs_estimator *est = &stats->est;
 
-	est = kzalloc(sizeof(*est), GFP_KERNEL);
-	if (est == NULL)
-		return -ENOMEM;
+	INIT_LIST_HEAD(&est->list);
 
-	est->stats = stats;
 	est->last_conns = stats->conns;
 	est->cps = stats->cps<<10;
 
@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
 	est->last_outbytes = stats->outbytes;
 	est->outbps = stats->outbps<<5;
 
-	write_lock_bh(&est_lock);
-	est->next = est_list;
-	if (est->next == NULL) {
-		setup_timer(&est_timer, estimation_timer, 0);
-		est_timer.expires = jiffies + 2*HZ;
-		add_timer(&est_timer);
-	}
-	est_list = est;
-	write_unlock_bh(&est_lock);
-	return 0;
+	spin_lock_bh(&est_lock);
+	if (list_empty(&est_list))
+		mod_timer(&est_timer, jiffies + 2 * HZ);
+	list_add(&est->list, &est_list);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_kill_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *est, **pest;
-	int killed = 0;
-
-	write_lock_bh(&est_lock);
-	pest = &est_list;
-	while ((est=*pest) != NULL) {
-		if (est->stats != stats) {
-			pest = &est->next;
-			continue;
-		}
-		*pest = est->next;
-		kfree(est);
-		killed++;
+	struct ip_vs_estimator *est = &stats->est;
+
+	spin_lock_bh(&est_lock);
+	list_del(&est->list);
+	while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+		spin_unlock_bh(&est_lock);
+		cpu_relax();
+		spin_lock_bh(&est_lock);
 	}
-	if (killed && est_list == NULL)
-		del_timer_sync(&est_timer);
-	write_unlock_bh(&est_lock);
+	spin_unlock_bh(&est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
 {
-	struct ip_vs_estimator *e;
+	struct ip_vs_estimator *est = &stats->est;
 
-	write_lock_bh(&est_lock);
-	for (e = est_list; e; e = e->next) {
-		if (e->stats != stats)
-			continue;
-
-		/* set counters zero */
-		e->last_conns = 0;
-		e->last_inpkts = 0;
-		e->last_outpkts = 0;
-		e->last_inbytes = 0;
-		e->last_outbytes = 0;
-		e->cps = 0;
-		e->inpps = 0;
-		e->outpps = 0;
-		e->inbps = 0;
-		e->outbps = 0;
-	}
-	write_unlock_bh(&est_lock);
+	/* set counters zero, caller must hold the stats->lock lock */
+	est->last_inbytes = 0;
+	est->last_outbytes = 0;
+	est->last_conns = 0;
+	est->last_inpkts = 0;
+	est->last_outpkts = 0;
+	est->cps = 0;
+	est->inpps = 0;
+	est->outpps = 0;
+	est->inbps = 0;
+	est->outbps = 0;
 }
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 0efa3db4b180..7a6a319f544a 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
 	.name = "lblc",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
 	.init_service = ip_vs_lblc_init_svc,
 	.done_service = ip_vs_lblc_done_svc,
 	.update_service = ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
 {
 	int ret;
 
-	INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
 	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
 	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
 	if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 8e3bbeb45138..c234e73968a6 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
 	.name = "lblcr",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
 	.init_service = ip_vs_lblcr_init_svc,
 	.done_service = ip_vs_lblcr_done_svc,
 	.update_service = ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
 {
 	int ret;
 
-	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
 	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
 	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
 	if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ac9f08e065d5..ebcdbf75ac65 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
 	.name = "lc",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
 	.init_service = ip_vs_lc_init_svc,
 	.done_service = ip_vs_lc_done_svc,
 	.update_service = ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
 
 static int __init ip_vs_lc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index a46bf258d420..92f3a6770031 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
 	.name = "nq",
 	.refcnt = ATOMIC_INIT(0),
 	.module = THIS_MODULE,
+	.n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
 	.init_service = ip_vs_nq_init_svc,
 	.done_service = ip_vs_nq_done_svc,
 	.update_service = ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
145 146
146static int __init ip_vs_nq_init(void) 147static int __init ip_vs_nq_init(void)
147{ 148{
148 INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
149 return register_ip_vs_scheduler(&ip_vs_nq_scheduler); 149 return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
150} 150}
151 151
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 876714f23d65..6099a88fc200 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
 /*
  *	register an ipvs protocol
  */
-static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
+static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
 	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
 
@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
 }
 
 
-int ip_vs_protocol_init(void)
+int __init ip_vs_protocol_init(void)
 {
 	char protocols[64];
 #define REGISTER_PROTOCOL(p)			\
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index c8db12d39e61..358110d17e59 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
 	.name =			"rr",			/* name */
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
 	.init_service =		ip_vs_rr_init_svc,
 	.done_service =		ip_vs_rr_done_svc,
 	.update_service =	ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
 
 static int __init ip_vs_rr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index b64767309855..a46ad9e35016 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 
 	write_lock_bh(&__ip_vs_sched_lock);
 
-	if (scheduler->n_list.next != &scheduler->n_list) {
+	if (!list_empty(&scheduler->n_list)) {
 		write_unlock_bh(&__ip_vs_sched_lock);
 		ip_vs_use_count_dec();
 		IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	}
 
 	write_lock_bh(&__ip_vs_sched_lock);
-	if (scheduler->n_list.next == &scheduler->n_list) {
+	if (list_empty(&scheduler->n_list)) {
 		write_unlock_bh(&__ip_vs_sched_lock);
 		IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
 			  "is not in the list. failed\n", scheduler->name);
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 2a7d31358181..77663d84cbd1 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
 	.name =			"sed",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
 	.init_service =		ip_vs_sed_init_svc,
 	.done_service =		ip_vs_sed_done_svc,
 	.update_service =	ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
 
 static int __init ip_vs_sed_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index b8fdfac65001..7b979e228056 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
 	.name =			"sh",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
 	.init_service =		ip_vs_sh_init_svc,
 	.done_service =		ip_vs_sh_done_svc,
 	.update_service =	ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
 
 static int __init ip_vs_sh_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd96c286..a652da2c3200 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
 	 * progress of stopping the master sync daemon.
 	 */
 
-		spin_lock(&ip_vs_sync_lock);
+		spin_lock_bh(&ip_vs_sync_lock);
 		ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock(&ip_vs_sync_lock);
+		spin_unlock_bh(&ip_vs_sync_lock);
 		kthread_stop(sync_master_thread);
 		sync_master_thread = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
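The switch to the _bh lock variants here is the usual rule for a lock that is also taken from bottom-half (softirq) context, which is where IPVS does its packet processing: process context must disable bottom halves while holding the lock, or a softirq arriving on the same CPU could spin on it forever. A minimal module-style sketch of the pattern (demo_lock and demo_state are hypothetical names, not the IPVS ones):

    #include <linux/module.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock); /* hypothetical lock */
    static int demo_state;             /* imagine this is also written from softirq */

    static int __init demo_init(void)
    {
        /* _bh variants: bottom halves stay off for the critical section. */
        spin_lock_bh(&demo_lock);
        demo_state &= ~0x1;
        spin_unlock_bh(&demo_lock);
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");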
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 772c3cb4eca1..9b0ef86bb1f7 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
 	.name =			"wlc",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
 	.init_service =		ip_vs_wlc_init_svc,
 	.done_service =		ip_vs_wlc_done_svc,
 	.update_service =	ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
 
 static int __init ip_vs_wlc_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
 }
 
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 1d6932d7dc97..0d86a79b87b5 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -212,6 +212,7 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
 	.name =			"wrr",
 	.refcnt =		ATOMIC_INIT(0),
 	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
 	.init_service =		ip_vs_wrr_init_svc,
 	.done_service =		ip_vs_wrr_done_svc,
 	.update_service =	ip_vs_wrr_update_svc,
@@ -220,7 +221,6 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
 
 static int __init ip_vs_wrr_init(void)
 {
-	INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list);
 	return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ;
 }
 
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f23e60c93ef9..90eb7cb47e77 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -369,7 +369,7 @@ config IP_NF_SECURITY
 	tristate "Security table"
 	depends on IP_NF_IPTABLES
 	depends on SECURITY
-	default m if NETFILTER_ADVANCED=n
+	depends on NETFILTER_ADVANCED
 	help
 	  This option adds a `security' table to iptables, for use
 	  with Mandatory Access Control (MAC) policy.
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 3be4d07e7ed9..082f5dd3156c 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -55,32 +55,53 @@ static struct xt_table packet_filter = {
 };
 
 /* The work comes in here from netfilter.c */
-static unsigned int arpt_hook(unsigned int hook,
+static unsigned int arpt_in_hook(unsigned int hook,
 			      struct sk_buff *skb,
 			      const struct net_device *in,
 			      const struct net_device *out,
 			      int (*okfn)(struct sk_buff *))
 {
-	return arpt_do_table(skb, hook, in, out, init_net.ipv4.arptable_filter);
+	return arpt_do_table(skb, hook, in, out,
+			     dev_net(in)->ipv4.arptable_filter);
+}
+
+static unsigned int arpt_out_hook(unsigned int hook,
+				  struct sk_buff *skb,
+				  const struct net_device *in,
+				  const struct net_device *out,
+				  int (*okfn)(struct sk_buff *))
+{
+	return arpt_do_table(skb, hook, in, out,
+			     dev_net(out)->ipv4.arptable_filter);
+}
+
+static unsigned int arpt_forward_hook(unsigned int hook,
+				      struct sk_buff *skb,
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
+{
+	return arpt_do_table(skb, hook, in, out,
+			     dev_net(in)->ipv4.arptable_filter);
 }
 
 static struct nf_hook_ops arpt_ops[] __read_mostly = {
 	{
-		.hook		= arpt_hook,
+		.hook		= arpt_in_hook,
 		.owner		= THIS_MODULE,
 		.pf		= NF_ARP,
 		.hooknum	= NF_ARP_IN,
 		.priority	= NF_IP_PRI_FILTER,
 	},
 	{
-		.hook		= arpt_hook,
+		.hook		= arpt_out_hook,
 		.owner		= THIS_MODULE,
 		.pf		= NF_ARP,
 		.hooknum	= NF_ARP_OUT,
 		.priority	= NF_IP_PRI_FILTER,
 	},
 	{
-		.hook		= arpt_hook,
+		.hook		= arpt_forward_hook,
 		.owner		= THIS_MODULE,
 		.pf		= NF_ARP,
 		.hooknum	= NF_ARP_FORWARD,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1819ad7ab910..fafe8ebb4c55 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -475,11 +475,10 @@ static void arp_print(struct arp_payload *payload)
 #define HBUFFERLEN 30
 	char hbuffer[HBUFFERLEN];
 	int j,k;
-	const char hexbuf[]= "0123456789abcdef";
 
 	for (k=0, j=0; k < HBUFFERLEN-3 && j < ETH_ALEN; j++) {
-		hbuffer[k++]=hexbuf[(payload->src_hw[j]>>4)&15];
-		hbuffer[k++]=hexbuf[payload->src_hw[j]&15];
+		hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
+		hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
 		hbuffer[k++]=':';
 	}
 	hbuffer[--k]='\0';
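The CLUSTERIP hunk drops the local hexbuf[] lookup table in favour of the kernel's shared hex_asc_hi()/hex_asc_lo() helpers. A standalone sketch, with the helpers reimplemented so it compiles outside the kernel (the kernel versions should behave the same way):

    #include <stdio.h>

    static const char hex_asc[] = "0123456789abcdef";
    #define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
    #define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]

    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
        char buf[sizeof(mac) * 3];
        int j, k = 0;

        for (j = 0; j < 6; j++) {
            buf[k++] = hex_asc_hi(mac[j]);   /* high nibble */
            buf[k++] = hex_asc_lo(mac[j]);   /* low nibble */
            buf[k++] = ':';
        }
        buf[--k] = '\0';                     /* overwrite trailing ':' */
        printf("%s\n", buf);                 /* 00:1a:2b:3c:4d:5e */
        return 0;
    }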
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 49587a497229..462a22c97877 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct net_device *in,
 		       (info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
 	if (ret && info->dest)
 		ret &= match_type(dev, iph->daddr, info->dest) ^
-		       (info->flags & IPT_ADDRTYPE_INVERT_DEST);
+		       !!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
 	return ret;
 }
 
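The two extra characters above are a real bug fix: match_type() yields a 0/1 boolean, while (info->flags & IPT_ADDRTYPE_INVERT_DEST) yields 0 or the flag value, a bit above bit 0, so the XOR never flipped the boolean and the invert option was a no-op for the destination check. Normalizing the mask with !! makes both XOR operands 0/1. A reduced demonstration:

    #include <stdio.h>

    #define INVERT_DEST 0x8  /* some flag bit other than bit 0 */

    int main(void)
    {
        int match = 1;       /* boolean result of the address-type test */
        int flags = INVERT_DEST;

        /* Buggy: 1 ^ 0x8 == 9, still truthy, so "invert" changed nothing. */
        printf("without !!: %d\n", match ^ (flags & INVERT_DEST));

        /* Fixed: !!(flags & INVERT_DEST) == 1, and 1 ^ 1 == 0 as intended. */
        printf("with    !!: %d\n", match ^ !!(flags & INVERT_DEST));
        return 0;
    }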
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 21cb053f5d7d..3974d7cae5c0 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -305,10 +305,10 @@ static void recent_mt_destroy(const struct xt_match *match, void *matchinfo)
 		spin_lock_bh(&recent_lock);
 		list_del(&t->list);
 		spin_unlock_bh(&recent_lock);
-		recent_table_flush(t);
 #ifdef CONFIG_PROC_FS
 		remove_proc_entry(t->name, proc_dir);
 #endif
+		recent_table_flush(t);
 		kfree(t);
 	}
 	mutex_unlock(&recent_mutex);
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 2b472ac2263a..db6d312128e1 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -32,7 +32,7 @@ static struct
 	struct ipt_replace repl;
 	struct ipt_standard entries[3];
 	struct ipt_error term;
-} initial_table __initdata = {
+} initial_table __net_initdata = {
 	.repl = {
 		.name = "security",
 		.valid_hooks = SECURITY_VALID_HOOKS,
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 91537f11273f..6c4f11f51446 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -73,9 +73,13 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 		range_size = ntohs(range->max.all) - min + 1;
 	}
 
-	off = *rover;
 	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
-		off = net_random();
+		off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
+						 maniptype == IP_NAT_MANIP_SRC
+						 ? tuple->dst.u.all
+						 : tuple->src.u.all);
+	else
+		off = *rover;
 
 	for (i = 0; i < range_size; i++, off++) {
 		*portptr = htons(min + off % range_size);
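The NAT change above replaces a purely random starting offset (net_random()) with secure_ipv4_port_ephemeral(), a hash keyed by the flow's addresses and the fixed port, so the starting point is unpredictable to outsiders yet deterministic per destination; the linear probe over the range is unchanged. A reduced model of that search loop (flow_hash() and port_in_use() are stand-ins invented for this sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for secure_ipv4_port_ephemeral(): any keyed hash of the
     * flow endpoints illustrates the idea. */
    static uint32_t flow_hash(uint32_t saddr, uint32_t daddr, uint16_t port)
    {
        return (saddr * 2654435761u) ^ (daddr * 40503u) ^ port;
    }

    /* Stand-in for "this tuple is already taken". */
    static int port_in_use(uint16_t port)
    {
        return port < 1024;  /* pretend the low ports are occupied */
    }

    int main(void)
    {
        uint32_t range_size = 65535, i;
        uint16_t min = 1, port = 0;
        uint32_t off = flow_hash(0x0a000001, 0x0a000002, 80);

        /* Same shape as the loop in nf_nat_proto_unique_tuple(): start
         * at the hashed offset, probe linearly, wrap via the modulo. */
        for (i = 0; i < range_size; i++, off++) {
            port = min + off % range_size;
            if (!port_in_use(port))
                break;
        }
        printf("picked port %u\n", port);
        return 0;
    }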
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 834356ea99df..8f5a403f6f6b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -232,6 +232,8 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD),
 	SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO),
 	SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS),
+	SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND),
+	SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e4ab0ac94f92..6ee5354c9aa1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1502,21 +1502,21 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 		    rth->fl.iif != 0 ||
 		    dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
 		    !net_eq(dev_net(rth->u.dst.dev), net) ||
-		    !rt_is_expired(rth))
+		    rt_is_expired(rth))
 			continue;
 
 		if (new_mtu < 68 || new_mtu >= old_mtu) {
 
 			/* BSD 4.2 compatibility hack :-( */
 			if (mtu == 0 &&
-			    old_mtu >= dst_metric(&rth->u.dst, RTAX_MTU) &&
+			    old_mtu >= dst_mtu(&rth->u.dst) &&
 			    old_mtu >= 68 + (iph->ihl << 2))
 				old_mtu -= iph->ihl << 2;
 
 			mtu = guess_mtu(old_mtu);
 		}
-		if (mtu <= dst_metric(&rth->u.dst, RTAX_MTU)) {
-			if (mtu < dst_metric(&rth->u.dst, RTAX_MTU)) {
+		if (mtu <= dst_mtu(&rth->u.dst)) {
+			if (mtu < dst_mtu(&rth->u.dst)) {
 				dst_confirm(&rth->u.dst);
 				if (mtu < ip_rt_min_pmtu) {
 					mtu = ip_rt_min_pmtu;
@@ -1538,7 +1538,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= 68 &&
+	if (dst_mtu(dst) > mtu && mtu >= 68 &&
 	    !(dst_metric_locked(dst, RTAX_MTU))) {
 		if (mtu < ip_rt_min_pmtu) {
 			mtu = ip_rt_min_pmtu;
@@ -1667,7 +1667,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 
 	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
 		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (dst_metric(&rt->u.dst, RTAX_MTU) > IP_MAX_MTU)
+	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
 		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
 	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
 		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
@@ -2914,7 +2914,69 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
 	return 0;
 }
 
-ctl_table ipv4_route_table[] = {
+static void rt_secret_reschedule(int old)
+{
+	struct net *net;
+	int new = ip_rt_secret_interval;
+	int diff = new - old;
+
+	if (!diff)
+		return;
+
+	rtnl_lock();
+	for_each_net(net) {
+		int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
+
+		if (!new)
+			continue;
+
+		if (deleted) {
+			long time = net->ipv4.rt_secret_timer.expires - jiffies;
+
+			if (time <= 0 || (time += diff) <= 0)
+				time = 0;
+
+			net->ipv4.rt_secret_timer.expires = time;
+		} else
+			net->ipv4.rt_secret_timer.expires = new;
+
+		net->ipv4.rt_secret_timer.expires += jiffies;
+		add_timer(&net->ipv4.rt_secret_timer);
+	}
+	rtnl_unlock();
+}
+
+static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
+					  struct file *filp,
+					  void __user *buffer, size_t *lenp,
+					  loff_t *ppos)
+{
+	int old = ip_rt_secret_interval;
+	int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);
+
+	rt_secret_reschedule(old);
+
+	return ret;
+}
+
+static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
+						   int __user *name,
+						   int nlen,
+						   void __user *oldval,
+						   size_t __user *oldlenp,
+						   void __user *newval,
+						   size_t newlen)
+{
+	int old = ip_rt_secret_interval;
+	int ret = sysctl_jiffies(table, name, nlen, oldval, oldlenp, newval,
+				 newlen);
+
+	rt_secret_reschedule(old);
+
+	return ret;
+}
+
+static ctl_table ipv4_route_table[] = {
 	{
 		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
 		.procname	= "gc_thresh",
@@ -3048,20 +3110,29 @@ ctl_table ipv4_route_table[] = {
 		.data		= &ip_rt_secret_interval,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_jiffies,
-		.strategy	= &sysctl_jiffies,
+		.proc_handler	= &ipv4_sysctl_rt_secret_interval,
+		.strategy	= &ipv4_sysctl_rt_secret_interval_strategy,
 	},
 	{ .ctl_name = 0 }
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
+static struct ctl_table empty[1];
+
+static struct ctl_table ipv4_skeleton[] =
+{
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+	  .mode = 0555, .child = ipv4_route_table},
+	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+	  .mode = 0555, .child = empty},
+	{ }
+};
+
+static __net_initdata struct ctl_path ipv4_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
 	{ },
 };
 
-
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
@@ -3074,6 +3145,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
 	{ .ctl_name = 0 },
 };
 
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+	{ },
+};
+
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3126,10 +3204,12 @@ static __net_init int rt_secret_timer_init(struct net *net)
 	net->ipv4.rt_secret_timer.data = (unsigned long)net;
 	init_timer_deferrable(&net->ipv4.rt_secret_timer);
 
-	net->ipv4.rt_secret_timer.expires =
-		jiffies + net_random() % ip_rt_secret_interval +
-		ip_rt_secret_interval;
-	add_timer(&net->ipv4.rt_secret_timer);
+	if (ip_rt_secret_interval) {
+		net->ipv4.rt_secret_timer.expires =
+			jiffies + net_random() % ip_rt_secret_interval +
+			ip_rt_secret_interval;
+		add_timer(&net->ipv4.rt_secret_timer);
+	}
 	return 0;
 }
 
@@ -3216,6 +3296,17 @@ int __init ip_rt_init(void)
 	return rc;
 }
 
+#ifdef CONFIG_SYSCTL
+/*
+ * We really need to sanitize the damn ipv4 init order, then all
+ * this nonsense will go away.
+ */
+void __init ip_static_sysctl_init(void)
+{
+	register_sysctl_paths(ipv4_path, ipv4_skeleton);
+}
+#endif
+
 EXPORT_SYMBOL(__ip_select_ident);
 EXPORT_SYMBOL(ip_route_input);
 EXPORT_SYMBOL(ip_route_output_key);
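The interesting part of the route.c changes is the sysctl wrapper pattern: instead of pointing rt_secret_interval directly at proc_dointvec_jiffies, the handler snapshots the old value, delegates to the generic helper, then calls rt_secret_reschedule() so every namespace's rehash timer is shifted by the delta (or deleted when the interval is set to 0) immediately, not after the old interval lapses. A reduced userspace model of that snapshot/delegate/react shape (all names here are stand-ins, not the kernel API):

    #include <stdio.h>

    static int interval = 600;  /* stands in for ip_rt_secret_interval */

    /* Stand-in for proc_dointvec_jiffies(): stores the written value. */
    static int generic_handler(int write, int newval)
    {
        if (write)
            interval = newval;
        return 0;
    }

    /* Stand-in for rt_secret_reschedule(): reacts to the delta. */
    static void reschedule(int old)
    {
        int diff = interval - old;

        if (!diff)
            return;  /* value unchanged: leave the timers alone */
        if (!interval)
            printf("interval now 0: delete pending timers\n");
        else
            printf("shift pending timers by %d\n", diff);
    }

    /* The wrapper: snapshot, delegate, react. */
    static int interval_handler(int write, int newval)
    {
        int old = interval;
        int ret = generic_handler(write, newval);

        reschedule(old);
        return ret;
    }

    int main(void)
    {
        interval_handler(1, 0);  /* disabling the interval kills the timers */
        return 0;
    }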
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 51bc24d3b8a7..9d38005abbac 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -299,6 +299,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	ireq->rmt_port		= th->source;
 	ireq->loc_addr		= ip_hdr(skb)->daddr;
 	ireq->rmt_addr		= ip_hdr(skb)->saddr;
+	ireq->ecn_ok		= 0;
 	ireq->snd_wscale	= tcp_opt.snd_wscale;
 	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
 	ireq->sack_ok		= tcp_opt.sack_ok;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 14ef202a2254..e0689fd7b798 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -232,6 +232,7 @@ static struct ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &ipv4_doint_and_flush,
 		.strategy	= &ipv4_doint_and_flush_strategy,
+		.extra2		= &init_net,
 	},
 	{
 		.ctl_name	= NET_IPV4_NO_PMTU_DISC,
@@ -401,13 +402,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= &ipv4_local_port_range,
 		.strategy	= &ipv4_sysctl_local_port_range,
 	},
-	{
-		.ctl_name	= NET_IPV4_ROUTE,
-		.procname	= "route",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= ipv4_route_table
-	},
 #ifdef CONFIG_IP_MULTICAST
 	{
 		.ctl_name	= NET_IPV4_IGMP_MAX_MEMBERSHIPS,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0b491bf03db4..1ab341e5d3e0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1096,7 +1096,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+	WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
 #endif
 
 	if (inet_csk_ack_scheduled(sk)) {
@@ -1358,7 +1358,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			BUG_TRAP(flags & MSG_PEEK);
+			WARN_ON(!(flags & MSG_PEEK));
 			skb = skb->next;
 		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
 
@@ -1421,8 +1421,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 		tp->ucopy.len = len;
 
-		BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
-			 (flags & (MSG_PEEK | MSG_TRUNC)));
+		WARN_ON(tp->copied_seq != tp->rcv_nxt &&
+			!(flags & (MSG_PEEK | MSG_TRUNC)));
 
 		/* Ugly... If prequeue is not empty, we have to
 		 * process it before releasing socket, otherwise
@@ -1844,7 +1844,7 @@ adjudge_to_death:
 	 */
 	local_bh_disable();
 	bh_lock_sock(sk);
-	BUG_TRAP(!sock_owned_by_user(sk));
+	WARN_ON(sock_owned_by_user(sk));
 
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -1973,7 +1973,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
 
-	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+	WARN_ON(inet->num && !icsk->icsk_bind_hash);
 
 	sk->sk_error_report(sk);
 	return err;
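All the BUG_TRAP conversions in this and the following files invert the asserted condition, since BUG_TRAP(cond) complained when cond was false while WARN_ON(cond) complains when cond is true; for compound conditions like the copied_seq assertion above that means applying De Morgan, not just wrapping the old expression in !(). A short sanity check of the equivalence:

    #include <assert.h>

    int main(void)
    {
        int a, b;

        /* BUG_TRAP(a || b) fired on !(a || b); the correct WARN_ON form
         * is WARN_ON(!a && !b), which De Morgan says is the same thing. */
        for (a = 0; a <= 1; a++)
            for (b = 0; b <= 1; b++)
                assert(!(a || b) == (!a && !b));
        return 0;
    }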
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1f5e6049883e..67ccce2a96bd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1629,10 +1629,10 @@ advance_sp:
 out:
 
 #if FASTRETRANS_DEBUG > 0
-	BUG_TRAP((int)tp->sacked_out >= 0);
-	BUG_TRAP((int)tp->lost_out >= 0);
-	BUG_TRAP((int)tp->retrans_out >= 0);
-	BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
+	WARN_ON((int)tp->sacked_out < 0);
+	WARN_ON((int)tp->lost_out < 0);
+	WARN_ON((int)tp->retrans_out < 0);
+	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
 	return flag;
 }
@@ -2181,7 +2181,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 	int err;
 	unsigned int mss;
 
-	BUG_TRAP(packets <= tp->packets_out);
+	WARN_ON(packets > tp->packets_out);
 	if (tp->lost_skb_hint) {
 		skb = tp->lost_skb_hint;
 		cnt = tp->lost_cnt_hint;
@@ -2610,7 +2610,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	/* E. Check state exit conditions. State can be terminated
 	 * when high_seq is ACKed. */
 	if (icsk->icsk_ca_state == TCP_CA_Open) {
-		BUG_TRAP(tp->retrans_out == 0);
+		WARN_ON(tp->retrans_out != 0);
 		tp->retrans_stamp = 0;
 	} else if (!before(tp->snd_una, tp->high_seq)) {
 		switch (icsk->icsk_ca_state) {
@@ -2972,9 +2972,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
 	}
 
 #if FASTRETRANS_DEBUG > 0
-	BUG_TRAP((int)tp->sacked_out >= 0);
-	BUG_TRAP((int)tp->lost_out >= 0);
-	BUG_TRAP((int)tp->retrans_out >= 0);
+	WARN_ON((int)tp->sacked_out < 0);
+	WARN_ON((int)tp->lost_out < 0);
+	WARN_ON((int)tp->retrans_out < 0);
 	if (!tp->packets_out && tcp_is_sack(tp)) {
 		icsk = inet_csk(sk);
 		if (tp->lost_out) {
@@ -3292,6 +3292,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	 * log. Something worked...
 	 */
 	sk->sk_err_soft = 0;
+	icsk->icsk_probes_out = 0;
 	tp->rcv_tstamp = tcp_time_stamp;
 	prior_packets = tp->packets_out;
 	if (!prior_packets)
@@ -3324,8 +3325,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		return 1;
 
 no_queue:
-	icsk->icsk_probes_out = 0;
-
 	/* If this ack opens up a zero window, clear backoff. It was
 	 * being used to time the probes, and is probably far higher than
 	 * it needs to be for normal retransmission.
@@ -3878,7 +3877,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 			int i;
 
 			/* RCV.NXT must cover all the block! */
-			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
+			WARN_ON(before(tp->rcv_nxt, sp->end_seq));
 
 			/* Zap this SACK, by moving forward any other SACKS. */
 			for (i=this_sack+1; i < num_sacks; i++)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a82df6307567..1b4fee20fc93 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -418,7 +418,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		/* ICMPs are not backlogged, hence we cannot get
 		   an established socket here.
 		 */
-		BUG_TRAP(!req->sk);
+		WARN_ON(req->sk);
 
 		if (seq != tcp_rsk(req)->snt_isn) {
 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
@@ -655,8 +655,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 		rep.th.doff = arg.iov[0].iov_len/4;
 
 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
-				    key, ip_hdr(skb)->daddr,
-				    ip_hdr(skb)->saddr, &rep.th);
+				    key, ip_hdr(skb)->saddr,
+				    ip_hdr(skb)->daddr, &rep.th);
 	}
 #endif
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ -687,14 +687,14 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	inet_twsk_put(tw);
 }
 
-static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
+static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
 			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 			req->ts_recent,
 			0,
-			tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
+			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr));
 }
 
 /*
@@ -1116,18 +1116,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 		return 0;
 
 	if (hash_expected && !hash_location) {
-		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
-			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
-			       NIPQUAD(iph->saddr), ntohs(th->source),
-			       NIPQUAD(iph->daddr), ntohs(th->dest));
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return 1;
 	}
 
 	if (!hash_expected && hash_location) {
-		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
-			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
-			       NIPQUAD(iph->saddr), ntohs(th->source),
-			       NIPQUAD(iph->daddr), ntohs(th->dest));
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return 1;
 	}
 
@@ -2382,6 +2376,7 @@ static int __net_init tcp_sk_init(struct net *net)
 static void __net_exit tcp_sk_exit(struct net *net)
 {
 	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
+	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
 }
 
 static struct pernet_operations __net_initdata tcp_sk_ops = {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 204c42162660..f976fc57892c 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -609,7 +609,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 			  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
 		/* Out of window: send ACK and drop. */
 		if (!(flg & TCP_FLAG_RST))
-			req->rsk_ops->send_ack(skb, req);
+			req->rsk_ops->send_ack(sk, skb, req);
 		if (paws_reject)
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
@@ -618,89 +618,87 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	/* In sequence, PAWS is OK. */
 
 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
 		req->ts_recent = tmp_opt.rcv_tsval;
 
 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
 		/* Truncate SYN, it is out of window starting
 		   at tcp_rsk(req)->rcv_isn + 1. */
 		flg &= ~TCP_FLAG_SYN;
 	}
 
 	/* RFC793: "second check the RST bit" and
 	 * "fourth, check the SYN bit"
 	 */
 	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 		goto embryonic_reset;
 	}
 
 	/* ACK sequence verified above, just make sure ACK is
 	 * set. If ACK not set, just silently drop the packet.
 	 */
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
-
-	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
-	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-		inet_rsk(req)->acked = 1;
-		return NULL;
-	}
 
-	/* OK, ACK is valid, create big socket and
-	 * feed this segment to it. It will repeat all
-	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
-	 * ESTABLISHED STATE. If it will be dropped after
-	 * socket is created, wait for troubles.
-	 */
-	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
-							 req, NULL);
-	if (child == NULL)
-		goto listen_overflow;
+	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
+	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+		inet_rsk(req)->acked = 1;
+		return NULL;
+	}
+
+	/* OK, ACK is valid, create big socket and
+	 * feed this segment to it. It will repeat all
+	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
+	 * ESTABLISHED STATE. If it will be dropped after
+	 * socket is created, wait for troubles.
+	 */
+	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+	if (child == NULL)
+		goto listen_overflow;
 #ifdef CONFIG_TCP_MD5SIG
 	else {
 		/* Copy over the MD5 key from the original socket */
 		struct tcp_md5sig_key *key;
 		struct tcp_sock *tp = tcp_sk(sk);
 		key = tp->af_specific->md5_lookup(sk, child);
 		if (key != NULL) {
 			/*
 			 * We're using one, so create a matching key on the
 			 * newsk structure. If we fail to get memory then we
 			 * end up not copying the key across. Shucks.
 			 */
 			char *newkey = kmemdup(key->key, key->keylen,
 					       GFP_ATOMIC);
 			if (newkey) {
 				if (!tcp_alloc_md5sig_pool())
 					BUG();
-				tp->af_specific->md5_add(child, child,
-							 newkey,
-							 key->keylen);
-			}
+				tp->af_specific->md5_add(child, child, newkey,
+							 key->keylen);
+			}
 		}
 	}
 #endif
 
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
 
 	inet_csk_reqsk_queue_add(sk, req, child);
 	return child;
 
-	listen_overflow:
+listen_overflow:
 	if (!sysctl_tcp_abort_on_overflow) {
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
 
-	embryonic_reset:
+embryonic_reset:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	if (!(flg & TCP_FLAG_RST))
 		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req, prev);
 	return NULL;
 }
 
 /*
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a00532de2a8c..8165f5aa8c71 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -468,7 +468,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	}
 	if (likely(sysctl_tcp_window_scaling)) {
 		opts->ws = tp->rx_opt.rcv_wscale;
-		size += TCPOLEN_WSCALE_ALIGNED;
+		if(likely(opts->ws))
+			size += TCPOLEN_WSCALE_ALIGNED;
 	}
 	if (likely(sysctl_tcp_sack)) {
 		opts->options |= OPTION_SACK_ADVERTISE;
@@ -509,7 +510,8 @@ static unsigned tcp_synack_options(struct sock *sk,
 
 	if (likely(ireq->wscale_ok)) {
 		opts->ws = ireq->rcv_wscale;
-		size += TCPOLEN_WSCALE_ALIGNED;
+		if(likely(opts->ws))
+			size += TCPOLEN_WSCALE_ALIGNED;
 	}
 	if (likely(doing_ts)) {
 		opts->options |= OPTION_TS;
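The two tcp_output.c hunks make option sizing match option emission: as far as I can tell, tcp_options_write() in this tree only emits the window-scale option when opts->ws is nonzero, so unconditionally reserving TCPOLEN_WSCALE_ALIGNED could leave the advertised header length larger than the bytes actually written. The safe pattern is to drive both paths from one predicate, sketched here with simplified structures (not the kernel's):

    #include <stdio.h>

    #define TCPOLEN_WSCALE_ALIGNED 4

    struct opts { unsigned char ws; };

    /* Sizing and emission share the same o->ws test, so the reserved
     * length always equals the emitted length. */
    static int ws_option_len(const struct opts *o)
    {
        return o->ws ? TCPOLEN_WSCALE_ALIGNED : 0;
    }

    static int write_options(const struct opts *o, unsigned char *buf)
    {
        int len = 0;

        if (o->ws) {
            buf[len++] = 1;      /* NOP padding */
            buf[len++] = 3;      /* kind: window scale */
            buf[len++] = 3;      /* length */
            buf[len++] = o->ws;  /* shift count */
        }
        return len;
    }

    int main(void)
    {
        struct opts o = { .ws = 0 };
        unsigned char buf[40];

        printf("sized %d, wrote %d\n", ws_option_len(&o),
               write_options(&o, buf));  /* both 0: consistent */
        return 0;
    }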
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 328e0cf42b3c..5ab6ba19c3ce 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -287,7 +287,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 	if (!tp->packets_out)
 		goto out;
 
-	BUG_TRAP(!tcp_write_queue_empty(sk));
+	WARN_ON(tcp_write_queue_empty(sk));
 
 	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
 	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d01..57e26fa66185 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -951,6 +951,27 @@ int udp_disconnect(struct sock *sk, int flags)
 	return 0;
 }
 
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int is_udplite = IS_UDPLITE(sk);
+	int rc;
+
+	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+		/* Note that an ENOMEM error is charged twice */
+		if (rc == -ENOMEM)
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					 is_udplite);
+		goto drop;
+	}
+
+	return 0;
+
+drop:
+	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	kfree_skb(skb);
+	return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -1042,17 +1063,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 			goto drop;
 	}
 
-	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
-		goto drop;
-	}
+	rc = 0;
 
-	return 0;
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = __udp_queue_rcv_skb(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+
+	return rc;
 
 drop:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -1090,15 +1110,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		skb1 = skb_clone(skb, GFP_ATOMIC);
 
 		if (skb1) {
-			int ret = 0;
-
-			bh_lock_sock_nested(sk);
-			if (!sock_owned_by_user(sk))
-				ret = udp_queue_rcv_skb(sk, skb1);
-			else
-				sk_add_backlog(sk, skb1);
-			bh_unlock_sock(sk);
-
+			int ret = udp_queue_rcv_skb(sk, skb1);
 			if (ret > 0)
 				/* we should probably re-process instead
 				 * of dropping packets here. */
@@ -1193,13 +1205,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 			uh->dest, inet_iif(skb), udptable);
 
 	if (sk != NULL) {
-		int ret = 0;
-		bh_lock_sock_nested(sk);
-		if (!sock_owned_by_user(sk))
-			ret = udp_queue_rcv_skb(sk, skb);
-		else
-			sk_add_backlog(sk, skb);
-		bh_unlock_sock(sk);
+		int ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input, but
@@ -1492,7 +1498,7 @@ struct proto udp_prot = {
 	.sendmsg	   = udp_sendmsg,
 	.recvmsg	   = udp_recvmsg,
 	.sendpage	   = udp_sendpage,
-	.backlog_rcv	   = udp_queue_rcv_skb,
+	.backlog_rcv	   = __udp_queue_rcv_skb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.get_port	   = udp_v4_get_port,
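The udp.c reorganization moves the bh_lock_sock()/sk_add_backlog() dance out of the two receive paths and into udp_queue_rcv_skb() itself, and splits the lock-free queueing step into __udp_queue_rcv_skb() so the same function can serve as .backlog_rcv, which the socket code invokes with the lock already held. A userspace model of that owner/backlog split (struct sock here is a toy, not the kernel's):

    #include <stdio.h>

    struct sock {
        int owned_by_user;  /* stand-in for sock_owned_by_user() */
    };

    /* Lock-free half: caller guarantees exclusive access, which is why
     * it can double as the backlog_rcv callback. */
    static int queue_locked(struct sock *sk)
    {
        printf("queued directly\n");
        return 0;
    }

    /* Public entry: the locking lives here now, not at every call site. */
    static int queue_rcv(struct sock *sk)
    {
        int rc = 0;

        /* bh_lock_sock(sk); */
        if (!sk->owned_by_user)
            rc = queue_locked(sk);
        else
            printf("deferred to backlog\n");  /* sk_add_backlog(sk, skb) */
        /* bh_unlock_sock(sk); */
        return rc;
    }

    int main(void)
    {
        struct sock sk = { .owned_by_user = 1 };

        queue_rcv(&sk);          /* socket busy: deferred */
        sk.owned_by_user = 0;
        queue_rcv(&sk);          /* socket free: queued directly */
        return 0;
    }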
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index 9c798abce736..63418185f524 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -47,8 +47,10 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (unlikely(optlen))
 		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
 
-	skb_set_network_header(skb, IPV4_BEET_PHMAXLEN - x->props.header_len -
-			       hdrlen);
+	skb_set_network_header(skb, -x->props.header_len -
+			            hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
+	if (x->sel.family != AF_INET6)
+		skb->network_header += IPV4_BEET_PHMAXLEN;
 	skb->mac_header = skb->network_header +
 			  offsetof(struct iphdr, protocol);
 	skb->transport_header = skb->network_header + sizeof(*top_iph);