aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c6
-rw-r--r--net/8021q/vlan.h1
-rw-r--r--net/8021q/vlan_dev.c26
-rw-r--r--net/9p/mod.c4
-rw-r--r--net/9p/trans_fd.c3
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/common.c2
-rw-r--r--net/ax25/af_ax25.c6
-rw-r--r--net/bluetooth/bnep/sock.c2
-rw-r--r--net/bluetooth/cmtp/sock.c2
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/hidp/sock.c2
-rw-r--r--net/bluetooth/l2cap.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br_netfilter.c3
-rw-r--r--net/bridge/netfilter/ebt_among.c2
-rw-r--r--net/bridge/netfilter/ebt_arp.c2
-rw-r--r--net/core/dev.c149
-rw-r--r--net/core/dev_mcast.c16
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/fib_rules.c22
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/net_namespace.c182
-rw-r--r--net/core/netpoll.c37
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/request_sock.c35
-rw-r--r--net/core/skbuff.c47
-rw-r--r--net/core/sock.c178
-rw-r--r--net/dccp/ccids/lib/loss_interval.c2
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dccp/proto.c9
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/decnet/dn_route.c16
-rw-r--r--net/decnet/dn_rules.c13
-rw-r--r--net/econet/af_econet.c2
-rw-r--r--net/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--net/ieee80211/ieee80211_crypt_tkip.c5
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c1
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c2
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/ah4.c1
-rw-r--r--net/ipv4/arp.c22
-rw-r--r--net/ipv4/esp4.c12
-rw-r--r--net/ipv4/fib_frontend.c15
-rw-r--r--net/ipv4/fib_rules.c51
-rw-r--r--net/ipv4/inet_diag.c9
-rw-r--r--net/ipv4/inet_hashtables.c7
-rw-r--r--net/ipv4/inet_timewait_sock.c13
-rw-r--r--net/ipv4/inetpeer.c42
-rw-r--r--net/ipv4/ip_output.c28
-rw-r--r--net/ipv4/ip_sockglue.c39
-rw-r--r--net/ipv4/ipcomp.c4
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c18
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c22
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c49
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c78
-rw-r--r--net/ipv4/ipvs/ip_vs_proto.c2
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c29
-rw-r--r--net/ipv4/netfilter/Kconfig4
-rw-r--r--net/ipv4/netfilter/Makefile20
-rw-r--r--net/ipv4/netfilter/ip_queue.c37
-rw-r--r--net/ipv4/netfilter/iptable_raw.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_ftp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c18
-rw-r--r--net/ipv4/netfilter/nf_nat_irc.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c2
-rw-r--r--net/ipv4/proc.c21
-rw-r--r--net/ipv4/raw.c3
-rw-r--r--net/ipv4/route.c28
-rw-r--r--net/ipv4/sysctl_net_ipv4.c2
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_input.c142
-rw-r--r--net/ipv4/tcp_ipv4.c37
-rw-r--r--net/ipv4/tcp_output.c21
-rw-r--r--net/ipv4/tcp_vegas.c37
-rw-r--r--net/ipv4/tunnel4.c24
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv4/udplite.c3
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/ah6.c1
-rw-r--r--net/ipv6/esp6.c13
-rw-r--r--net/ipv6/fib6_rules.c37
-rw-r--r--net/ipv6/inet6_hashtables.c19
-rw-r--r--net/ipv6/ip6_output.c36
-rw-r--r--net/ipv6/ipcomp6.c4
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/Makefile28
-rw-r--r--net/ipv6/netfilter/ip6_queue.c37
-rw-r--r--net/ipv6/proc.c19
-rw-r--r--net/ipv6/raw.c3
-rw-r--r--net/ipv6/route.c97
-rw-r--r--net/ipv6/tcp_ipv6.c26
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/ipv6/udplite.c3
-rw-r--r--net/ipx/af_ipx.c24
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/irda/iriap.c2
-rw-r--r--net/irda/irlan/irlan_eth.c2
-rw-r--r--net/irda/irnet/irnet_ppp.c10
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c107
-rw-r--r--net/key/af_key.c10
-rw-r--r--net/llc/llc_conn.c2
-rw-r--r--net/mac80211/Kconfig12
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/aes_ccm.c1
-rw-r--r--net/mac80211/ieee80211.c43
-rw-r--r--net/mac80211/ieee80211_common.h91
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/ieee80211_ioctl.c21
-rw-r--r--net/mac80211/ieee80211_rate.c24
-rw-r--r--net/mac80211/ieee80211_rate.h3
-rw-r--r--net/mac80211/ieee80211_sta.c28
-rw-r--r--net/mac80211/rc80211_simple.c25
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/wep.c2
-rw-r--r--net/mac80211/wpa.c18
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/Makefile14
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_extend.c2
-rw-r--r--net/netfilter/nf_sockopt.c117
-rw-r--r--net/netfilter/xt_connlimit.c5
-rw-r--r--net/netfilter/xt_time.c5
-rw-r--r--net/netfilter/xt_u32.c5
-rw-r--r--net/netlink/af_netlink.c14
-rw-r--r--net/netrom/af_netrom.c6
-rw-r--r--net/packet/af_packet.c33
-rw-r--r--net/rfkill/rfkill.c37
-rw-r--r--net/rose/af_rose.c6
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-local.c4
-rw-r--r--net/rxrpc/rxkad.c9
-rw-r--r--net/sched/cls_u32.c14
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sctp/associola.c10
-rw-r--r--net/sctp/auth.c4
-rw-r--r--net/sctp/bind_addr.c13
-rw-r--r--net/sctp/endpointola.c35
-rw-r--r--net/sctp/input.c43
-rw-r--r--net/sctp/inqueue.c4
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/outqueue.c41
-rw-r--r--net/sctp/proc.c6
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/sm_make_chunk.c170
-rw-r--r--net/sctp/sm_sideeffect.c10
-rw-r--r--net/sctp/sm_statefuns.c14
-rw-r--r--net/sctp/socket.c22
-rw-r--r--net/sctp/sysctl.c9
-rw-r--r--net/sctp/transport.c5
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/socket.c11
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c6
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c8
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c1
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/sysctl.c3
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c34
-rw-r--r--net/sunrpc/xprtrdma/transport.c12
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/unix/af_unix.c11
-rw-r--r--net/unix/garbage.c26
-rw-r--r--net/wireless/wext.c2
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/xfrm/xfrm_algo.c1
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--net/xfrm/xfrm_user.c2
182 files changed, 1724 insertions, 1563 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3fe4fc86055f..6567213959cb 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -376,6 +376,7 @@ void vlan_setup(struct net_device *new_dev)
376 new_dev->init = vlan_dev_init; 376 new_dev->init = vlan_dev_init;
377 new_dev->open = vlan_dev_open; 377 new_dev->open = vlan_dev_open;
378 new_dev->stop = vlan_dev_stop; 378 new_dev->stop = vlan_dev_stop;
379 new_dev->set_mac_address = vlan_set_mac_address;
379 new_dev->set_multicast_list = vlan_dev_set_multicast_list; 380 new_dev->set_multicast_list = vlan_dev_set_multicast_list;
380 new_dev->change_rx_flags = vlan_change_rx_flags; 381 new_dev->change_rx_flags = vlan_change_rx_flags;
381 new_dev->destructor = free_netdev; 382 new_dev->destructor = free_netdev;
@@ -636,6 +637,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
636 if (!vlandev) 637 if (!vlandev)
637 continue; 638 continue;
638 639
640 flgs = vlandev->flags;
641 if (!(flgs & IFF_UP))
642 continue;
643
639 vlan_sync_address(dev, vlandev); 644 vlan_sync_address(dev, vlandev);
640 } 645 }
641 break; 646 break;
@@ -747,6 +752,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
747 vlan_dev_set_ingress_priority(dev, 752 vlan_dev_set_ingress_priority(dev,
748 args.u.skb_priority, 753 args.u.skb_priority,
749 args.vlan_qos); 754 args.vlan_qos);
755 err = 0;
750 break; 756 break;
751 757
752 case SET_VLAN_EGRESS_PRIORITY_CMD: 758 case SET_VLAN_EGRESS_PRIORITY_CMD:
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index cf4a80d06b35..2cd1393073ec 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -60,6 +60,7 @@ int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev
60int vlan_dev_change_mtu(struct net_device *dev, int new_mtu); 60int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
61int vlan_dev_open(struct net_device* dev); 61int vlan_dev_open(struct net_device* dev);
62int vlan_dev_stop(struct net_device* dev); 62int vlan_dev_stop(struct net_device* dev);
63int vlan_set_mac_address(struct net_device *dev, void *p);
63int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd); 64int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd);
64void vlan_dev_set_ingress_priority(const struct net_device *dev, 65void vlan_dev_set_ingress_priority(const struct net_device *dev,
65 u32 skb_prio, short vlan_prio); 66 u32 skb_prio, short vlan_prio);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1a1740aa9a8b..7a36878241da 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -665,6 +665,32 @@ int vlan_dev_stop(struct net_device *dev)
665 return 0; 665 return 0;
666} 666}
667 667
668int vlan_set_mac_address(struct net_device *dev, void *p)
669{
670 struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
671 struct sockaddr *addr = p;
672 int err;
673
674 if (!is_valid_ether_addr(addr->sa_data))
675 return -EADDRNOTAVAIL;
676
677 if (!(dev->flags & IFF_UP))
678 goto out;
679
680 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
681 err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN);
682 if (err < 0)
683 return err;
684 }
685
686 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
687 dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
688
689out:
690 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
691 return 0;
692}
693
668int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 694int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
669{ 695{
670 struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev; 696 struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 41d70f47375d..8f9763a9dc12 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -76,9 +76,9 @@ struct p9_trans_module *v9fs_match_trans(const substring_t *name)
76 list_for_each(p, &v9fs_trans_list) { 76 list_for_each(p, &v9fs_trans_list) {
77 t = list_entry(p, struct p9_trans_module, list); 77 t = list_entry(p, struct p9_trans_module, list);
78 if (strncmp(t->name, name->from, name->to-name->from) == 0) 78 if (strncmp(t->name, name->from, name->to-name->from) == 0)
79 break; 79 return t;
80 } 80 }
81 return t; 81 return NULL;
82} 82}
83EXPORT_SYMBOL(v9fs_match_trans); 83EXPORT_SYMBOL(v9fs_match_trans);
84 84
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 30269a4ff22a..62332ed9da4a 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -62,13 +62,14 @@ struct p9_trans_fd {
62 62
63enum { 63enum {
64 /* Options that take integer arguments */ 64 /* Options that take integer arguments */
65 Opt_port, Opt_rfdno, Opt_wfdno, 65 Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
66}; 66};
67 67
68static match_table_t tokens = { 68static match_table_t tokens = {
69 {Opt_port, "port=%u"}, 69 {Opt_port, "port=%u"},
70 {Opt_rfdno, "rfdno=%u"}, 70 {Opt_rfdno, "rfdno=%u"},
71 {Opt_wfdno, "wfdno=%u"}, 71 {Opt_wfdno, "wfdno=%u"},
72 {Opt_err, NULL},
72}; 73};
73 74
74/** 75/**
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7c0b5151d526..e0d37d6dc1f8 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1044,7 +1044,7 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol)
1044 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) 1044 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1045 goto out; 1045 goto out;
1046 rc = -ENOMEM; 1046 rc = -ENOMEM;
1047 sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, 1); 1047 sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
1048 if (!sk) 1048 if (!sk)
1049 goto out; 1049 goto out;
1050 rc = 0; 1050 rc = 0;
diff --git a/net/atm/common.c b/net/atm/common.c
index e166d9e0ffd9..eba09a04f6bf 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -133,7 +133,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
133 sock->sk = NULL; 133 sock->sk = NULL;
134 if (sock->type == SOCK_STREAM) 134 if (sock->type == SOCK_STREAM)
135 return -EINVAL; 135 return -EINVAL;
136 sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, 1); 136 sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
137 if (!sk) 137 if (!sk)
138 return -ENOMEM; 138 return -ENOMEM;
139 sock_init_data(sock, sk); 139 sock_init_data(sock, sk);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 993e5c75e909..8378afd54b30 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -836,7 +836,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol)
836 return -ESOCKTNOSUPPORT; 836 return -ESOCKTNOSUPPORT;
837 } 837 }
838 838
839 if ((sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, 1)) == NULL) 839 sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
840 if (sk == NULL)
840 return -ENOMEM; 841 return -ENOMEM;
841 842
842 ax25 = sk->sk_protinfo = ax25_create_cb(); 843 ax25 = sk->sk_protinfo = ax25_create_cb();
@@ -861,7 +862,8 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
861 struct sock *sk; 862 struct sock *sk;
862 ax25_cb *ax25, *oax25; 863 ax25_cb *ax25, *oax25;
863 864
864 if ((sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) 865 sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot);
866 if (sk == NULL)
865 return NULL; 867 return NULL;
866 868
867 if ((ax25 = ax25_create_cb()) == NULL) { 869 if ((ax25 = ax25_create_cb()) == NULL) {
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index f718965f296c..9ebd3c64474d 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -213,7 +213,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol)
213 if (sock->type != SOCK_RAW) 213 if (sock->type != SOCK_RAW)
214 return -ESOCKTNOSUPPORT; 214 return -ESOCKTNOSUPPORT;
215 215
216 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1); 216 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
217 if (!sk) 217 if (!sk)
218 return -ENOMEM; 218 return -ENOMEM;
219 219
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index cf700c20d11e..783edab12ce8 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -204,7 +204,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol)
204 if (sock->type != SOCK_RAW) 204 if (sock->type != SOCK_RAW)
205 return -ESOCKTNOSUPPORT; 205 return -ESOCKTNOSUPPORT;
206 206
207 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1); 207 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
208 if (!sk) 208 if (!sk)
209 return -ENOMEM; 209 return -ENOMEM;
210 210
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 8825102c517c..14991323c273 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -645,7 +645,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol)
645 645
646 sock->ops = &hci_sock_ops; 646 sock->ops = &hci_sock_ops;
647 647
648 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1); 648 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
649 if (!sk) 649 if (!sk)
650 return -ENOMEM; 650 return -ENOMEM;
651 651
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 1de2b6fbcac0..3292b956a7c4 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -255,7 +255,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol)
255 if (sock->type != SOCK_RAW) 255 if (sock->type != SOCK_RAW)
256 return -ESOCKTNOSUPPORT; 256 return -ESOCKTNOSUPPORT;
257 257
258 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1); 258 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
259 if (!sk) 259 if (!sk)
260 return -ENOMEM; 260 return -ENOMEM;
261 261
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 6fbbae78b304..477e052b17b5 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -607,7 +607,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
607{ 607{
608 struct sock *sk; 608 struct sock *sk;
609 609
610 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1); 610 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
611 if (!sk) 611 if (!sk)
612 return NULL; 612 return NULL;
613 613
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 266b6972667d..c46d51035e77 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -287,7 +287,7 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
287 struct rfcomm_dlc *d; 287 struct rfcomm_dlc *d;
288 struct sock *sk; 288 struct sock *sk;
289 289
290 sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, 1); 290 sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
291 if (!sk) 291 if (!sk)
292 return NULL; 292 return NULL;
293 293
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 82d0dfdfa7e2..93ad1aae3f38 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -421,7 +421,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
421{ 421{
422 struct sock *sk; 422 struct sock *sk;
423 423
424 sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, 1); 424 sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
425 if (!sk) 425 if (!sk)
426 return NULL; 426 return NULL;
427 427
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index da22f900e89d..c1757c79dfbb 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -766,6 +766,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
766 if (!nf_bridge) 766 if (!nf_bridge)
767 return NF_ACCEPT; 767 return NF_ACCEPT;
768 768
769 if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
770 return NF_ACCEPT;
771
769 if (!realoutdev) 772 if (!realoutdev)
770 return NF_DROP; 773 return NF_DROP;
771 774
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 392d877040d3..6436d30a550e 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -187,7 +187,7 @@ static int ebt_among_check(const char *tablename, unsigned int hookmask,
187 187
188 if (datalen != EBT_ALIGN(expected_length)) { 188 if (datalen != EBT_ALIGN(expected_length)) {
189 printk(KERN_WARNING 189 printk(KERN_WARNING
190 "ebtables: among: wrong size: %d" 190 "ebtables: among: wrong size: %d "
191 "against expected %d, rounded to %Zd\n", 191 "against expected %d, rounded to %Zd\n",
192 datalen, expected_length, 192 datalen, expected_length,
193 EBT_ALIGN(expected_length)); 193 EBT_ALIGN(expected_length));
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index 1a46952a56d9..18141392a9b4 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -34,7 +34,7 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in
34 ah->ar_pro, EBT_ARP_PTYPE)) 34 ah->ar_pro, EBT_ARP_PTYPE))
35 return EBT_NOMATCH; 35 return EBT_NOMATCH;
36 36
37 if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) { 37 if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) {
38 __be32 saddr, daddr, *sap, *dap; 38 __be32 saddr, daddr, *sap, *dap;
39 39
40 if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP)) 40 if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP))
diff --git a/net/core/dev.c b/net/core/dev.c
index 853c8b575f1d..86d62611f2fc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1171,6 +1171,8 @@ rollback:
1171 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1171 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1172 } 1172 }
1173 } 1173 }
1174
1175 raw_notifier_chain_unregister(&netdev_chain, nb);
1174 goto unlock; 1176 goto unlock;
1175} 1177}
1176 1178
@@ -1751,9 +1753,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1751 * 1753 *
1752 * return values: 1754 * return values:
1753 * NET_RX_SUCCESS (no congestion) 1755 * NET_RX_SUCCESS (no congestion)
1754 * NET_RX_CN_LOW (low congestion)
1755 * NET_RX_CN_MOD (moderate congestion)
1756 * NET_RX_CN_HIGH (high congestion)
1757 * NET_RX_DROP (packet was dropped) 1756 * NET_RX_DROP (packet was dropped)
1758 * 1757 *
1759 */ 1758 */
@@ -2001,6 +2000,21 @@ out:
2001} 2000}
2002#endif 2001#endif
2003 2002
2003/**
2004 * netif_receive_skb - process receive buffer from network
2005 * @skb: buffer to process
2006 *
2007 * netif_receive_skb() is the main receive data processing function.
2008 * It always succeeds. The buffer may be dropped during processing
2009 * for congestion control or by the protocol layers.
2010 *
2011 * This function may only be called from softirq context and interrupts
2012 * should be enabled.
2013 *
2014 * Return values (usually ignored):
2015 * NET_RX_SUCCESS: no congestion
2016 * NET_RX_DROP: packet was dropped
2017 */
2004int netif_receive_skb(struct sk_buff *skb) 2018int netif_receive_skb(struct sk_buff *skb)
2005{ 2019{
2006 struct packet_type *ptype, *pt_prev; 2020 struct packet_type *ptype, *pt_prev;
@@ -2172,7 +2186,15 @@ static void net_rx_action(struct softirq_action *h)
2172 2186
2173 weight = n->weight; 2187 weight = n->weight;
2174 2188
2175 work = n->poll(n, weight); 2189 /* This NAPI_STATE_SCHED test is for avoiding a race
2190 * with netpoll's poll_napi(). Only the entity which
2191 * obtains the lock and sees NAPI_STATE_SCHED set will
2192 * actually make the ->poll() call. Therefore we avoid
2193 * accidently calling ->poll() when NAPI is not scheduled.
2194 */
2195 work = 0;
2196 if (test_bit(NAPI_STATE_SCHED, &n->state))
2197 work = n->poll(n, weight);
2176 2198
2177 WARN_ON_ONCE(work > weight); 2199 WARN_ON_ONCE(work > weight);
2178 2200
@@ -2668,7 +2690,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
2668 proc_net_remove(net, "dev"); 2690 proc_net_remove(net, "dev");
2669} 2691}
2670 2692
2671static struct pernet_operations dev_proc_ops = { 2693static struct pernet_operations __net_initdata dev_proc_ops = {
2672 .init = dev_proc_net_init, 2694 .init = dev_proc_net_init,
2673 .exit = dev_proc_net_exit, 2695 .exit = dev_proc_net_exit,
2674}; 2696};
@@ -3488,6 +3510,60 @@ static void net_set_todo(struct net_device *dev)
3488 spin_unlock(&net_todo_list_lock); 3510 spin_unlock(&net_todo_list_lock);
3489} 3511}
3490 3512
3513static void rollback_registered(struct net_device *dev)
3514{
3515 BUG_ON(dev_boot_phase);
3516 ASSERT_RTNL();
3517
3518 /* Some devices call without registering for initialization unwind. */
3519 if (dev->reg_state == NETREG_UNINITIALIZED) {
3520 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3521 "was registered\n", dev->name, dev);
3522
3523 WARN_ON(1);
3524 return;
3525 }
3526
3527 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3528
3529 /* If device is running, close it first. */
3530 dev_close(dev);
3531
3532 /* And unlink it from device chain. */
3533 unlist_netdevice(dev);
3534
3535 dev->reg_state = NETREG_UNREGISTERING;
3536
3537 synchronize_net();
3538
3539 /* Shutdown queueing discipline. */
3540 dev_shutdown(dev);
3541
3542
3543 /* Notify protocols, that we are about to destroy
3544 this device. They should clean all the things.
3545 */
3546 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3547
3548 /*
3549 * Flush the unicast and multicast chains
3550 */
3551 dev_addr_discard(dev);
3552
3553 if (dev->uninit)
3554 dev->uninit(dev);
3555
3556 /* Notifier chain MUST detach us from master device. */
3557 BUG_TRAP(!dev->master);
3558
3559 /* Remove entries from kobject tree */
3560 netdev_unregister_kobject(dev);
3561
3562 synchronize_net();
3563
3564 dev_put(dev);
3565}
3566
3491/** 3567/**
3492 * register_netdevice - register a network device 3568 * register_netdevice - register a network device
3493 * @dev: device to register 3569 * @dev: device to register
@@ -3625,8 +3701,10 @@ int register_netdevice(struct net_device *dev)
3625 /* Notify protocols, that a new device appeared. */ 3701 /* Notify protocols, that a new device appeared. */
3626 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); 3702 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
3627 ret = notifier_to_errno(ret); 3703 ret = notifier_to_errno(ret);
3628 if (ret) 3704 if (ret) {
3629 unregister_netdevice(dev); 3705 rollback_registered(dev);
3706 dev->reg_state = NETREG_UNREGISTERED;
3707 }
3630 3708
3631out: 3709out:
3632 return ret; 3710 return ret;
@@ -3903,59 +3981,9 @@ void synchronize_net(void)
3903 3981
3904void unregister_netdevice(struct net_device *dev) 3982void unregister_netdevice(struct net_device *dev)
3905{ 3983{
3906 BUG_ON(dev_boot_phase); 3984 rollback_registered(dev);
3907 ASSERT_RTNL();
3908
3909 /* Some devices call without registering for initialization unwind. */
3910 if (dev->reg_state == NETREG_UNINITIALIZED) {
3911 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3912 "was registered\n", dev->name, dev);
3913
3914 WARN_ON(1);
3915 return;
3916 }
3917
3918 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3919
3920 /* If device is running, close it first. */
3921 dev_close(dev);
3922
3923 /* And unlink it from device chain. */
3924 unlist_netdevice(dev);
3925
3926 dev->reg_state = NETREG_UNREGISTERING;
3927
3928 synchronize_net();
3929
3930 /* Shutdown queueing discipline. */
3931 dev_shutdown(dev);
3932
3933
3934 /* Notify protocols, that we are about to destroy
3935 this device. They should clean all the things.
3936 */
3937 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3938
3939 /*
3940 * Flush the unicast and multicast chains
3941 */
3942 dev_addr_discard(dev);
3943
3944 if (dev->uninit)
3945 dev->uninit(dev);
3946
3947 /* Notifier chain MUST detach us from master device. */
3948 BUG_TRAP(!dev->master);
3949
3950 /* Remove entries from kobject tree */
3951 netdev_unregister_kobject(dev);
3952
3953 /* Finish processing unregister after unlock */ 3985 /* Finish processing unregister after unlock */
3954 net_set_todo(dev); 3986 net_set_todo(dev);
3955
3956 synchronize_net();
3957
3958 dev_put(dev);
3959} 3987}
3960 3988
3961/** 3989/**
@@ -4304,7 +4332,6 @@ static struct hlist_head *netdev_create_hash(void)
4304static int __net_init netdev_init(struct net *net) 4332static int __net_init netdev_init(struct net *net)
4305{ 4333{
4306 INIT_LIST_HEAD(&net->dev_base_head); 4334 INIT_LIST_HEAD(&net->dev_base_head);
4307 rwlock_init(&dev_base_lock);
4308 4335
4309 net->dev_name_head = netdev_create_hash(); 4336 net->dev_name_head = netdev_create_hash();
4310 if (net->dev_name_head == NULL) 4337 if (net->dev_name_head == NULL)
@@ -4328,7 +4355,7 @@ static void __net_exit netdev_exit(struct net *net)
4328 kfree(net->dev_index_head); 4355 kfree(net->dev_index_head);
4329} 4356}
4330 4357
4331static struct pernet_operations netdev_net_ops = { 4358static struct pernet_operations __net_initdata netdev_net_ops = {
4332 .init = netdev_init, 4359 .init = netdev_init,
4333 .exit = netdev_exit, 4360 .exit = netdev_exit,
4334}; 4361};
@@ -4359,7 +4386,7 @@ static void __net_exit default_device_exit(struct net *net)
4359 rtnl_unlock(); 4386 rtnl_unlock();
4360} 4387}
4361 4388
4362static struct pernet_operations default_device_ops = { 4389static struct pernet_operations __net_initdata default_device_ops = {
4363 .exit = default_device_exit, 4390 .exit = default_device_exit,
4364}; 4391};
4365 4392
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index ae354057d84c..69fff16ece10 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -168,13 +168,13 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
168 da = from->mc_list; 168 da = from->mc_list;
169 while (da != NULL) { 169 while (da != NULL) {
170 next = da->next; 170 next = da->next;
171 if (!da->da_synced) 171 if (da->da_synced) {
172 continue; 172 __dev_addr_delete(&to->mc_list, &to->mc_count,
173 __dev_addr_delete(&to->mc_list, &to->mc_count, 173 da->da_addr, da->da_addrlen, 0);
174 da->da_addr, da->da_addrlen, 0); 174 da->da_synced = 0;
175 da->da_synced = 0; 175 __dev_addr_delete(&from->mc_list, &from->mc_count,
176 __dev_addr_delete(&from->mc_list, &from->mc_count, 176 da->da_addr, da->da_addrlen, 0);
177 da->da_addr, da->da_addrlen, 0); 177 }
178 da = next; 178 da = next;
179 } 179 }
180 __dev_set_rx_mode(to); 180 __dev_set_rx_mode(to);
@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
285 proc_net_remove(net, "dev_mcast"); 285 proc_net_remove(net, "dev_mcast");
286} 286}
287 287
288static struct pernet_operations dev_mc_net_ops = { 288static struct pernet_operations __net_initdata dev_mc_net_ops = {
289 .init = dev_mc_net_init, 289 .init = dev_mc_net_init,
290 .exit = dev_mc_net_exit, 290 .exit = dev_mc_net_exit,
291}; 291};
diff --git a/net/core/dst.c b/net/core/dst.c
index 16958e64e577..03daead3592a 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -18,7 +18,6 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <net/net_namespace.h> 19#include <net/net_namespace.h>
20 20
21#include <net/net_namespace.h>
22#include <net/dst.h> 21#include <net/dst.h>
23 22
24/* 23/*
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 13de6f53f098..848132b6cb73 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -18,6 +18,28 @@
18static LIST_HEAD(rules_ops); 18static LIST_HEAD(rules_ops);
19static DEFINE_SPINLOCK(rules_mod_lock); 19static DEFINE_SPINLOCK(rules_mod_lock);
20 20
21int fib_default_rule_add(struct fib_rules_ops *ops,
22 u32 pref, u32 table, u32 flags)
23{
24 struct fib_rule *r;
25
26 r = kzalloc(ops->rule_size, GFP_KERNEL);
27 if (r == NULL)
28 return -ENOMEM;
29
30 atomic_set(&r->refcnt, 1);
31 r->action = FR_ACT_TO_TBL;
32 r->pref = pref;
33 r->table = table;
34 r->flags = flags;
35
36 /* The lock is not required here, the list in unreacheable
37 * at the moment this function is called */
38 list_add_tail(&r->list, &ops->rules_list);
39 return 0;
40}
41EXPORT_SYMBOL(fib_default_rule_add);
42
21static void notify_rule_change(int event, struct fib_rule *rule, 43static void notify_rule_change(int event, struct fib_rule *rule,
22 struct fib_rules_ops *ops, struct nlmsghdr *nlh, 44 struct fib_rules_ops *ops, struct nlmsghdr *nlh,
23 u32 pid); 45 u32 pid);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 05979e356963..29b8ee4e35d6 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1435,6 +1435,8 @@ int neigh_table_clear(struct neigh_table *tbl)
1435 kfree(tbl->phash_buckets); 1435 kfree(tbl->phash_buckets);
1436 tbl->phash_buckets = NULL; 1436 tbl->phash_buckets = NULL;
1437 1437
1438 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1439
1438 free_percpu(tbl->stats); 1440 free_percpu(tbl->stats);
1439 tbl->stats = NULL; 1441 tbl->stats = NULL;
1440 1442
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6f71db8c4428..383252b50411 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -17,74 +17,13 @@ static DEFINE_MUTEX(net_mutex);
17 17
18LIST_HEAD(net_namespace_list); 18LIST_HEAD(net_namespace_list);
19 19
20static struct kmem_cache *net_cachep;
21
22struct net init_net; 20struct net init_net;
23EXPORT_SYMBOL_GPL(init_net); 21EXPORT_SYMBOL_GPL(init_net);
24 22
25static struct net *net_alloc(void)
26{
27 return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
28}
29
30static void net_free(struct net *net)
31{
32 if (!net)
33 return;
34
35 if (unlikely(atomic_read(&net->use_count) != 0)) {
36 printk(KERN_EMERG "network namespace not free! Usage: %d\n",
37 atomic_read(&net->use_count));
38 return;
39 }
40
41 kmem_cache_free(net_cachep, net);
42}
43
44static void cleanup_net(struct work_struct *work)
45{
46 struct pernet_operations *ops;
47 struct net *net;
48
49 net = container_of(work, struct net, work);
50
51 mutex_lock(&net_mutex);
52
53 /* Don't let anyone else find us. */
54 rtnl_lock();
55 list_del(&net->list);
56 rtnl_unlock();
57
58 /* Run all of the network namespace exit methods */
59 list_for_each_entry_reverse(ops, &pernet_list, list) {
60 if (ops->exit)
61 ops->exit(net);
62 }
63
64 mutex_unlock(&net_mutex);
65
66 /* Ensure there are no outstanding rcu callbacks using this
67 * network namespace.
68 */
69 rcu_barrier();
70
71 /* Finally it is safe to free my network namespace structure */
72 net_free(net);
73}
74
75
76void __put_net(struct net *net)
77{
78 /* Cleanup the network namespace in process context */
79 INIT_WORK(&net->work, cleanup_net);
80 schedule_work(&net->work);
81}
82EXPORT_SYMBOL_GPL(__put_net);
83
84/* 23/*
85 * setup_net runs the initializers for the network namespace object. 24 * setup_net runs the initializers for the network namespace object.
86 */ 25 */
87static int setup_net(struct net *net) 26static __net_init int setup_net(struct net *net)
88{ 27{
89 /* Must be called with net_mutex held */ 28 /* Must be called with net_mutex held */
90 struct pernet_operations *ops; 29 struct pernet_operations *ops;
@@ -112,9 +51,33 @@ out_undo:
112 if (ops->exit) 51 if (ops->exit)
113 ops->exit(net); 52 ops->exit(net);
114 } 53 }
54
55 rcu_barrier();
115 goto out; 56 goto out;
116} 57}
117 58
59#ifdef CONFIG_NET_NS
60static struct kmem_cache *net_cachep;
61
62static struct net *net_alloc(void)
63{
64 return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
65}
66
67static void net_free(struct net *net)
68{
69 if (!net)
70 return;
71
72 if (unlikely(atomic_read(&net->use_count) != 0)) {
73 printk(KERN_EMERG "network namespace not free! Usage: %d\n",
74 atomic_read(&net->use_count));
75 return;
76 }
77
78 kmem_cache_free(net_cachep, net);
79}
80
118struct net *copy_net_ns(unsigned long flags, struct net *old_net) 81struct net *copy_net_ns(unsigned long flags, struct net *old_net)
119{ 82{
120 struct net *new_net = NULL; 83 struct net *new_net = NULL;
@@ -125,10 +88,6 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
125 if (!(flags & CLONE_NEWNET)) 88 if (!(flags & CLONE_NEWNET))
126 return old_net; 89 return old_net;
127 90
128#ifndef CONFIG_NET_NS
129 return ERR_PTR(-EINVAL);
130#endif
131
132 err = -ENOMEM; 91 err = -ENOMEM;
133 new_net = net_alloc(); 92 new_net = net_alloc();
134 if (!new_net) 93 if (!new_net)
@@ -155,14 +114,64 @@ out:
155 return new_net; 114 return new_net;
156} 115}
157 116
117static void cleanup_net(struct work_struct *work)
118{
119 struct pernet_operations *ops;
120 struct net *net;
121
122 net = container_of(work, struct net, work);
123
124 mutex_lock(&net_mutex);
125
126 /* Don't let anyone else find us. */
127 rtnl_lock();
128 list_del(&net->list);
129 rtnl_unlock();
130
131 /* Run all of the network namespace exit methods */
132 list_for_each_entry_reverse(ops, &pernet_list, list) {
133 if (ops->exit)
134 ops->exit(net);
135 }
136
137 mutex_unlock(&net_mutex);
138
139 /* Ensure there are no outstanding rcu callbacks using this
140 * network namespace.
141 */
142 rcu_barrier();
143
144 /* Finally it is safe to free my network namespace structure */
145 net_free(net);
146}
147
148void __put_net(struct net *net)
149{
150 /* Cleanup the network namespace in process context */
151 INIT_WORK(&net->work, cleanup_net);
152 schedule_work(&net->work);
153}
154EXPORT_SYMBOL_GPL(__put_net);
155
156#else
157struct net *copy_net_ns(unsigned long flags, struct net *old_net)
158{
159 if (flags & CLONE_NEWNET)
160 return ERR_PTR(-EINVAL);
161 return old_net;
162}
163#endif
164
158static int __init net_ns_init(void) 165static int __init net_ns_init(void)
159{ 166{
160 int err; 167 int err;
161 168
162 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); 169 printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
170#ifdef CONFIG_NET_NS
163 net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), 171 net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
164 SMP_CACHE_BYTES, 172 SMP_CACHE_BYTES,
165 SLAB_PANIC, NULL); 173 SLAB_PANIC, NULL);
174#endif
166 mutex_lock(&net_mutex); 175 mutex_lock(&net_mutex);
167 err = setup_net(&init_net); 176 err = setup_net(&init_net);
168 177
@@ -179,35 +188,35 @@ static int __init net_ns_init(void)
179 188
180pure_initcall(net_ns_init); 189pure_initcall(net_ns_init);
181 190
191#ifdef CONFIG_NET_NS
182static int register_pernet_operations(struct list_head *list, 192static int register_pernet_operations(struct list_head *list,
183 struct pernet_operations *ops) 193 struct pernet_operations *ops)
184{ 194{
185 struct net *net, *undo_net; 195 struct net *net, *undo_net;
186 int error; 196 int error;
187 197
188 error = 0;
189 list_add_tail(&ops->list, list); 198 list_add_tail(&ops->list, list);
190 for_each_net(net) { 199 if (ops->init) {
191 if (ops->init) { 200 for_each_net(net) {
192 error = ops->init(net); 201 error = ops->init(net);
193 if (error) 202 if (error)
194 goto out_undo; 203 goto out_undo;
195 } 204 }
196 } 205 }
197out: 206 return 0;
198 return error;
199 207
200out_undo: 208out_undo:
201 /* If I have an error cleanup all namespaces I initialized */ 209 /* If I have an error cleanup all namespaces I initialized */
202 list_del(&ops->list); 210 list_del(&ops->list);
203 for_each_net(undo_net) { 211 if (ops->exit) {
204 if (undo_net == net) 212 for_each_net(undo_net) {
205 goto undone; 213 if (undo_net == net)
206 if (ops->exit) 214 goto undone;
207 ops->exit(undo_net); 215 ops->exit(undo_net);
216 }
208 } 217 }
209undone: 218undone:
210 goto out; 219 return error;
211} 220}
212 221
213static void unregister_pernet_operations(struct pernet_operations *ops) 222static void unregister_pernet_operations(struct pernet_operations *ops)
@@ -215,11 +224,28 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
215 struct net *net; 224 struct net *net;
216 225
217 list_del(&ops->list); 226 list_del(&ops->list);
218 for_each_net(net) 227 if (ops->exit)
219 if (ops->exit) 228 for_each_net(net)
220 ops->exit(net); 229 ops->exit(net);
221} 230}
222 231
232#else
233
234static int register_pernet_operations(struct list_head *list,
235 struct pernet_operations *ops)
236{
237 if (ops->init == NULL)
238 return 0;
239 return ops->init(&init_net);
240}
241
242static void unregister_pernet_operations(struct pernet_operations *ops)
243{
244 if (ops->exit)
245 ops->exit(&init_net);
246}
247#endif
248
223/** 249/**
224 * register_pernet_subsys - register a network namespace subsystem 250 * register_pernet_subsys - register a network namespace subsystem
225 * @ops: pernet operations structure for the subsystem 251 * @ops: pernet operations structure for the subsystem
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index bf8d18f1b013..c499b5c69bed 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
116 * network adapter, forcing superfluous retries and possibly timeouts. 116 * network adapter, forcing superfluous retries and possibly timeouts.
117 * Thus, we set our budget to greater than 1. 117 * Thus, we set our budget to greater than 1.
118 */ 118 */
119static int poll_one_napi(struct netpoll_info *npinfo,
120 struct napi_struct *napi, int budget)
121{
122 int work;
123
124 /* net_rx_action's ->poll() invocations and our's are
125 * synchronized by this test which is only made while
126 * holding the napi->poll_lock.
127 */
128 if (!test_bit(NAPI_STATE_SCHED, &napi->state))
129 return budget;
130
131 npinfo->rx_flags |= NETPOLL_RX_DROP;
132 atomic_inc(&trapped);
133
134 work = napi->poll(napi, budget);
135
136 atomic_dec(&trapped);
137 npinfo->rx_flags &= ~NETPOLL_RX_DROP;
138
139 return budget - work;
140}
141
119static void poll_napi(struct netpoll *np) 142static void poll_napi(struct netpoll *np)
120{ 143{
121 struct netpoll_info *npinfo = np->dev->npinfo; 144 struct netpoll_info *npinfo = np->dev->npinfo;
@@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np)
123 int budget = 16; 146 int budget = 16;
124 147
125 list_for_each_entry(napi, &np->dev->napi_list, dev_list) { 148 list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
126 if (test_bit(NAPI_STATE_SCHED, &napi->state) && 149 if (napi->poll_owner != smp_processor_id() &&
127 napi->poll_owner != smp_processor_id() &&
128 spin_trylock(&napi->poll_lock)) { 150 spin_trylock(&napi->poll_lock)) {
129 npinfo->rx_flags |= NETPOLL_RX_DROP; 151 budget = poll_one_napi(npinfo, napi, budget);
130 atomic_inc(&trapped);
131
132 napi->poll(napi, budget);
133
134 atomic_dec(&trapped);
135 npinfo->rx_flags &= ~NETPOLL_RX_DROP;
136 spin_unlock(&napi->poll_lock); 152 spin_unlock(&napi->poll_lock);
153
154 if (!budget)
155 break;
137 } 156 }
138 } 157 }
139} 158}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de33f36947e9..285ec3ed9b37 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2463,8 +2463,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2463 2463
2464 x->curlft.bytes +=skb->len; 2464 x->curlft.bytes +=skb->len;
2465 x->curlft.packets++; 2465 x->curlft.packets++;
2466 spin_unlock(&x->lock);
2467
2468error: 2466error:
2469 spin_unlock(&x->lock); 2467 spin_unlock(&x->lock);
2470 return err; 2468 return err;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 5f0818d815e6..45aed75cb571 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -71,6 +71,41 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
71 71
72EXPORT_SYMBOL(reqsk_queue_alloc); 72EXPORT_SYMBOL(reqsk_queue_alloc);
73 73
74void __reqsk_queue_destroy(struct request_sock_queue *queue)
75{
76 struct listen_sock *lopt;
77 size_t lopt_size;
78
79 /*
80 * this is an error recovery path only
81 * no locking needed and the lopt is not NULL
82 */
83
84 lopt = queue->listen_opt;
85 lopt_size = sizeof(struct listen_sock) +
86 lopt->nr_table_entries * sizeof(struct request_sock *);
87
88 if (lopt_size > PAGE_SIZE)
89 vfree(lopt);
90 else
91 kfree(lopt);
92}
93
94EXPORT_SYMBOL(__reqsk_queue_destroy);
95
96static inline struct listen_sock *reqsk_queue_yank_listen_sk(
97 struct request_sock_queue *queue)
98{
99 struct listen_sock *lopt;
100
101 write_lock_bh(&queue->syn_wait_lock);
102 lopt = queue->listen_opt;
103 queue->listen_opt = NULL;
104 write_unlock_bh(&queue->syn_wait_lock);
105
106 return lopt;
107}
108
74void reqsk_queue_destroy(struct request_sock_queue *queue) 109void reqsk_queue_destroy(struct request_sock_queue *queue)
75{ 110{
76 /* make all the listen_opt local to us */ 111 /* make all the listen_opt local to us */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 573e17240197..5b4ce9b4dd20 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -275,12 +275,11 @@ static void skb_release_data(struct sk_buff *skb)
275/* 275/*
276 * Free an skbuff by memory without cleaning the state. 276 * Free an skbuff by memory without cleaning the state.
277 */ 277 */
278void kfree_skbmem(struct sk_buff *skb) 278static void kfree_skbmem(struct sk_buff *skb)
279{ 279{
280 struct sk_buff *other; 280 struct sk_buff *other;
281 atomic_t *fclone_ref; 281 atomic_t *fclone_ref;
282 282
283 skb_release_data(skb);
284 switch (skb->fclone) { 283 switch (skb->fclone) {
285 case SKB_FCLONE_UNAVAILABLE: 284 case SKB_FCLONE_UNAVAILABLE:
286 kmem_cache_free(skbuff_head_cache, skb); 285 kmem_cache_free(skbuff_head_cache, skb);
@@ -307,16 +306,8 @@ void kfree_skbmem(struct sk_buff *skb)
307 } 306 }
308} 307}
309 308
310/** 309/* Free everything but the sk_buff shell. */
311 * __kfree_skb - private function 310static void skb_release_all(struct sk_buff *skb)
312 * @skb: buffer
313 *
314 * Free an sk_buff. Release anything attached to the buffer.
315 * Clean the state. This is an internal helper function. Users should
316 * always call kfree_skb
317 */
318
319void __kfree_skb(struct sk_buff *skb)
320{ 311{
321 dst_release(skb->dst); 312 dst_release(skb->dst);
322#ifdef CONFIG_XFRM 313#ifdef CONFIG_XFRM
@@ -340,7 +331,21 @@ void __kfree_skb(struct sk_buff *skb)
340 skb->tc_verd = 0; 331 skb->tc_verd = 0;
341#endif 332#endif
342#endif 333#endif
334 skb_release_data(skb);
335}
343 336
337/**
338 * __kfree_skb - private function
339 * @skb: buffer
340 *
341 * Free an sk_buff. Release anything attached to the buffer.
342 * Clean the state. This is an internal helper function. Users should
343 * always call kfree_skb
344 */
345
346void __kfree_skb(struct sk_buff *skb)
347{
348 skb_release_all(skb);
344 kfree_skbmem(skb); 349 kfree_skbmem(skb);
345} 350}
346 351
@@ -441,7 +446,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
441 */ 446 */
442struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 447struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
443{ 448{
444 skb_release_data(dst); 449 skb_release_all(dst);
445 return __skb_clone(dst, src); 450 return __skb_clone(dst, src);
446} 451}
447EXPORT_SYMBOL_GPL(skb_morph); 452EXPORT_SYMBOL_GPL(skb_morph);
@@ -2028,8 +2033,8 @@ void __init skb_init(void)
2028 * Fill the specified scatter-gather list with mappings/pointers into a 2033 * Fill the specified scatter-gather list with mappings/pointers into a
2029 * region of the buffer space attached to a socket buffer. 2034 * region of the buffer space attached to a socket buffer.
2030 */ 2035 */
2031int 2036static int
2032skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2037__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2033{ 2038{
2034 int start = skb_headlen(skb); 2039 int start = skb_headlen(skb);
2035 int i, copy = start - offset; 2040 int i, copy = start - offset;
@@ -2078,7 +2083,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2078 if ((copy = end - offset) > 0) { 2083 if ((copy = end - offset) > 0) {
2079 if (copy > len) 2084 if (copy > len)
2080 copy = len; 2085 copy = len;
2081 elt += skb_to_sgvec(list, sg+elt, offset - start, copy); 2086 elt += __skb_to_sgvec(list, sg+elt, offset - start,
2087 copy);
2082 if ((len -= copy) == 0) 2088 if ((len -= copy) == 0)
2083 return elt; 2089 return elt;
2084 offset += copy; 2090 offset += copy;
@@ -2090,6 +2096,15 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2090 return elt; 2096 return elt;
2091} 2097}
2092 2098
2099int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2100{
2101 int nsg = __skb_to_sgvec(skb, sg, offset, len);
2102
2103 sg_mark_end(&sg[nsg - 1]);
2104
2105 return nsg;
2106}
2107
2093/** 2108/**
2094 * skb_cow_data - Check that a socket buffer's data buffers are writable 2109 * skb_cow_data - Check that a socket buffer's data buffers are writable
2095 * @skb: The socket buffer to check. 2110 * @skb: The socket buffer to check.
diff --git a/net/core/sock.c b/net/core/sock.c
index bba9949681ff..c519b439b8b1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -857,46 +857,43 @@ static inline void sock_lock_init(struct sock *sk)
857 af_family_keys + sk->sk_family); 857 af_family_keys + sk->sk_family);
858} 858}
859 859
860/** 860static void sock_copy(struct sock *nsk, const struct sock *osk)
861 * sk_alloc - All socket objects are allocated here 861{
862 * @net: the applicable net namespace 862#ifdef CONFIG_SECURITY_NETWORK
863 * @family: protocol family 863 void *sptr = nsk->sk_security;
864 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 864#endif
865 * @prot: struct proto associated with this new sock instance 865
866 * @zero_it: if we should zero the newly allocated sock 866 memcpy(nsk, osk, osk->sk_prot->obj_size);
867 */ 867#ifdef CONFIG_SECURITY_NETWORK
868struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 868 nsk->sk_security = sptr;
869 struct proto *prot, int zero_it) 869 security_sk_clone(osk, nsk);
870#endif
871}
872
873static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
874 int family)
870{ 875{
871 struct sock *sk = NULL; 876 struct sock *sk;
872 struct kmem_cache *slab = prot->slab; 877 struct kmem_cache *slab;
873 878
879 slab = prot->slab;
874 if (slab != NULL) 880 if (slab != NULL)
875 sk = kmem_cache_alloc(slab, priority); 881 sk = kmem_cache_alloc(slab, priority);
876 else 882 else
877 sk = kmalloc(prot->obj_size, priority); 883 sk = kmalloc(prot->obj_size, priority);
878 884
879 if (sk) { 885 if (sk != NULL) {
880 if (zero_it) {
881 memset(sk, 0, prot->obj_size);
882 sk->sk_family = family;
883 /*
884 * See comment in struct sock definition to understand
885 * why we need sk_prot_creator -acme
886 */
887 sk->sk_prot = sk->sk_prot_creator = prot;
888 sock_lock_init(sk);
889 sk->sk_net = get_net(net);
890 }
891
892 if (security_sk_alloc(sk, family, priority)) 886 if (security_sk_alloc(sk, family, priority))
893 goto out_free; 887 goto out_free;
894 888
895 if (!try_module_get(prot->owner)) 889 if (!try_module_get(prot->owner))
896 goto out_free; 890 goto out_free_sec;
897 } 891 }
892
898 return sk; 893 return sk;
899 894
895out_free_sec:
896 security_sk_free(sk);
900out_free: 897out_free:
901 if (slab != NULL) 898 if (slab != NULL)
902 kmem_cache_free(slab, sk); 899 kmem_cache_free(slab, sk);
@@ -905,10 +902,53 @@ out_free:
905 return NULL; 902 return NULL;
906} 903}
907 904
905static void sk_prot_free(struct proto *prot, struct sock *sk)
906{
907 struct kmem_cache *slab;
908 struct module *owner;
909
910 owner = prot->owner;
911 slab = prot->slab;
912
913 security_sk_free(sk);
914 if (slab != NULL)
915 kmem_cache_free(slab, sk);
916 else
917 kfree(sk);
918 module_put(owner);
919}
920
921/**
922 * sk_alloc - All socket objects are allocated here
923 * @net: the applicable net namespace
924 * @family: protocol family
925 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
926 * @prot: struct proto associated with this new sock instance
927 * @zero_it: if we should zero the newly allocated sock
928 */
929struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
930 struct proto *prot)
931{
932 struct sock *sk;
933
934 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
935 if (sk) {
936 sk->sk_family = family;
937 /*
938 * See comment in struct sock definition to understand
939 * why we need sk_prot_creator -acme
940 */
941 sk->sk_prot = sk->sk_prot_creator = prot;
942 sock_lock_init(sk);
943 sk->sk_net = get_net(net);
944 }
945
946 return sk;
947}
948
908void sk_free(struct sock *sk) 949void sk_free(struct sock *sk)
909{ 950{
910 struct sk_filter *filter; 951 struct sk_filter *filter;
911 struct module *owner = sk->sk_prot_creator->owner;
912 952
913 if (sk->sk_destruct) 953 if (sk->sk_destruct)
914 sk->sk_destruct(sk); 954 sk->sk_destruct(sk);
@@ -925,25 +965,22 @@ void sk_free(struct sock *sk)
925 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", 965 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
926 __FUNCTION__, atomic_read(&sk->sk_omem_alloc)); 966 __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
927 967
928 security_sk_free(sk);
929 put_net(sk->sk_net); 968 put_net(sk->sk_net);
930 if (sk->sk_prot_creator->slab != NULL) 969 sk_prot_free(sk->sk_prot_creator, sk);
931 kmem_cache_free(sk->sk_prot_creator->slab, sk);
932 else
933 kfree(sk);
934 module_put(owner);
935} 970}
936 971
937struct sock *sk_clone(const struct sock *sk, const gfp_t priority) 972struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
938{ 973{
939 struct sock *newsk = sk_alloc(sk->sk_net, sk->sk_family, priority, sk->sk_prot, 0); 974 struct sock *newsk;
940 975
976 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
941 if (newsk != NULL) { 977 if (newsk != NULL) {
942 struct sk_filter *filter; 978 struct sk_filter *filter;
943 979
944 sock_copy(newsk, sk); 980 sock_copy(newsk, sk);
945 981
946 /* SANITY */ 982 /* SANITY */
983 get_net(newsk->sk_net);
947 sk_node_init(&newsk->sk_node); 984 sk_node_init(&newsk->sk_node);
948 sock_lock_init(newsk); 985 sock_lock_init(newsk);
949 bh_lock_sock(newsk); 986 bh_lock_sock(newsk);
@@ -1764,11 +1801,65 @@ EXPORT_SYMBOL(sk_common_release);
1764static DEFINE_RWLOCK(proto_list_lock); 1801static DEFINE_RWLOCK(proto_list_lock);
1765static LIST_HEAD(proto_list); 1802static LIST_HEAD(proto_list);
1766 1803
1804#ifdef CONFIG_SMP
1805/*
1806 * Define default functions to keep track of inuse sockets per protocol
1807 * Note that often used protocols use dedicated functions to get a speed increase.
1808 * (see DEFINE_PROTO_INUSE/REF_PROTO_INUSE)
1809 */
1810static void inuse_add(struct proto *prot, int inc)
1811{
1812 per_cpu_ptr(prot->inuse_ptr, smp_processor_id())[0] += inc;
1813}
1814
1815static int inuse_get(const struct proto *prot)
1816{
1817 int res = 0, cpu;
1818 for_each_possible_cpu(cpu)
1819 res += per_cpu_ptr(prot->inuse_ptr, cpu)[0];
1820 return res;
1821}
1822
1823static int inuse_init(struct proto *prot)
1824{
1825 if (!prot->inuse_getval || !prot->inuse_add) {
1826 prot->inuse_ptr = alloc_percpu(int);
1827 if (prot->inuse_ptr == NULL)
1828 return -ENOBUFS;
1829
1830 prot->inuse_getval = inuse_get;
1831 prot->inuse_add = inuse_add;
1832 }
1833 return 0;
1834}
1835
1836static void inuse_fini(struct proto *prot)
1837{
1838 if (prot->inuse_ptr != NULL) {
1839 free_percpu(prot->inuse_ptr);
1840 prot->inuse_ptr = NULL;
1841 prot->inuse_getval = NULL;
1842 prot->inuse_add = NULL;
1843 }
1844}
1845#else
1846static inline int inuse_init(struct proto *prot)
1847{
1848 return 0;
1849}
1850
1851static inline void inuse_fini(struct proto *prot)
1852{
1853}
1854#endif
1855
1767int proto_register(struct proto *prot, int alloc_slab) 1856int proto_register(struct proto *prot, int alloc_slab)
1768{ 1857{
1769 char *request_sock_slab_name = NULL; 1858 char *request_sock_slab_name = NULL;
1770 char *timewait_sock_slab_name; 1859 char *timewait_sock_slab_name;
1771 int rc = -ENOBUFS; 1860
1861 if (inuse_init(prot))
1862 goto out;
1772 1863
1773 if (alloc_slab) { 1864 if (alloc_slab) {
1774 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, 1865 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
@@ -1777,7 +1868,7 @@ int proto_register(struct proto *prot, int alloc_slab)
1777 if (prot->slab == NULL) { 1868 if (prot->slab == NULL) {
1778 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", 1869 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
1779 prot->name); 1870 prot->name);
1780 goto out; 1871 goto out_free_inuse;
1781 } 1872 }
1782 1873
1783 if (prot->rsk_prot != NULL) { 1874 if (prot->rsk_prot != NULL) {
@@ -1821,9 +1912,8 @@ int proto_register(struct proto *prot, int alloc_slab)
1821 write_lock(&proto_list_lock); 1912 write_lock(&proto_list_lock);
1822 list_add(&prot->node, &proto_list); 1913 list_add(&prot->node, &proto_list);
1823 write_unlock(&proto_list_lock); 1914 write_unlock(&proto_list_lock);
1824 rc = 0; 1915 return 0;
1825out: 1916
1826 return rc;
1827out_free_timewait_sock_slab_name: 1917out_free_timewait_sock_slab_name:
1828 kfree(timewait_sock_slab_name); 1918 kfree(timewait_sock_slab_name);
1829out_free_request_sock_slab: 1919out_free_request_sock_slab:
@@ -1836,7 +1926,10 @@ out_free_request_sock_slab_name:
1836out_free_sock_slab: 1926out_free_sock_slab:
1837 kmem_cache_destroy(prot->slab); 1927 kmem_cache_destroy(prot->slab);
1838 prot->slab = NULL; 1928 prot->slab = NULL;
1839 goto out; 1929out_free_inuse:
1930 inuse_fini(prot);
1931out:
1932 return -ENOBUFS;
1840} 1933}
1841 1934
1842EXPORT_SYMBOL(proto_register); 1935EXPORT_SYMBOL(proto_register);
@@ -1847,6 +1940,7 @@ void proto_unregister(struct proto *prot)
1847 list_del(&prot->node); 1940 list_del(&prot->node);
1848 write_unlock(&proto_list_lock); 1941 write_unlock(&proto_list_lock);
1849 1942
1943 inuse_fini(prot);
1850 if (prot->slab != NULL) { 1944 if (prot->slab != NULL) {
1851 kmem_cache_destroy(prot->slab); 1945 kmem_cache_destroy(prot->slab);
1852 prot->slab = NULL; 1946 prot->slab = NULL;
@@ -2003,7 +2097,3 @@ EXPORT_SYMBOL(sock_wmalloc);
2003EXPORT_SYMBOL(sock_i_uid); 2097EXPORT_SYMBOL(sock_i_uid);
2004EXPORT_SYMBOL(sock_i_ino); 2098EXPORT_SYMBOL(sock_i_ino);
2005EXPORT_SYMBOL(sysctl_optmem_max); 2099EXPORT_SYMBOL(sysctl_optmem_max);
2006#ifdef CONFIG_SYSCTL
2007EXPORT_SYMBOL(sysctl_rmem_max);
2008EXPORT_SYMBOL(sysctl_wmem_max);
2009#endif
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 40ad428a27f5..d26b88dbbb45 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -166,7 +166,7 @@ static u32 dccp_li_calc_first_li(struct sock *sk,
166 } 166 }
167 167
168 if (unlikely(interval == 0)) { 168 if (unlikely(interval == 0)) {
169 DCCP_WARN("%s(%p), Could not find a win_count interval > 0." 169 DCCP_WARN("%s(%p), Could not find a win_count interval > 0. "
170 "Defaulting to 1\n", dccp_role(sk), sk); 170 "Defaulting to 1\n", dccp_role(sk), sk);
171 interval = 1; 171 interval = 1;
172 } 172 }
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 01a6a808bdb7..db17b83e8d3e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -922,6 +922,8 @@ static struct timewait_sock_ops dccp_timewait_sock_ops = {
922 .twsk_obj_size = sizeof(struct inet_timewait_sock), 922 .twsk_obj_size = sizeof(struct inet_timewait_sock),
923}; 923};
924 924
925DEFINE_PROTO_INUSE(dccp_v4)
926
925static struct proto dccp_v4_prot = { 927static struct proto dccp_v4_prot = {
926 .name = "DCCP", 928 .name = "DCCP",
927 .owner = THIS_MODULE, 929 .owner = THIS_MODULE,
@@ -950,6 +952,7 @@ static struct proto dccp_v4_prot = {
950 .compat_setsockopt = compat_dccp_setsockopt, 952 .compat_setsockopt = compat_dccp_setsockopt,
951 .compat_getsockopt = compat_dccp_getsockopt, 953 .compat_getsockopt = compat_dccp_getsockopt,
952#endif 954#endif
955 REF_PROTO_INUSE(dccp_v4)
953}; 956};
954 957
955static struct net_protocol dccp_v4_protocol = { 958static struct net_protocol dccp_v4_protocol = {
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 62428ff137dd..87c98fb86fa8 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1107,6 +1107,8 @@ static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1107 .twsk_obj_size = sizeof(struct dccp6_timewait_sock), 1107 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
1108}; 1108};
1109 1109
1110DEFINE_PROTO_INUSE(dccp_v6)
1111
1110static struct proto dccp_v6_prot = { 1112static struct proto dccp_v6_prot = {
1111 .name = "DCCPv6", 1113 .name = "DCCPv6",
1112 .owner = THIS_MODULE, 1114 .owner = THIS_MODULE,
@@ -1135,6 +1137,7 @@ static struct proto dccp_v6_prot = {
1135 .compat_setsockopt = compat_dccp_setsockopt, 1137 .compat_setsockopt = compat_dccp_setsockopt,
1136 .compat_getsockopt = compat_dccp_getsockopt, 1138 .compat_getsockopt = compat_dccp_getsockopt,
1137#endif 1139#endif
1140 REF_PROTO_INUSE(dccp_v6)
1138}; 1141};
1139 1142
1140static struct inet6_protocol dccp_v6_protocol = { 1143static struct inet6_protocol dccp_v6_protocol = {
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d84973928033..7a3bea9c28c1 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1072,11 +1072,13 @@ static int __init dccp_init(void)
1072 } 1072 }
1073 1073
1074 for (i = 0; i < dccp_hashinfo.ehash_size; i++) { 1074 for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
1075 rwlock_init(&dccp_hashinfo.ehash[i].lock);
1076 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain); 1075 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
1077 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain); 1076 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
1078 } 1077 }
1079 1078
1079 if (inet_ehash_locks_alloc(&dccp_hashinfo))
1080 goto out_free_dccp_ehash;
1081
1080 bhash_order = ehash_order; 1082 bhash_order = ehash_order;
1081 1083
1082 do { 1084 do {
@@ -1091,7 +1093,7 @@ static int __init dccp_init(void)
1091 1093
1092 if (!dccp_hashinfo.bhash) { 1094 if (!dccp_hashinfo.bhash) {
1093 DCCP_CRIT("Failed to allocate DCCP bind hash table"); 1095 DCCP_CRIT("Failed to allocate DCCP bind hash table");
1094 goto out_free_dccp_ehash; 1096 goto out_free_dccp_locks;
1095 } 1097 }
1096 1098
1097 for (i = 0; i < dccp_hashinfo.bhash_size; i++) { 1099 for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
@@ -1121,6 +1123,8 @@ out_free_dccp_mib:
1121out_free_dccp_bhash: 1123out_free_dccp_bhash:
1122 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); 1124 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
1123 dccp_hashinfo.bhash = NULL; 1125 dccp_hashinfo.bhash = NULL;
1126out_free_dccp_locks:
1127 inet_ehash_locks_free(&dccp_hashinfo);
1124out_free_dccp_ehash: 1128out_free_dccp_ehash:
1125 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); 1129 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
1126 dccp_hashinfo.ehash = NULL; 1130 dccp_hashinfo.ehash = NULL;
@@ -1139,6 +1143,7 @@ static void __exit dccp_fini(void)
1139 free_pages((unsigned long)dccp_hashinfo.ehash, 1143 free_pages((unsigned long)dccp_hashinfo.ehash,
1140 get_order(dccp_hashinfo.ehash_size * 1144 get_order(dccp_hashinfo.ehash_size *
1141 sizeof(struct inet_ehash_bucket))); 1145 sizeof(struct inet_ehash_bucket)));
1146 inet_ehash_locks_free(&dccp_hashinfo);
1142 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1147 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1143 dccp_ackvec_exit(); 1148 dccp_ackvec_exit();
1144 dccp_sysctl_exit(); 1149 dccp_sysctl_exit();
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index aabe98d9402f..57d574951838 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -474,7 +474,7 @@ static struct proto dn_proto = {
474static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp) 474static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
475{ 475{
476 struct dn_scp *scp; 476 struct dn_scp *scp;
477 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, 1); 477 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
478 478
479 if (!sk) 479 if (!sk)
480 goto out; 480 goto out;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 26130afd8029..66e266fb5908 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1439,7 +1439,7 @@ static const struct file_operations dn_dev_seq_fops = {
1439 1439
1440#endif /* CONFIG_PROC_FS */ 1440#endif /* CONFIG_PROC_FS */
1441 1441
1442static int __initdata addr[2]; 1442static int addr[2];
1443module_param_array(addr, int, NULL, 0444); 1443module_param_array(addr, int, NULL, 0444);
1444MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); 1444MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
1445 1445
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 97eee5e8fbbe..66663e5d7acd 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -293,9 +293,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
293 dn_rt_hash_table[hash].chain); 293 dn_rt_hash_table[hash].chain);
294 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); 294 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
295 295
296 rth->u.dst.__use++; 296 dst_use(&rth->u.dst, now);
297 dst_hold(&rth->u.dst);
298 rth->u.dst.lastuse = now;
299 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 297 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
300 298
301 dnrt_drop(rt); 299 dnrt_drop(rt);
@@ -308,9 +306,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
308 rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain); 306 rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
309 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); 307 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
310 308
311 dst_hold(&rt->u.dst); 309 dst_use(&rt->u.dst, now);
312 rt->u.dst.__use++;
313 rt->u.dst.lastuse = now;
314 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 310 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
315 *rp = rt; 311 *rp = rt;
316 return 0; 312 return 0;
@@ -1182,9 +1178,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1182 (flp->mark == rt->fl.mark) && 1178 (flp->mark == rt->fl.mark) &&
1183 (rt->fl.iif == 0) && 1179 (rt->fl.iif == 0) &&
1184 (rt->fl.oif == flp->oif)) { 1180 (rt->fl.oif == flp->oif)) {
1185 rt->u.dst.lastuse = jiffies; 1181 dst_use(&rt->u.dst, jiffies);
1186 dst_hold(&rt->u.dst);
1187 rt->u.dst.__use++;
1188 rcu_read_unlock_bh(); 1182 rcu_read_unlock_bh();
1189 *pprt = &rt->u.dst; 1183 *pprt = &rt->u.dst;
1190 return 0; 1184 return 0;
@@ -1456,9 +1450,7 @@ int dn_route_input(struct sk_buff *skb)
1456 (rt->fl.oif == 0) && 1450 (rt->fl.oif == 0) &&
1457 (rt->fl.mark == skb->mark) && 1451 (rt->fl.mark == skb->mark) &&
1458 (rt->fl.iif == cb->iif)) { 1452 (rt->fl.iif == cb->iif)) {
1459 rt->u.dst.lastuse = jiffies; 1453 dst_use(&rt->u.dst, jiffies);
1460 dst_hold(&rt->u.dst);
1461 rt->u.dst.__use++;
1462 rcu_read_unlock(); 1454 rcu_read_unlock();
1463 skb->dst = (struct dst_entry *)rt; 1455 skb->dst = (struct dst_entry *)rt;
1464 return 0; 1456 return 0;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index ddd3f04f0919..ffebea04cc99 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -48,15 +48,6 @@ struct dn_fib_rule
48 u8 flags; 48 u8 flags;
49}; 49};
50 50
51static struct dn_fib_rule default_rule = {
52 .common = {
53 .refcnt = ATOMIC_INIT(2),
54 .pref = 0x7fff,
55 .table = RT_TABLE_MAIN,
56 .action = FR_ACT_TO_TBL,
57 },
58};
59
60 51
61int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res) 52int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res)
62{ 53{
@@ -262,8 +253,8 @@ static struct fib_rules_ops dn_fib_rules_ops = {
262 253
263void __init dn_fib_rules_init(void) 254void __init dn_fib_rules_init(void)
264{ 255{
265 list_add_tail(&default_rule.common.list, 256 BUG_ON(fib_default_rule_add(&dn_fib_rules_ops, 0x7fff,
266 &dn_fib_rules_ops.rules_list); 257 RT_TABLE_MAIN, 0));
267 fib_rules_register(&dn_fib_rules_ops); 258 fib_rules_register(&dn_fib_rules_ops);
268} 259}
269 260
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 9cae16b4e0b7..f70df073c588 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -624,7 +624,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol)
624 sock->state = SS_UNCONNECTED; 624 sock->state = SS_UNCONNECTED;
625 625
626 err = -ENOBUFS; 626 err = -ENOBUFS;
627 sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto, 1); 627 sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
628 if (sk == NULL) 628 if (sk == NULL)
629 goto out; 629 goto out;
630 630
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 0936a3e0210b..208bf35b5546 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -25,7 +25,6 @@
25#include <net/ieee80211.h> 25#include <net/ieee80211.h>
26 26
27#include <linux/crypto.h> 27#include <linux/crypto.h>
28#include <asm/scatterlist.h>
29 28
30MODULE_AUTHOR("Jouni Malinen"); 29MODULE_AUTHOR("Jouni Malinen");
31MODULE_DESCRIPTION("Host AP crypt: CCMP"); 30MODULE_DESCRIPTION("Host AP crypt: CCMP");
@@ -339,7 +338,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
339 pos += 8; 338 pos += 8;
340 339
341 if (ccmp_replay_check(pn, key->rx_pn)) { 340 if (ccmp_replay_check(pn, key->rx_pn)) {
342 if (net_ratelimit()) { 341 if (ieee80211_ratelimit_debug(IEEE80211_DL_DROP)) {
343 IEEE80211_DEBUG_DROP("CCMP: replay detected: STA=%s " 342 IEEE80211_DEBUG_DROP("CCMP: replay detected: STA=%s "
344 "previous PN %02x%02x%02x%02x%02x%02x " 343 "previous PN %02x%02x%02x%02x%02x%02x "
345 "received PN %02x%02x%02x%02x%02x%02x\n", 344 "received PN %02x%02x%02x%02x%02x%02x\n",
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 4cce3534e408..8e146949fc6f 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -25,7 +25,6 @@
25#include <net/ieee80211.h> 25#include <net/ieee80211.h>
26 26
27#include <linux/crypto.h> 27#include <linux/crypto.h>
28#include <linux/scatterlist.h>
29#include <linux/crc32.h> 28#include <linux/crc32.h>
30 29
31MODULE_AUTHOR("Jouni Malinen"); 30MODULE_AUTHOR("Jouni Malinen");
@@ -465,7 +464,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
465 pos += 8; 464 pos += 8;
466 465
467 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { 466 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
468 if (net_ratelimit()) { 467 if (ieee80211_ratelimit_debug(IEEE80211_DL_DROP)) {
469 IEEE80211_DEBUG_DROP("TKIP: replay detected: STA=%s" 468 IEEE80211_DEBUG_DROP("TKIP: replay detected: STA=%s"
470 " previous TSC %08x%04x received TSC " 469 " previous TSC %08x%04x received TSC "
471 "%08x%04x\n", print_mac(mac, hdr->addr2), 470 "%08x%04x\n", print_mac(mac, hdr->addr2),
@@ -505,7 +504,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
505 * it needs to be recalculated for the next packet. */ 504 * it needs to be recalculated for the next packet. */
506 tkey->rx_phase1_done = 0; 505 tkey->rx_phase1_done = 0;
507 } 506 }
508 if (net_ratelimit()) { 507 if (ieee80211_ratelimit_debug(IEEE80211_DL_DROP)) {
509 IEEE80211_DEBUG_DROP("TKIP: ICV error detected: STA=" 508 IEEE80211_DEBUG_DROP("TKIP: ICV error detected: STA="
510 "%s\n", print_mac(mac, hdr->addr2)); 509 "%s\n", print_mac(mac, hdr->addr2));
511 } 510 }
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 866fc04c44f9..3fa30c40779f 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -22,7 +22,6 @@
22#include <net/ieee80211.h> 22#include <net/ieee80211.h>
23 23
24#include <linux/crypto.h> 24#include <linux/crypto.h>
25#include <linux/scatterlist.h>
26#include <linux/crc32.h> 25#include <linux/crc32.h>
27 26
28MODULE_AUTHOR("Jouni Malinen"); 27MODULE_AUTHOR("Jouni Malinen");
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index ac36767b56e8..e01b59aedc54 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -470,7 +470,7 @@ ieee80211softmac_wx_set_mlme(struct net_device *dev,
470{ 470{
471 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 471 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
472 struct iw_mlme *mlme = (struct iw_mlme *)extra; 472 struct iw_mlme *mlme = (struct iw_mlme *)extra;
473 u16 reason = cpu_to_le16(mlme->reason_code); 473 u16 reason = mlme->reason_code;
474 struct ieee80211softmac_network *net; 474 struct ieee80211softmac_network *net;
475 int err = -EINVAL; 475 int err = -EINVAL;
476 476
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 621b128897d7..d2f22e74b267 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -323,7 +323,7 @@ lookup_protocol:
323 BUG_TRAP(answer_prot->slab != NULL); 323 BUG_TRAP(answer_prot->slab != NULL);
324 324
325 err = -ENOBUFS; 325 err = -ENOBUFS;
326 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, 1); 326 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
327 if (sk == NULL) 327 if (sk == NULL)
328 goto out; 328 goto out;
329 329
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 4e8e3b079f5b..5fc346d8b566 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -8,7 +8,6 @@
8#include <linux/spinlock.h> 8#include <linux/spinlock.h>
9#include <net/icmp.h> 9#include <net/icmp.h>
10#include <net/protocol.h> 10#include <net/protocol.h>
11#include <asm/scatterlist.h>
12 11
13 12
14/* Clear mutable options and find final destination to substitute 13/* Clear mutable options and find final destination to substitute
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 36d6798947b5..b3f366a33a5c 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -111,12 +111,8 @@
111#include <net/tcp.h> 111#include <net/tcp.h>
112#include <net/sock.h> 112#include <net/sock.h>
113#include <net/arp.h> 113#include <net/arp.h>
114#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
115#include <net/ax25.h> 114#include <net/ax25.h>
116#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
117#include <net/netrom.h> 115#include <net/netrom.h>
118#endif
119#endif
120#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) 116#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
121#include <net/atmclip.h> 117#include <net/atmclip.h>
122struct neigh_table *clip_tbl_hook; 118struct neigh_table *clip_tbl_hook;
@@ -731,20 +727,10 @@ static int arp_process(struct sk_buff *skb)
731 htons(dev_type) != arp->ar_hrd) 727 htons(dev_type) != arp->ar_hrd)
732 goto out; 728 goto out;
733 break; 729 break;
734#ifdef CONFIG_NET_ETHERNET
735 case ARPHRD_ETHER: 730 case ARPHRD_ETHER:
736#endif
737#ifdef CONFIG_TR
738 case ARPHRD_IEEE802_TR: 731 case ARPHRD_IEEE802_TR:
739#endif
740#ifdef CONFIG_FDDI
741 case ARPHRD_FDDI: 732 case ARPHRD_FDDI:
742#endif
743#ifdef CONFIG_NET_FC
744 case ARPHRD_IEEE802: 733 case ARPHRD_IEEE802:
745#endif
746#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \
747 defined(CONFIG_FDDI) || defined(CONFIG_NET_FC)
748 /* 734 /*
749 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802 735 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
750 * devices, according to RFC 2625) devices will accept ARP 736 * devices, according to RFC 2625) devices will accept ARP
@@ -759,21 +745,16 @@ static int arp_process(struct sk_buff *skb)
759 arp->ar_pro != htons(ETH_P_IP)) 745 arp->ar_pro != htons(ETH_P_IP))
760 goto out; 746 goto out;
761 break; 747 break;
762#endif
763#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
764 case ARPHRD_AX25: 748 case ARPHRD_AX25:
765 if (arp->ar_pro != htons(AX25_P_IP) || 749 if (arp->ar_pro != htons(AX25_P_IP) ||
766 arp->ar_hrd != htons(ARPHRD_AX25)) 750 arp->ar_hrd != htons(ARPHRD_AX25))
767 goto out; 751 goto out;
768 break; 752 break;
769#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
770 case ARPHRD_NETROM: 753 case ARPHRD_NETROM:
771 if (arp->ar_pro != htons(AX25_P_IP) || 754 if (arp->ar_pro != htons(AX25_P_IP) ||
772 arp->ar_hrd != htons(ARPHRD_NETROM)) 755 arp->ar_hrd != htons(ARPHRD_NETROM))
773 goto out; 756 goto out;
774 break; 757 break;
775#endif
776#endif
777 } 758 }
778 759
779 /* Understand only these message types */ 760 /* Understand only these message types */
@@ -828,7 +809,8 @@ static int arp_process(struct sk_buff *skb)
828 if (arp->ar_op == htons(ARPOP_REQUEST) && 809 if (arp->ar_op == htons(ARPOP_REQUEST) &&
829 inet_addr_type(tip) == RTN_LOCAL && 810 inet_addr_type(tip) == RTN_LOCAL &&
830 !arp_ignore(in_dev,dev,sip,tip)) 811 !arp_ignore(in_dev,dev,sip,tip))
831 arp_send(ARPOP_REPLY,ETH_P_ARP,tip,dev,tip,sha,dev->dev_addr,dev->dev_addr); 812 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
813 dev->dev_addr, sha);
832 goto out; 814 goto out;
833 } 815 }
834 816
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cad4278025ad..c31bccb9b526 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -111,9 +111,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
111 goto unlock; 111 goto unlock;
112 } 112 }
113 sg_init_table(sg, nfrags); 113 sg_init_table(sg, nfrags);
114 sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data + 114 skb_to_sgvec(skb, sg,
115 esp->conf.ivlen - 115 esph->enc_data +
116 skb->data, clen)); 116 esp->conf.ivlen -
117 skb->data, clen);
117 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 118 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
118 if (unlikely(sg != &esp->sgbuf[0])) 119 if (unlikely(sg != &esp->sgbuf[0]))
119 kfree(sg); 120 kfree(sg);
@@ -205,8 +206,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
205 goto out; 206 goto out;
206 } 207 }
207 sg_init_table(sg, nfrags); 208 sg_init_table(sg, nfrags);
208 sg_mark_end(sg, skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, 209 skb_to_sgvec(skb, sg,
209 elen)); 210 sizeof(*esph) + esp->conf.ivlen,
211 elen);
210 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); 212 err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
211 if (unlikely(sg != &esp->sgbuf[0])) 213 if (unlikely(sg != &esp->sgbuf[0]))
212 kfree(sg); 214 kfree(sg);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 60123905dbbf..732d8f088b13 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -59,6 +59,13 @@ struct fib_table *ip_fib_main_table;
59#define FIB_TABLE_HASHSZ 1 59#define FIB_TABLE_HASHSZ 1
60static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ]; 60static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ];
61 61
62static void __init fib4_rules_init(void)
63{
64 ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL);
65 hlist_add_head_rcu(&ip_fib_local_table->tb_hlist, &fib_table_hash[0]);
66 ip_fib_main_table = fib_hash_init(RT_TABLE_MAIN);
67 hlist_add_head_rcu(&ip_fib_main_table->tb_hlist, &fib_table_hash[0]);
68}
62#else 69#else
63 70
64#define FIB_TABLE_HASHSZ 256 71#define FIB_TABLE_HASHSZ 256
@@ -905,14 +912,8 @@ void __init ip_fib_init(void)
905 912
906 for (i = 0; i < FIB_TABLE_HASHSZ; i++) 913 for (i = 0; i < FIB_TABLE_HASHSZ; i++)
907 INIT_HLIST_HEAD(&fib_table_hash[i]); 914 INIT_HLIST_HEAD(&fib_table_hash[i]);
908#ifndef CONFIG_IP_MULTIPLE_TABLES 915
909 ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL);
910 hlist_add_head_rcu(&ip_fib_local_table->tb_hlist, &fib_table_hash[0]);
911 ip_fib_main_table = fib_hash_init(RT_TABLE_MAIN);
912 hlist_add_head_rcu(&ip_fib_main_table->tb_hlist, &fib_table_hash[0]);
913#else
914 fib4_rules_init(); 916 fib4_rules_init();
915#endif
916 917
917 register_netdevice_notifier(&fib_netdev_notifier); 918 register_netdevice_notifier(&fib_netdev_notifier);
918 register_inetaddr_notifier(&fib_inetaddr_notifier); 919 register_inetaddr_notifier(&fib_inetaddr_notifier);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f16839c6a721..a0ada3a8d8dd 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -49,33 +49,6 @@ struct fib4_rule
49#endif 49#endif
50}; 50};
51 51
52static struct fib4_rule default_rule = {
53 .common = {
54 .refcnt = ATOMIC_INIT(2),
55 .pref = 0x7FFF,
56 .table = RT_TABLE_DEFAULT,
57 .action = FR_ACT_TO_TBL,
58 },
59};
60
61static struct fib4_rule main_rule = {
62 .common = {
63 .refcnt = ATOMIC_INIT(2),
64 .pref = 0x7FFE,
65 .table = RT_TABLE_MAIN,
66 .action = FR_ACT_TO_TBL,
67 },
68};
69
70static struct fib4_rule local_rule = {
71 .common = {
72 .refcnt = ATOMIC_INIT(2),
73 .table = RT_TABLE_LOCAL,
74 .action = FR_ACT_TO_TBL,
75 .flags = FIB_RULE_PERMANENT,
76 },
77};
78
79#ifdef CONFIG_NET_CLS_ROUTE 52#ifdef CONFIG_NET_CLS_ROUTE
80u32 fib_rules_tclass(struct fib_result *res) 53u32 fib_rules_tclass(struct fib_result *res)
81{ 54{
@@ -319,11 +292,27 @@ static struct fib_rules_ops fib4_rules_ops = {
319 .owner = THIS_MODULE, 292 .owner = THIS_MODULE,
320}; 293};
321 294
322void __init fib4_rules_init(void) 295static int __init fib_default_rules_init(void)
323{ 296{
324 list_add_tail(&local_rule.common.list, &fib4_rules_ops.rules_list); 297 int err;
325 list_add_tail(&main_rule.common.list, &fib4_rules_ops.rules_list); 298
326 list_add_tail(&default_rule.common.list, &fib4_rules_ops.rules_list); 299 err = fib_default_rule_add(&fib4_rules_ops, 0,
300 RT_TABLE_LOCAL, FIB_RULE_PERMANENT);
301 if (err < 0)
302 return err;
303 err = fib_default_rule_add(&fib4_rules_ops, 0x7FFE,
304 RT_TABLE_MAIN, 0);
305 if (err < 0)
306 return err;
307 err = fib_default_rule_add(&fib4_rules_ops, 0x7FFF,
308 RT_TABLE_DEFAULT, 0);
309 if (err < 0)
310 return err;
311 return 0;
312}
327 313
314void __init fib4_rules_init(void)
315{
316 BUG_ON(fib_default_rules_init());
328 fib_rules_register(&fib4_rules_ops); 317 fib_rules_register(&fib4_rules_ops);
329} 318}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index dc429b6b0ba6..b0170732b5e9 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -747,13 +747,14 @@ skip_listen_ht:
747 747
748 for (i = s_i; i < hashinfo->ehash_size; i++) { 748 for (i = s_i; i < hashinfo->ehash_size; i++) {
749 struct inet_ehash_bucket *head = &hashinfo->ehash[i]; 749 struct inet_ehash_bucket *head = &hashinfo->ehash[i];
750 rwlock_t *lock = inet_ehash_lockp(hashinfo, i);
750 struct sock *sk; 751 struct sock *sk;
751 struct hlist_node *node; 752 struct hlist_node *node;
752 753
753 if (i > s_i) 754 if (i > s_i)
754 s_num = 0; 755 s_num = 0;
755 756
756 read_lock_bh(&head->lock); 757 read_lock_bh(lock);
757 num = 0; 758 num = 0;
758 sk_for_each(sk, node, &head->chain) { 759 sk_for_each(sk, node, &head->chain) {
759 struct inet_sock *inet = inet_sk(sk); 760 struct inet_sock *inet = inet_sk(sk);
@@ -769,7 +770,7 @@ skip_listen_ht:
769 r->id.idiag_dport) 770 r->id.idiag_dport)
770 goto next_normal; 771 goto next_normal;
771 if (inet_csk_diag_dump(sk, skb, cb) < 0) { 772 if (inet_csk_diag_dump(sk, skb, cb) < 0) {
772 read_unlock_bh(&head->lock); 773 read_unlock_bh(lock);
773 goto done; 774 goto done;
774 } 775 }
775next_normal: 776next_normal:
@@ -791,14 +792,14 @@ next_normal:
791 r->id.idiag_dport) 792 r->id.idiag_dport)
792 goto next_dying; 793 goto next_dying;
793 if (inet_twsk_diag_dump(tw, skb, cb) < 0) { 794 if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
794 read_unlock_bh(&head->lock); 795 read_unlock_bh(lock);
795 goto done; 796 goto done;
796 } 797 }
797next_dying: 798next_dying:
798 ++num; 799 ++num;
799 } 800 }
800 } 801 }
801 read_unlock_bh(&head->lock); 802 read_unlock_bh(lock);
802 } 803 }
803 804
804done: 805done:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 16eecc7046a3..67704da04fc4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -204,12 +204,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
204 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport); 204 const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
205 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); 205 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
206 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 206 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
207 rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
207 struct sock *sk2; 208 struct sock *sk2;
208 const struct hlist_node *node; 209 const struct hlist_node *node;
209 struct inet_timewait_sock *tw; 210 struct inet_timewait_sock *tw;
210 211
211 prefetch(head->chain.first); 212 prefetch(head->chain.first);
212 write_lock(&head->lock); 213 write_lock(lock);
213 214
214 /* Check TIME-WAIT sockets first. */ 215 /* Check TIME-WAIT sockets first. */
215 sk_for_each(sk2, node, &head->twchain) { 216 sk_for_each(sk2, node, &head->twchain) {
@@ -239,7 +240,7 @@ unique:
239 BUG_TRAP(sk_unhashed(sk)); 240 BUG_TRAP(sk_unhashed(sk));
240 __sk_add_node(sk, &head->chain); 241 __sk_add_node(sk, &head->chain);
241 sock_prot_inc_use(sk->sk_prot); 242 sock_prot_inc_use(sk->sk_prot);
242 write_unlock(&head->lock); 243 write_unlock(lock);
243 244
244 if (twp) { 245 if (twp) {
245 *twp = tw; 246 *twp = tw;
@@ -255,7 +256,7 @@ unique:
255 return 0; 256 return 0;
256 257
257not_unique: 258not_unique:
258 write_unlock(&head->lock); 259 write_unlock(lock);
259 return -EADDRNOTAVAIL; 260 return -EADDRNOTAVAIL;
260} 261}
261 262
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 4e189e28f306..a60b99e0ebdc 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,16 +20,16 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
20 struct inet_bind_hashbucket *bhead; 20 struct inet_bind_hashbucket *bhead;
21 struct inet_bind_bucket *tb; 21 struct inet_bind_bucket *tb;
22 /* Unlink from established hashes. */ 22 /* Unlink from established hashes. */
23 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash); 23 rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
24 24
25 write_lock(&ehead->lock); 25 write_lock(lock);
26 if (hlist_unhashed(&tw->tw_node)) { 26 if (hlist_unhashed(&tw->tw_node)) {
27 write_unlock(&ehead->lock); 27 write_unlock(lock);
28 return; 28 return;
29 } 29 }
30 __hlist_del(&tw->tw_node); 30 __hlist_del(&tw->tw_node);
31 sk_node_init(&tw->tw_node); 31 sk_node_init(&tw->tw_node);
32 write_unlock(&ehead->lock); 32 write_unlock(lock);
33 33
34 /* Disassociate with bind bucket. */ 34 /* Disassociate with bind bucket. */
35 bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)]; 35 bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
@@ -59,6 +59,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
59 const struct inet_sock *inet = inet_sk(sk); 59 const struct inet_sock *inet = inet_sk(sk);
60 const struct inet_connection_sock *icsk = inet_csk(sk); 60 const struct inet_connection_sock *icsk = inet_csk(sk);
61 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); 61 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
62 rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
62 struct inet_bind_hashbucket *bhead; 63 struct inet_bind_hashbucket *bhead;
63 /* Step 1: Put TW into bind hash. Original socket stays there too. 64 /* Step 1: Put TW into bind hash. Original socket stays there too.
64 Note, that any socket with inet->num != 0 MUST be bound in 65 Note, that any socket with inet->num != 0 MUST be bound in
@@ -71,7 +72,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
71 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); 72 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
72 spin_unlock(&bhead->lock); 73 spin_unlock(&bhead->lock);
73 74
74 write_lock(&ehead->lock); 75 write_lock(lock);
75 76
76 /* Step 2: Remove SK from established hash. */ 77 /* Step 2: Remove SK from established hash. */
77 if (__sk_del_node_init(sk)) 78 if (__sk_del_node_init(sk))
@@ -81,7 +82,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
81 inet_twsk_add_node(tw, &ehead->twchain); 82 inet_twsk_add_node(tw, &ehead->twchain);
82 atomic_inc(&tw->tw_refcnt); 83 atomic_inc(&tw->tw_refcnt);
83 84
84 write_unlock(&ehead->lock); 85 write_unlock(lock);
85} 86}
86 87
87EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); 88EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 771031dfbd0f..af995198f643 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -61,7 +61,7 @@
61 * 4. Global variable peer_total is modified under the pool lock. 61 * 4. Global variable peer_total is modified under the pool lock.
62 * 5. struct inet_peer fields modification: 62 * 5. struct inet_peer fields modification:
63 * avl_left, avl_right, avl_parent, avl_height: pool lock 63 * avl_left, avl_right, avl_parent, avl_height: pool lock
64 * unused_next, unused_prevp: unused node list lock 64 * unused: unused node list lock
65 * refcnt: atomically against modifications on other CPU; 65 * refcnt: atomically against modifications on other CPU;
66 * usually under some other lock to prevent node disappearing 66 * usually under some other lock to prevent node disappearing
67 * dtime: unused node list lock 67 * dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
94int inet_peer_gc_mintime __read_mostly = 10 * HZ; 94int inet_peer_gc_mintime __read_mostly = 10 * HZ;
95int inet_peer_gc_maxtime __read_mostly = 120 * HZ; 95int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
96 96
97static struct inet_peer *inet_peer_unused_head; 97static LIST_HEAD(unused_peers);
98static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
99static DEFINE_SPINLOCK(inet_peer_unused_lock); 98static DEFINE_SPINLOCK(inet_peer_unused_lock);
100 99
101static void peer_check_expire(unsigned long dummy); 100static void peer_check_expire(unsigned long dummy);
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
138static void unlink_from_unused(struct inet_peer *p) 137static void unlink_from_unused(struct inet_peer *p)
139{ 138{
140 spin_lock_bh(&inet_peer_unused_lock); 139 spin_lock_bh(&inet_peer_unused_lock);
141 if (p->unused_prevp != NULL) { 140 list_del_init(&p->unused);
142 /* On unused list. */
143 *p->unused_prevp = p->unused_next;
144 if (p->unused_next != NULL)
145 p->unused_next->unused_prevp = p->unused_prevp;
146 else
147 inet_peer_unused_tailp = p->unused_prevp;
148 p->unused_prevp = NULL; /* mark it as removed */
149 }
150 spin_unlock_bh(&inet_peer_unused_lock); 141 spin_unlock_bh(&inet_peer_unused_lock);
151} 142}
152 143
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
337/* May be called with local BH enabled. */ 328/* May be called with local BH enabled. */
338static int cleanup_once(unsigned long ttl) 329static int cleanup_once(unsigned long ttl)
339{ 330{
340 struct inet_peer *p; 331 struct inet_peer *p = NULL;
341 332
342 /* Remove the first entry from the list of unused nodes. */ 333 /* Remove the first entry from the list of unused nodes. */
343 spin_lock_bh(&inet_peer_unused_lock); 334 spin_lock_bh(&inet_peer_unused_lock);
344 p = inet_peer_unused_head; 335 if (!list_empty(&unused_peers)) {
345 if (p != NULL) { 336 __u32 delta;
346 __u32 delta = (__u32)jiffies - p->dtime; 337
338 p = list_first_entry(&unused_peers, struct inet_peer, unused);
339 delta = (__u32)jiffies - p->dtime;
340
347 if (delta < ttl) { 341 if (delta < ttl) {
348 /* Do not prune fresh entries. */ 342 /* Do not prune fresh entries. */
349 spin_unlock_bh(&inet_peer_unused_lock); 343 spin_unlock_bh(&inet_peer_unused_lock);
350 return -1; 344 return -1;
351 } 345 }
352 inet_peer_unused_head = p->unused_next; 346
353 if (p->unused_next != NULL) 347 list_del_init(&p->unused);
354 p->unused_next->unused_prevp = p->unused_prevp; 348
355 else
356 inet_peer_unused_tailp = p->unused_prevp;
357 p->unused_prevp = NULL; /* mark as not on the list */
358 /* Grab an extra reference to prevent node disappearing 349 /* Grab an extra reference to prevent node disappearing
359 * before unlink_from_pool() call. */ 350 * before unlink_from_pool() call. */
360 atomic_inc(&p->refcnt); 351 atomic_inc(&p->refcnt);
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
412 403
413 /* Link the node. */ 404 /* Link the node. */
414 link_to_pool(n); 405 link_to_pool(n);
415 n->unused_prevp = NULL; /* not on the list */ 406 INIT_LIST_HEAD(&n->unused);
416 peer_total++; 407 peer_total++;
417 write_unlock_bh(&peer_pool_lock); 408 write_unlock_bh(&peer_pool_lock);
418 409
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
467{ 458{
468 spin_lock_bh(&inet_peer_unused_lock); 459 spin_lock_bh(&inet_peer_unused_lock);
469 if (atomic_dec_and_test(&p->refcnt)) { 460 if (atomic_dec_and_test(&p->refcnt)) {
470 p->unused_prevp = inet_peer_unused_tailp; 461 list_add_tail(&p->unused, &unused_peers);
471 p->unused_next = NULL;
472 *inet_peer_unused_tailp = p;
473 inet_peer_unused_tailp = &p->unused_next;
474 p->dtime = (__u32)jiffies; 462 p->dtime = (__u32)jiffies;
475 } 463 }
476 spin_unlock_bh(&inet_peer_unused_lock); 464 spin_unlock_bh(&inet_peer_unused_lock);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e5f7dc2de303..fd99fbd685ea 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1183,6 +1183,17 @@ error:
1183 return err; 1183 return err;
1184} 1184}
1185 1185
1186static void ip_cork_release(struct inet_sock *inet)
1187{
1188 inet->cork.flags &= ~IPCORK_OPT;
1189 kfree(inet->cork.opt);
1190 inet->cork.opt = NULL;
1191 if (inet->cork.rt) {
1192 ip_rt_put(inet->cork.rt);
1193 inet->cork.rt = NULL;
1194 }
1195}
1196
1186/* 1197/*
1187 * Combined all pending IP fragments on the socket as one IP datagram 1198 * Combined all pending IP fragments on the socket as one IP datagram
1188 * and push them out. 1199 * and push them out.
@@ -1276,13 +1287,7 @@ int ip_push_pending_frames(struct sock *sk)
1276 } 1287 }
1277 1288
1278out: 1289out:
1279 inet->cork.flags &= ~IPCORK_OPT; 1290 ip_cork_release(inet);
1280 kfree(inet->cork.opt);
1281 inet->cork.opt = NULL;
1282 if (inet->cork.rt) {
1283 ip_rt_put(inet->cork.rt);
1284 inet->cork.rt = NULL;
1285 }
1286 return err; 1291 return err;
1287 1292
1288error: 1293error:
@@ -1295,19 +1300,12 @@ error:
1295 */ 1300 */
1296void ip_flush_pending_frames(struct sock *sk) 1301void ip_flush_pending_frames(struct sock *sk)
1297{ 1302{
1298 struct inet_sock *inet = inet_sk(sk);
1299 struct sk_buff *skb; 1303 struct sk_buff *skb;
1300 1304
1301 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 1305 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1302 kfree_skb(skb); 1306 kfree_skb(skb);
1303 1307
1304 inet->cork.flags &= ~IPCORK_OPT; 1308 ip_cork_release(inet_sk(sk));
1305 kfree(inet->cork.opt);
1306 inet->cork.opt = NULL;
1307 if (inet->cork.rt) {
1308 ip_rt_put(inet->cork.rt);
1309 inet->cork.rt = NULL;
1310 }
1311} 1309}
1312 1310
1313 1311
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f51f20e487c8..82817e554363 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -437,10 +437,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
437 437
438 /* If optlen==0, it is equivalent to val == 0 */ 438 /* If optlen==0, it is equivalent to val == 0 */
439 439
440#ifdef CONFIG_IP_MROUTE 440 if (ip_mroute_opt(optname))
441 if (optname >= MRT_BASE && optname <= (MRT_BASE + 10))
442 return ip_mroute_setsockopt(sk,optname,optval,optlen); 441 return ip_mroute_setsockopt(sk,optname,optval,optlen);
443#endif
444 442
445 err = 0; 443 err = 0;
446 lock_sock(sk); 444 lock_sock(sk);
@@ -909,11 +907,9 @@ int ip_setsockopt(struct sock *sk, int level,
909#ifdef CONFIG_NETFILTER 907#ifdef CONFIG_NETFILTER
910 /* we need to exclude all possible ENOPROTOOPTs except default case */ 908 /* we need to exclude all possible ENOPROTOOPTs except default case */
911 if (err == -ENOPROTOOPT && optname != IP_HDRINCL && 909 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
912 optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY 910 optname != IP_IPSEC_POLICY &&
913#ifdef CONFIG_IP_MROUTE 911 optname != IP_XFRM_POLICY &&
914 && (optname < MRT_BASE || optname > (MRT_BASE + 10)) 912 !ip_mroute_opt(optname)) {
915#endif
916 ) {
917 lock_sock(sk); 913 lock_sock(sk);
918 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); 914 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
919 release_sock(sk); 915 release_sock(sk);
@@ -935,11 +931,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
935#ifdef CONFIG_NETFILTER 931#ifdef CONFIG_NETFILTER
936 /* we need to exclude all possible ENOPROTOOPTs except default case */ 932 /* we need to exclude all possible ENOPROTOOPTs except default case */
937 if (err == -ENOPROTOOPT && optname != IP_HDRINCL && 933 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
938 optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY 934 optname != IP_IPSEC_POLICY &&
939#ifdef CONFIG_IP_MROUTE 935 optname != IP_XFRM_POLICY &&
940 && (optname < MRT_BASE || optname > (MRT_BASE + 10)) 936 !ip_mroute_opt(optname)) {
941#endif
942 ) {
943 lock_sock(sk); 937 lock_sock(sk);
944 err = compat_nf_setsockopt(sk, PF_INET, optname, 938 err = compat_nf_setsockopt(sk, PF_INET, optname,
945 optval, optlen); 939 optval, optlen);
@@ -967,11 +961,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
967 if (level != SOL_IP) 961 if (level != SOL_IP)
968 return -EOPNOTSUPP; 962 return -EOPNOTSUPP;
969 963
970#ifdef CONFIG_IP_MROUTE 964 if (ip_mroute_opt(optname))
971 if (optname >= MRT_BASE && optname <= MRT_BASE+10) {
972 return ip_mroute_getsockopt(sk,optname,optval,optlen); 965 return ip_mroute_getsockopt(sk,optname,optval,optlen);
973 }
974#endif
975 966
976 if (get_user(len,optlen)) 967 if (get_user(len,optlen))
977 return -EFAULT; 968 return -EFAULT;
@@ -1171,11 +1162,8 @@ int ip_getsockopt(struct sock *sk, int level,
1171 err = do_ip_getsockopt(sk, level, optname, optval, optlen); 1162 err = do_ip_getsockopt(sk, level, optname, optval, optlen);
1172#ifdef CONFIG_NETFILTER 1163#ifdef CONFIG_NETFILTER
1173 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1164 /* we need to exclude all possible ENOPROTOOPTs except default case */
1174 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS 1165 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1175#ifdef CONFIG_IP_MROUTE 1166 !ip_mroute_opt(optname)) {
1176 && (optname < MRT_BASE || optname > MRT_BASE+10)
1177#endif
1178 ) {
1179 int len; 1167 int len;
1180 1168
1181 if (get_user(len,optlen)) 1169 if (get_user(len,optlen))
@@ -1200,11 +1188,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1200 int err = do_ip_getsockopt(sk, level, optname, optval, optlen); 1188 int err = do_ip_getsockopt(sk, level, optname, optval, optlen);
1201#ifdef CONFIG_NETFILTER 1189#ifdef CONFIG_NETFILTER
1202 /* we need to exclude all possible ENOPROTOOPTs except default case */ 1190 /* we need to exclude all possible ENOPROTOOPTs except default case */
1203 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS 1191 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1204#ifdef CONFIG_IP_MROUTE 1192 !ip_mroute_opt(optname)) {
1205 && (optname < MRT_BASE || optname > MRT_BASE+10)
1206#endif
1207 ) {
1208 int len; 1193 int len;
1209 1194
1210 if (get_user(len, optlen)) 1195 if (get_user(len, optlen))
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 0bfeb02a5f87..2c44a94c2135 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -14,9 +14,9 @@
14 * - Adaptive compression. 14 * - Adaptive compression.
15 */ 15 */
16#include <linux/module.h> 16#include <linux/module.h>
17#include <asm/scatterlist.h>
18#include <asm/semaphore.h> 17#include <asm/semaphore.h>
19#include <linux/crypto.h> 18#include <linux/crypto.h>
19#include <linux/err.h>
20#include <linux/pfkeyv2.h> 20#include <linux/pfkeyv2.h>
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
@@ -345,7 +345,7 @@ static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
345 for_each_possible_cpu(cpu) { 345 for_each_possible_cpu(cpu) {
346 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 346 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
347 CRYPTO_ALG_ASYNC); 347 CRYPTO_ALG_ASYNC);
348 if (!tfm) 348 if (IS_ERR(tfm))
349 goto error; 349 goto error;
350 *per_cpu_ptr(tfms, cpu) = tfm; 350 *per_cpu_ptr(tfms, cpu) = tfm;
351 } 351 }
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 4b702f708d30..0a9f3c37e18d 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -426,6 +426,24 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
426 426
427 427
428/* 428/*
429 * Check if there is a destination for the connection, if so
430 * bind the connection to the destination.
431 */
432struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
433{
434 struct ip_vs_dest *dest;
435
436 if ((cp) && (!cp->dest)) {
437 dest = ip_vs_find_dest(cp->daddr, cp->dport,
438 cp->vaddr, cp->vport, cp->protocol);
439 ip_vs_bind_dest(cp, dest);
440 return dest;
441 } else
442 return NULL;
443}
444
445
446/*
429 * Unbind a connection entry with its VS destination 447 * Unbind a connection entry with its VS destination
430 * Called by the ip_vs_conn_expire function. 448 * Called by the ip_vs_conn_expire function.
431 */ 449 */
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index c6ed7654e839..8fba20256f52 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -637,7 +637,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
637 verdict = NF_DROP; 637 verdict = NF_DROP;
638 638
639 if (IP_VS_FWD_METHOD(cp) != 0) { 639 if (IP_VS_FWD_METHOD(cp) != 0) {
640 IP_VS_ERR("shouldn't reach here, because the box is on the" 640 IP_VS_ERR("shouldn't reach here, because the box is on the "
641 "half connection in the tun/dr module.\n"); 641 "half connection in the tun/dr module.\n");
642 } 642 }
643 643
@@ -979,15 +979,23 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
979 ret = NF_ACCEPT; 979 ret = NF_ACCEPT;
980 } 980 }
981 981
982 /* increase its packet counter and check if it is needed 982 /* Increase its packet counter and check if it is needed
983 to be synchronized */ 983 * to be synchronized
984 *
985 * Sync connection if it is about to close to
 986 * encourage the standby servers to update the connections timeout
987 */
984 atomic_inc(&cp->in_pkts); 988 atomic_inc(&cp->in_pkts);
985 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && 989 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) &&
986 (cp->protocol != IPPROTO_TCP || 990 (((cp->protocol != IPPROTO_TCP ||
987 cp->state == IP_VS_TCP_S_ESTABLISHED) && 991 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
988 (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] 992 (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
989 == sysctl_ip_vs_sync_threshold[0])) 993 == sysctl_ip_vs_sync_threshold[0])) ||
994 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
995 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
996 (cp->state == IP_VS_TCP_S_CLOSE)))))
990 ip_vs_sync_conn(cp); 997 ip_vs_sync_conn(cp);
998 cp->old_state = cp->state;
991 999
992 ip_vs_conn_put(cp); 1000 ip_vs_conn_put(cp);
993 return ret; 1001 return ret;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 7345fc252a23..693d92490c11 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -579,6 +579,31 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
579 return NULL; 579 return NULL;
580} 580}
581 581
582/*
583 * Find destination by {daddr,dport,vaddr,protocol}
 584 * Created to be used in ip_vs_process_message() in
585 * the backup synchronization daemon. It finds the
586 * destination to be bound to the received connection
587 * on the backup.
588 *
 589 * ip_vs_lookup_real_service() looked promising, but
 590 * seems not to work as expected.
591 */
592struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
593 __be32 vaddr, __be16 vport, __u16 protocol)
594{
595 struct ip_vs_dest *dest;
596 struct ip_vs_service *svc;
597
598 svc = ip_vs_service_get(0, protocol, vaddr, vport);
599 if (!svc)
600 return NULL;
601 dest = ip_vs_lookup_dest(svc, daddr, dport);
602 if (dest)
603 atomic_inc(&dest->refcnt);
604 ip_vs_service_put(svc);
605 return dest;
606}
582 607
583/* 608/*
584 * Lookup dest by {svc,addr,port} in the destination trash. 609 * Lookup dest by {svc,addr,port} in the destination trash.
@@ -1399,7 +1424,6 @@ proc_do_sync_threshold(ctl_table *table, int write, struct file *filp,
1399 1424
1400static struct ctl_table vs_vars[] = { 1425static struct ctl_table vs_vars[] = {
1401 { 1426 {
1402 .ctl_name = NET_IPV4_VS_AMEMTHRESH,
1403 .procname = "amemthresh", 1427 .procname = "amemthresh",
1404 .data = &sysctl_ip_vs_amemthresh, 1428 .data = &sysctl_ip_vs_amemthresh,
1405 .maxlen = sizeof(int), 1429 .maxlen = sizeof(int),
@@ -1408,7 +1432,6 @@ static struct ctl_table vs_vars[] = {
1408 }, 1432 },
1409#ifdef CONFIG_IP_VS_DEBUG 1433#ifdef CONFIG_IP_VS_DEBUG
1410 { 1434 {
1411 .ctl_name = NET_IPV4_VS_DEBUG_LEVEL,
1412 .procname = "debug_level", 1435 .procname = "debug_level",
1413 .data = &sysctl_ip_vs_debug_level, 1436 .data = &sysctl_ip_vs_debug_level,
1414 .maxlen = sizeof(int), 1437 .maxlen = sizeof(int),
@@ -1417,7 +1440,6 @@ static struct ctl_table vs_vars[] = {
1417 }, 1440 },
1418#endif 1441#endif
1419 { 1442 {
1420 .ctl_name = NET_IPV4_VS_AMDROPRATE,
1421 .procname = "am_droprate", 1443 .procname = "am_droprate",
1422 .data = &sysctl_ip_vs_am_droprate, 1444 .data = &sysctl_ip_vs_am_droprate,
1423 .maxlen = sizeof(int), 1445 .maxlen = sizeof(int),
@@ -1425,7 +1447,6 @@ static struct ctl_table vs_vars[] = {
1425 .proc_handler = &proc_dointvec, 1447 .proc_handler = &proc_dointvec,
1426 }, 1448 },
1427 { 1449 {
1428 .ctl_name = NET_IPV4_VS_DROP_ENTRY,
1429 .procname = "drop_entry", 1450 .procname = "drop_entry",
1430 .data = &sysctl_ip_vs_drop_entry, 1451 .data = &sysctl_ip_vs_drop_entry,
1431 .maxlen = sizeof(int), 1452 .maxlen = sizeof(int),
@@ -1433,7 +1454,6 @@ static struct ctl_table vs_vars[] = {
1433 .proc_handler = &proc_do_defense_mode, 1454 .proc_handler = &proc_do_defense_mode,
1434 }, 1455 },
1435 { 1456 {
1436 .ctl_name = NET_IPV4_VS_DROP_PACKET,
1437 .procname = "drop_packet", 1457 .procname = "drop_packet",
1438 .data = &sysctl_ip_vs_drop_packet, 1458 .data = &sysctl_ip_vs_drop_packet,
1439 .maxlen = sizeof(int), 1459 .maxlen = sizeof(int),
@@ -1441,7 +1461,6 @@ static struct ctl_table vs_vars[] = {
1441 .proc_handler = &proc_do_defense_mode, 1461 .proc_handler = &proc_do_defense_mode,
1442 }, 1462 },
1443 { 1463 {
1444 .ctl_name = NET_IPV4_VS_SECURE_TCP,
1445 .procname = "secure_tcp", 1464 .procname = "secure_tcp",
1446 .data = &sysctl_ip_vs_secure_tcp, 1465 .data = &sysctl_ip_vs_secure_tcp,
1447 .maxlen = sizeof(int), 1466 .maxlen = sizeof(int),
@@ -1450,7 +1469,6 @@ static struct ctl_table vs_vars[] = {
1450 }, 1469 },
1451#if 0 1470#if 0
1452 { 1471 {
1453 .ctl_name = NET_IPV4_VS_TO_ES,
1454 .procname = "timeout_established", 1472 .procname = "timeout_established",
1455 .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED], 1473 .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
1456 .maxlen = sizeof(int), 1474 .maxlen = sizeof(int),
@@ -1458,7 +1476,6 @@ static struct ctl_table vs_vars[] = {
1458 .proc_handler = &proc_dointvec_jiffies, 1476 .proc_handler = &proc_dointvec_jiffies,
1459 }, 1477 },
1460 { 1478 {
1461 .ctl_name = NET_IPV4_VS_TO_SS,
1462 .procname = "timeout_synsent", 1479 .procname = "timeout_synsent",
1463 .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT], 1480 .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
1464 .maxlen = sizeof(int), 1481 .maxlen = sizeof(int),
@@ -1466,7 +1483,6 @@ static struct ctl_table vs_vars[] = {
1466 .proc_handler = &proc_dointvec_jiffies, 1483 .proc_handler = &proc_dointvec_jiffies,
1467 }, 1484 },
1468 { 1485 {
1469 .ctl_name = NET_IPV4_VS_TO_SR,
1470 .procname = "timeout_synrecv", 1486 .procname = "timeout_synrecv",
1471 .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV], 1487 .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
1472 .maxlen = sizeof(int), 1488 .maxlen = sizeof(int),
@@ -1474,7 +1490,6 @@ static struct ctl_table vs_vars[] = {
1474 .proc_handler = &proc_dointvec_jiffies, 1490 .proc_handler = &proc_dointvec_jiffies,
1475 }, 1491 },
1476 { 1492 {
1477 .ctl_name = NET_IPV4_VS_TO_FW,
1478 .procname = "timeout_finwait", 1493 .procname = "timeout_finwait",
1479 .data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT], 1494 .data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
1480 .maxlen = sizeof(int), 1495 .maxlen = sizeof(int),
@@ -1482,7 +1497,6 @@ static struct ctl_table vs_vars[] = {
1482 .proc_handler = &proc_dointvec_jiffies, 1497 .proc_handler = &proc_dointvec_jiffies,
1483 }, 1498 },
1484 { 1499 {
1485 .ctl_name = NET_IPV4_VS_TO_TW,
1486 .procname = "timeout_timewait", 1500 .procname = "timeout_timewait",
1487 .data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT], 1501 .data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
1488 .maxlen = sizeof(int), 1502 .maxlen = sizeof(int),
@@ -1490,7 +1504,6 @@ static struct ctl_table vs_vars[] = {
1490 .proc_handler = &proc_dointvec_jiffies, 1504 .proc_handler = &proc_dointvec_jiffies,
1491 }, 1505 },
1492 { 1506 {
1493 .ctl_name = NET_IPV4_VS_TO_CL,
1494 .procname = "timeout_close", 1507 .procname = "timeout_close",
1495 .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE], 1508 .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
1496 .maxlen = sizeof(int), 1509 .maxlen = sizeof(int),
@@ -1498,7 +1511,6 @@ static struct ctl_table vs_vars[] = {
1498 .proc_handler = &proc_dointvec_jiffies, 1511 .proc_handler = &proc_dointvec_jiffies,
1499 }, 1512 },
1500 { 1513 {
1501 .ctl_name = NET_IPV4_VS_TO_CW,
1502 .procname = "timeout_closewait", 1514 .procname = "timeout_closewait",
1503 .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT], 1515 .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
1504 .maxlen = sizeof(int), 1516 .maxlen = sizeof(int),
@@ -1506,7 +1518,6 @@ static struct ctl_table vs_vars[] = {
1506 .proc_handler = &proc_dointvec_jiffies, 1518 .proc_handler = &proc_dointvec_jiffies,
1507 }, 1519 },
1508 { 1520 {
1509 .ctl_name = NET_IPV4_VS_TO_LA,
1510 .procname = "timeout_lastack", 1521 .procname = "timeout_lastack",
1511 .data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK], 1522 .data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
1512 .maxlen = sizeof(int), 1523 .maxlen = sizeof(int),
@@ -1514,7 +1525,6 @@ static struct ctl_table vs_vars[] = {
1514 .proc_handler = &proc_dointvec_jiffies, 1525 .proc_handler = &proc_dointvec_jiffies,
1515 }, 1526 },
1516 { 1527 {
1517 .ctl_name = NET_IPV4_VS_TO_LI,
1518 .procname = "timeout_listen", 1528 .procname = "timeout_listen",
1519 .data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN], 1529 .data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
1520 .maxlen = sizeof(int), 1530 .maxlen = sizeof(int),
@@ -1522,7 +1532,6 @@ static struct ctl_table vs_vars[] = {
1522 .proc_handler = &proc_dointvec_jiffies, 1532 .proc_handler = &proc_dointvec_jiffies,
1523 }, 1533 },
1524 { 1534 {
1525 .ctl_name = NET_IPV4_VS_TO_SA,
1526 .procname = "timeout_synack", 1535 .procname = "timeout_synack",
1527 .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK], 1536 .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
1528 .maxlen = sizeof(int), 1537 .maxlen = sizeof(int),
@@ -1530,7 +1539,6 @@ static struct ctl_table vs_vars[] = {
1530 .proc_handler = &proc_dointvec_jiffies, 1539 .proc_handler = &proc_dointvec_jiffies,
1531 }, 1540 },
1532 { 1541 {
1533 .ctl_name = NET_IPV4_VS_TO_UDP,
1534 .procname = "timeout_udp", 1542 .procname = "timeout_udp",
1535 .data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP], 1543 .data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
1536 .maxlen = sizeof(int), 1544 .maxlen = sizeof(int),
@@ -1538,7 +1546,6 @@ static struct ctl_table vs_vars[] = {
1538 .proc_handler = &proc_dointvec_jiffies, 1546 .proc_handler = &proc_dointvec_jiffies,
1539 }, 1547 },
1540 { 1548 {
1541 .ctl_name = NET_IPV4_VS_TO_ICMP,
1542 .procname = "timeout_icmp", 1549 .procname = "timeout_icmp",
1543 .data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP], 1550 .data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
1544 .maxlen = sizeof(int), 1551 .maxlen = sizeof(int),
@@ -1547,7 +1554,6 @@ static struct ctl_table vs_vars[] = {
1547 }, 1554 },
1548#endif 1555#endif
1549 { 1556 {
1550 .ctl_name = NET_IPV4_VS_CACHE_BYPASS,
1551 .procname = "cache_bypass", 1557 .procname = "cache_bypass",
1552 .data = &sysctl_ip_vs_cache_bypass, 1558 .data = &sysctl_ip_vs_cache_bypass,
1553 .maxlen = sizeof(int), 1559 .maxlen = sizeof(int),
@@ -1555,7 +1561,6 @@ static struct ctl_table vs_vars[] = {
1555 .proc_handler = &proc_dointvec, 1561 .proc_handler = &proc_dointvec,
1556 }, 1562 },
1557 { 1563 {
1558 .ctl_name = NET_IPV4_VS_EXPIRE_NODEST_CONN,
1559 .procname = "expire_nodest_conn", 1564 .procname = "expire_nodest_conn",
1560 .data = &sysctl_ip_vs_expire_nodest_conn, 1565 .data = &sysctl_ip_vs_expire_nodest_conn,
1561 .maxlen = sizeof(int), 1566 .maxlen = sizeof(int),
@@ -1563,7 +1568,6 @@ static struct ctl_table vs_vars[] = {
1563 .proc_handler = &proc_dointvec, 1568 .proc_handler = &proc_dointvec,
1564 }, 1569 },
1565 { 1570 {
1566 .ctl_name = NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE,
1567 .procname = "expire_quiescent_template", 1571 .procname = "expire_quiescent_template",
1568 .data = &sysctl_ip_vs_expire_quiescent_template, 1572 .data = &sysctl_ip_vs_expire_quiescent_template,
1569 .maxlen = sizeof(int), 1573 .maxlen = sizeof(int),
@@ -1571,7 +1575,6 @@ static struct ctl_table vs_vars[] = {
1571 .proc_handler = &proc_dointvec, 1575 .proc_handler = &proc_dointvec,
1572 }, 1576 },
1573 { 1577 {
1574 .ctl_name = NET_IPV4_VS_SYNC_THRESHOLD,
1575 .procname = "sync_threshold", 1578 .procname = "sync_threshold",
1576 .data = &sysctl_ip_vs_sync_threshold, 1579 .data = &sysctl_ip_vs_sync_threshold,
1577 .maxlen = sizeof(sysctl_ip_vs_sync_threshold), 1580 .maxlen = sizeof(sysctl_ip_vs_sync_threshold),
@@ -1579,7 +1582,6 @@ static struct ctl_table vs_vars[] = {
1579 .proc_handler = &proc_do_sync_threshold, 1582 .proc_handler = &proc_do_sync_threshold,
1580 }, 1583 },
1581 { 1584 {
1582 .ctl_name = NET_IPV4_VS_NAT_ICMP_SEND,
1583 .procname = "nat_icmp_send", 1585 .procname = "nat_icmp_send",
1584 .data = &sysctl_ip_vs_nat_icmp_send, 1586 .data = &sysctl_ip_vs_nat_icmp_send,
1585 .maxlen = sizeof(int), 1587 .maxlen = sizeof(int),
@@ -1591,7 +1593,6 @@ static struct ctl_table vs_vars[] = {
1591 1593
1592static ctl_table vs_table[] = { 1594static ctl_table vs_table[] = {
1593 { 1595 {
1594 .ctl_name = NET_IPV4_VS,
1595 .procname = "vs", 1596 .procname = "vs",
1596 .mode = 0555, 1597 .mode = 0555,
1597 .child = vs_vars 1598 .child = vs_vars
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 052f4ed59174..b843a11d7cf7 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -114,7 +114,6 @@ struct ip_vs_lblc_table {
114 114
115static ctl_table vs_vars_table[] = { 115static ctl_table vs_vars_table[] = {
116 { 116 {
117 .ctl_name = NET_IPV4_VS_LBLC_EXPIRE,
118 .procname = "lblc_expiration", 117 .procname = "lblc_expiration",
119 .data = &sysctl_ip_vs_lblc_expiration, 118 .data = &sysctl_ip_vs_lblc_expiration,
120 .maxlen = sizeof(int), 119 .maxlen = sizeof(int),
@@ -126,7 +125,6 @@ static ctl_table vs_vars_table[] = {
126 125
127static ctl_table vs_table[] = { 126static ctl_table vs_table[] = {
128 { 127 {
129 .ctl_name = NET_IPV4_VS,
130 .procname = "vs", 128 .procname = "vs",
131 .mode = 0555, 129 .mode = 0555,
132 .child = vs_vars_table 130 .child = vs_vars_table
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 6a1fec416eaf..e5b323a6b2f7 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -48,8 +48,6 @@
48/* for sysctl */ 48/* for sysctl */
49#include <linux/fs.h> 49#include <linux/fs.h>
50#include <linux/sysctl.h> 50#include <linux/sysctl.h>
51/* for proc_net_create/proc_net_remove */
52#include <linux/proc_fs.h>
53#include <net/net_namespace.h> 51#include <net/net_namespace.h>
54 52
55#include <net/ip_vs.h> 53#include <net/ip_vs.h>
@@ -304,7 +302,6 @@ struct ip_vs_lblcr_table {
304 302
305static ctl_table vs_vars_table[] = { 303static ctl_table vs_vars_table[] = {
306 { 304 {
307 .ctl_name = NET_IPV4_VS_LBLCR_EXPIRE,
308 .procname = "lblcr_expiration", 305 .procname = "lblcr_expiration",
309 .data = &sysctl_ip_vs_lblcr_expiration, 306 .data = &sysctl_ip_vs_lblcr_expiration,
310 .maxlen = sizeof(int), 307 .maxlen = sizeof(int),
@@ -316,7 +313,6 @@ static ctl_table vs_vars_table[] = {
316 313
317static ctl_table vs_table[] = { 314static ctl_table vs_table[] = {
318 { 315 {
319 .ctl_name = NET_IPV4_VS,
320 .procname = "vs", 316 .procname = "vs",
321 .mode = 0555, 317 .mode = 0555,
322 .child = vs_vars_table 318 .child = vs_vars_table
@@ -547,71 +543,6 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
547 mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL); 543 mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
548} 544}
549 545
550
551#ifdef CONFIG_IP_VS_LBLCR_DEBUG
552static struct ip_vs_lblcr_table *lblcr_table_list;
553
554/*
555 * /proc/net/ip_vs_lblcr to display the mappings of
556 * destination IP address <==> its serverSet
557 */
558static int
559ip_vs_lblcr_getinfo(char *buffer, char **start, off_t offset, int length)
560{
561 off_t pos=0, begin;
562 int len=0, size;
563 struct ip_vs_lblcr_table *tbl;
564 unsigned long now = jiffies;
565 int i;
566 struct ip_vs_lblcr_entry *en;
567
568 tbl = lblcr_table_list;
569
570 size = sprintf(buffer, "LastTime Dest IP address Server set\n");
571 pos += size;
572 len += size;
573
574 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
575 read_lock_bh(&tbl->lock);
576 list_for_each_entry(en, &tbl->bucket[i], list) {
577 char tbuf[16];
578 struct ip_vs_dest_list *d;
579
580 sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(en->addr));
581 size = sprintf(buffer+len, "%8lu %-16s ",
582 now-en->lastuse, tbuf);
583
584 read_lock(&en->set.lock);
585 for (d=en->set.list; d!=NULL; d=d->next) {
586 size += sprintf(buffer+len+size,
587 "%u.%u.%u.%u ",
588 NIPQUAD(d->dest->addr));
589 }
590 read_unlock(&en->set.lock);
591 size += sprintf(buffer+len+size, "\n");
592 len += size;
593 pos += size;
594 if (pos <= offset)
595 len=0;
596 if (pos >= offset+length) {
597 read_unlock_bh(&tbl->lock);
598 goto done;
599 }
600 }
601 read_unlock_bh(&tbl->lock);
602 }
603
604 done:
605 begin = len - (pos - offset);
606 *start = buffer + begin;
607 len -= begin;
608 if(len>length)
609 len = length;
610 return len;
611}
612#endif
613
614
615static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) 546static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
616{ 547{
617 int i; 548 int i;
@@ -650,9 +581,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
650 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 581 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
651 add_timer(&tbl->periodic_timer); 582 add_timer(&tbl->periodic_timer);
652 583
653#ifdef CONFIG_IP_VS_LBLCR_DEBUG
654 lblcr_table_list = tbl;
655#endif
656 return 0; 584 return 0;
657} 585}
658 586
@@ -843,18 +771,12 @@ static int __init ip_vs_lblcr_init(void)
843{ 771{
844 INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list); 772 INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
845 sysctl_header = register_sysctl_table(lblcr_root_table); 773 sysctl_header = register_sysctl_table(lblcr_root_table);
846#ifdef CONFIG_IP_VS_LBLCR_DEBUG
847 proc_net_create(&init_net, "ip_vs_lblcr", 0, ip_vs_lblcr_getinfo);
848#endif
849 return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 774 return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
850} 775}
851 776
852 777
853static void __exit ip_vs_lblcr_cleanup(void) 778static void __exit ip_vs_lblcr_cleanup(void)
854{ 779{
855#ifdef CONFIG_IP_VS_LBLCR_DEBUG
856 proc_net_remove(&init_net, "ip_vs_lblcr");
857#endif
858 unregister_sysctl_table(sysctl_header); 780 unregister_sysctl_table(sysctl_header);
859 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 781 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
860} 782}
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index e844ddb82b9a..c0e11ec8f0f9 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -45,7 +45,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
45/* 45/*
46 * register an ipvs protocol 46 * register an ipvs protocol
47 */ 47 */
48static int register_ip_vs_protocol(struct ip_vs_protocol *pp) 48static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
49{ 49{
50 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 50 unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
51 51
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index c99f2a33fb9e..bd930efc18da 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -72,7 +72,6 @@ struct ip_vs_sync_thread_data {
72 int state; 72 int state;
73}; 73};
74 74
75#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
76#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) 75#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
77#define FULL_CONN_SIZE \ 76#define FULL_CONN_SIZE \
78(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options)) 77(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
@@ -284,6 +283,8 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
284 struct ip_vs_sync_conn *s; 283 struct ip_vs_sync_conn *s;
285 struct ip_vs_sync_conn_options *opt; 284 struct ip_vs_sync_conn_options *opt;
286 struct ip_vs_conn *cp; 285 struct ip_vs_conn *cp;
286 struct ip_vs_protocol *pp;
287 struct ip_vs_dest *dest;
287 char *p; 288 char *p;
288 int i; 289 int i;
289 290
@@ -317,20 +318,34 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
317 s->caddr, s->cport, 318 s->caddr, s->cport,
318 s->vaddr, s->vport); 319 s->vaddr, s->vport);
319 if (!cp) { 320 if (!cp) {
321 /*
322 * Find the appropriate destination for the connection.
323 * If it is not found the connection will remain unbound
324 * but still handled.
325 */
326 dest = ip_vs_find_dest(s->daddr, s->dport,
327 s->vaddr, s->vport,
328 s->protocol);
320 cp = ip_vs_conn_new(s->protocol, 329 cp = ip_vs_conn_new(s->protocol,
321 s->caddr, s->cport, 330 s->caddr, s->cport,
322 s->vaddr, s->vport, 331 s->vaddr, s->vport,
323 s->daddr, s->dport, 332 s->daddr, s->dport,
324 flags, NULL); 333 flags, dest);
334 if (dest)
335 atomic_dec(&dest->refcnt);
325 if (!cp) { 336 if (!cp) {
326 IP_VS_ERR("ip_vs_conn_new failed\n"); 337 IP_VS_ERR("ip_vs_conn_new failed\n");
327 return; 338 return;
328 } 339 }
329 cp->state = ntohs(s->state); 340 cp->state = ntohs(s->state);
330 } else if (!cp->dest) { 341 } else if (!cp->dest) {
331 /* it is an entry created by the synchronization */ 342 dest = ip_vs_try_bind_dest(cp);
332 cp->state = ntohs(s->state); 343 if (!dest) {
333 cp->flags = flags | IP_VS_CONN_F_HASHED; 344 /* it is an unbound entry created by
345 * synchronization */
346 cp->flags = flags | IP_VS_CONN_F_HASHED;
347 } else
348 atomic_dec(&dest->refcnt);
334 } /* Note that we don't touch its state and flags 349 } /* Note that we don't touch its state and flags
335 if it is a normal entry. */ 350 if it is a normal entry. */
336 351
@@ -342,7 +357,9 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
342 p += SIMPLE_CONN_SIZE; 357 p += SIMPLE_CONN_SIZE;
343 358
344 atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); 359 atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
345 cp->timeout = IP_VS_SYNC_CONN_TIMEOUT; 360 cp->state = ntohs(s->state);
361 pp = ip_vs_proto_get(s->protocol);
362 cp->timeout = pp->timeout_table[cp->state];
346 ip_vs_conn_put(cp); 363 ip_vs_conn_put(cp);
347 364
348 if (p > buffer+buflen) { 365 if (p > buffer+buflen) {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index fa97947c6ae1..9aca9c55687c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -128,7 +128,7 @@ config IP_NF_MATCH_ADDRTYPE
128 eg. UNICAST, LOCAL, BROADCAST, ... 128 eg. UNICAST, LOCAL, BROADCAST, ...
129 129
130 If you want to compile it as a module, say M here and read 130 If you want to compile it as a module, say M here and read
131 <file:Documentation/modules.txt>. If unsure, say `N'. 131 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
132 132
133# `filter', generic and specific targets 133# `filter', generic and specific targets
134config IP_NF_FILTER 134config IP_NF_FILTER
@@ -371,7 +371,7 @@ config IP_NF_RAW
371 and OUTPUT chains. 371 and OUTPUT chains.
372 372
373 If you want to compile it as a module, say M here and read 373 If you want to compile it as a module, say M here and read
374 <file:Documentation/modules.txt>. If unsure, say `N'. 374 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
375 375
376# ARP tables 376# ARP tables
377config IP_NF_ARPTABLES 377config IP_NF_ARPTABLES
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 409d273f6f82..7456833d6ade 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -41,27 +41,27 @@ obj-$(CONFIG_NF_NAT) += iptable_nat.o
41obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o 41obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
42 42
43# matches 43# matches
44obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
45obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
46obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
44obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o 47obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
45obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o 48obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
46obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
47obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o 49obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
48obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o 50obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
49obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
50obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o 51obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
51obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
52 52
53# targets 53# targets
54obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o 54obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
55obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
56obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o 55obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
56obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
57obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o 57obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
58obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
59obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o 58obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
59obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
60obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
60obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o 61obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
61obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o 62obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
62obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
63obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
64obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o 63obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
64obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
65 65
66# generic ARP tables 66# generic ARP tables
67obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o 67obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 10a2ce09fd8e..14d64a383db1 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -22,6 +22,7 @@
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/sysctl.h> 23#include <linux/sysctl.h>
24#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
25#include <linux/security.h> 26#include <linux/security.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
27#include <net/net_namespace.h> 28#include <net/net_namespace.h>
@@ -607,15 +608,11 @@ static ctl_table ipq_root_table[] = {
607 { .ctl_name = 0 } 608 { .ctl_name = 0 }
608}; 609};
609 610
610#ifdef CONFIG_PROC_FS 611static int ip_queue_show(struct seq_file *m, void *v)
611static int
612ipq_get_info(char *buffer, char **start, off_t offset, int length)
613{ 612{
614 int len;
615
616 read_lock_bh(&queue_lock); 613 read_lock_bh(&queue_lock);
617 614
618 len = sprintf(buffer, 615 seq_printf(m,
619 "Peer PID : %d\n" 616 "Peer PID : %d\n"
620 "Copy mode : %hu\n" 617 "Copy mode : %hu\n"
621 "Copy range : %u\n" 618 "Copy range : %u\n"
@@ -632,16 +629,21 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
632 queue_user_dropped); 629 queue_user_dropped);
633 630
634 read_unlock_bh(&queue_lock); 631 read_unlock_bh(&queue_lock);
632 return 0;
633}
635 634
636 *start = buffer + offset; 635static int ip_queue_open(struct inode *inode, struct file *file)
637 len -= offset; 636{
638 if (len > length) 637 return single_open(file, ip_queue_show, NULL);
639 len = length;
640 else if (len < 0)
641 len = 0;
642 return len;
643} 638}
644#endif /* CONFIG_PROC_FS */ 639
640static const struct file_operations ip_queue_proc_fops = {
641 .open = ip_queue_open,
642 .read = seq_read,
643 .llseek = seq_lseek,
644 .release = single_release,
645 .owner = THIS_MODULE,
646};
645 647
646static struct nf_queue_handler nfqh = { 648static struct nf_queue_handler nfqh = {
647 .name = "ip_queue", 649 .name = "ip_queue",
@@ -661,10 +663,11 @@ static int __init ip_queue_init(void)
661 goto cleanup_netlink_notifier; 663 goto cleanup_netlink_notifier;
662 } 664 }
663 665
664 proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info); 666 proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
665 if (proc) 667 if (proc) {
666 proc->owner = THIS_MODULE; 668 proc->owner = THIS_MODULE;
667 else { 669 proc->proc_fops = &ip_queue_proc_fops;
670 } else {
668 printk(KERN_ERR "ip_queue: failed to create proc entry\n"); 671 printk(KERN_ERR "ip_queue: failed to create proc entry\n");
669 goto cleanup_ipqnl; 672 goto cleanup_ipqnl;
670 } 673 }
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 5de6e57ac55c..f8678651250f 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -66,7 +66,7 @@ ipt_local_hook(unsigned int hook,
66 if (skb->len < sizeof(struct iphdr) || 66 if (skb->len < sizeof(struct iphdr) ||
67 ip_hdrlen(skb) < sizeof(struct iphdr)) { 67 ip_hdrlen(skb) < sizeof(struct iphdr)) {
68 if (net_ratelimit()) 68 if (net_ratelimit())
69 printk("iptable_raw: ignoring short SOCK_RAW" 69 printk("iptable_raw: ignoring short SOCK_RAW "
70 "packet.\n"); 70 "packet.\n");
71 return NF_ACCEPT; 71 return NF_ACCEPT;
72 } 72 }
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 35a5aa69cd92..c31b87668250 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -69,7 +69,7 @@ static void __exit nf_nat_amanda_fini(void)
69 69
70static int __init nf_nat_amanda_init(void) 70static int __init nf_nat_amanda_init(void)
71{ 71{
72 BUG_ON(rcu_dereference(nf_nat_amanda_hook)); 72 BUG_ON(nf_nat_amanda_hook != NULL);
73 rcu_assign_pointer(nf_nat_amanda_hook, help); 73 rcu_assign_pointer(nf_nat_amanda_hook, help);
74 return 0; 74 return 0;
75} 75}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 56e93f692e82..86b465b176ba 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -607,13 +607,10 @@ static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
607 struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT); 607 struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT);
608 struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old; 608 struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old;
609 struct nf_conn *ct = old_nat->ct; 609 struct nf_conn *ct = old_nat->ct;
610 unsigned int srchash;
611 610
612 if (!(ct->status & IPS_NAT_DONE_MASK)) 611 if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
613 return; 612 return;
614 613
615 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
616
617 write_lock_bh(&nf_nat_lock); 614 write_lock_bh(&nf_nat_lock);
618 hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); 615 hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
619 new_nat->ct = ct; 616 new_nat->ct = ct;
@@ -681,7 +678,7 @@ static int clean_nat(struct nf_conn *i, void *data)
681 678
682 if (!nat) 679 if (!nat)
683 return 0; 680 return 0;
684 memset(nat, 0, sizeof(nat)); 681 memset(nat, 0, sizeof(*nat));
685 i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST); 682 i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
686 return 0; 683 return 0;
687} 684}
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index e1a16d3ea4cb..a1d5d58a58bf 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -147,7 +147,7 @@ static void __exit nf_nat_ftp_fini(void)
147 147
148static int __init nf_nat_ftp_init(void) 148static int __init nf_nat_ftp_init(void)
149{ 149{
150 BUG_ON(rcu_dereference(nf_nat_ftp_hook)); 150 BUG_ON(nf_nat_ftp_hook != NULL);
151 rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); 151 rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
152 return 0; 152 return 0;
153} 153}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index a868c8c41328..93e18ef114f2 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -544,15 +544,15 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
544/****************************************************************************/ 544/****************************************************************************/
545static int __init init(void) 545static int __init init(void)
546{ 546{
547 BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL); 547 BUG_ON(set_h245_addr_hook != NULL);
548 BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL); 548 BUG_ON(set_h225_addr_hook != NULL);
549 BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL); 549 BUG_ON(set_sig_addr_hook != NULL);
550 BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL); 550 BUG_ON(set_ras_addr_hook != NULL);
551 BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL); 551 BUG_ON(nat_rtp_rtcp_hook != NULL);
552 BUG_ON(rcu_dereference(nat_t120_hook) != NULL); 552 BUG_ON(nat_t120_hook != NULL);
553 BUG_ON(rcu_dereference(nat_h245_hook) != NULL); 553 BUG_ON(nat_h245_hook != NULL);
554 BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL); 554 BUG_ON(nat_callforwarding_hook != NULL);
555 BUG_ON(rcu_dereference(nat_q931_hook) != NULL); 555 BUG_ON(nat_q931_hook != NULL);
556 556
557 rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); 557 rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
558 rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); 558 rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 766e2c16c6b9..fe6f9cef6c85 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -74,7 +74,7 @@ static void __exit nf_nat_irc_fini(void)
74 74
75static int __init nf_nat_irc_init(void) 75static int __init nf_nat_irc_init(void)
76{ 76{
77 BUG_ON(rcu_dereference(nf_nat_irc_hook)); 77 BUG_ON(nf_nat_irc_hook != NULL);
78 rcu_assign_pointer(nf_nat_irc_hook, help); 78 rcu_assign_pointer(nf_nat_irc_hook, help);
79 return 0; 79 return 0;
80} 80}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index e1385a099079..6817e7995f35 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -281,16 +281,16 @@ static int __init nf_nat_helper_pptp_init(void)
281{ 281{
282 nf_nat_need_gre(); 282 nf_nat_need_gre();
283 283
284 BUG_ON(rcu_dereference(nf_nat_pptp_hook_outbound)); 284 BUG_ON(nf_nat_pptp_hook_outbound != NULL);
285 rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); 285 rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
286 286
287 BUG_ON(rcu_dereference(nf_nat_pptp_hook_inbound)); 287 BUG_ON(nf_nat_pptp_hook_inbound != NULL);
288 rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); 288 rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
289 289
290 BUG_ON(rcu_dereference(nf_nat_pptp_hook_exp_gre)); 290 BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
291 rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); 291 rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
292 292
293 BUG_ON(rcu_dereference(nf_nat_pptp_hook_expectfn)); 293 BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
294 rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); 294 rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
295 return 0; 295 return 0;
296} 296}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index ce9edbcc01e3..3ca98971a1e9 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -293,8 +293,8 @@ static void __exit nf_nat_sip_fini(void)
293 293
294static int __init nf_nat_sip_init(void) 294static int __init nf_nat_sip_init(void)
295{ 295{
296 BUG_ON(rcu_dereference(nf_nat_sip_hook)); 296 BUG_ON(nf_nat_sip_hook != NULL);
297 BUG_ON(rcu_dereference(nf_nat_sdp_hook)); 297 BUG_ON(nf_nat_sdp_hook != NULL);
298 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 298 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
299 rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp); 299 rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp);
300 return 0; 300 return 0;
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 0ecec701cb44..1360a94766dd 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -43,7 +43,7 @@ static void __exit nf_nat_tftp_fini(void)
43 43
44static int __init nf_nat_tftp_init(void) 44static int __init nf_nat_tftp_init(void)
45{ 45{
46 BUG_ON(rcu_dereference(nf_nat_tftp_hook)); 46 BUG_ON(nf_nat_tftp_hook != NULL);
47 rcu_assign_pointer(nf_nat_tftp_hook, help); 47 rcu_assign_pointer(nf_nat_tftp_hook, help);
48 return 0; 48 return 0;
49} 49}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9be0daa9c0ec..ce34b281803f 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -46,17 +46,6 @@
46#include <net/sock.h> 46#include <net/sock.h>
47#include <net/raw.h> 47#include <net/raw.h>
48 48
49static int fold_prot_inuse(struct proto *proto)
50{
51 int res = 0;
52 int cpu;
53
54 for_each_possible_cpu(cpu)
55 res += proto->stats[cpu].inuse;
56
57 return res;
58}
59
60/* 49/*
61 * Report socket allocation statistics [mea@utu.fi] 50 * Report socket allocation statistics [mea@utu.fi]
62 */ 51 */
@@ -64,12 +53,12 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
64{ 53{
65 socket_seq_show(seq); 54 socket_seq_show(seq);
66 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 55 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
67 fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count), 56 sock_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
68 tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), 57 tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
69 atomic_read(&tcp_memory_allocated)); 58 atomic_read(&tcp_memory_allocated));
70 seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); 59 seq_printf(seq, "UDP: inuse %d\n", sock_prot_inuse(&udp_prot));
71 seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot)); 60 seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse(&udplite_prot));
72 seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); 61 seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse(&raw_prot));
73 seq_printf(seq, "FRAG: inuse %d memory %d\n", 62 seq_printf(seq, "FRAG: inuse %d memory %d\n",
74 ip_frag_nqueues(), ip_frag_mem()); 63 ip_frag_nqueues(), ip_frag_mem());
75 return 0; 64 return 0;
@@ -304,7 +293,7 @@ static void icmp_put(struct seq_file *seq)
304 for (i=0; icmpmibmap[i].name != NULL; i++) 293 for (i=0; icmpmibmap[i].name != NULL; i++)
305 seq_printf(seq, " %lu", 294 seq_printf(seq, " %lu",
306 snmp_fold_field((void **) icmpmsg_statistics, 295 snmp_fold_field((void **) icmpmsg_statistics,
307 icmpmibmap[i].index)); 296 icmpmibmap[i].index | 0x100));
308} 297}
309 298
310/* 299/*
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3916faca3afe..66b42f547bf9 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -760,6 +760,8 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
760 } 760 }
761} 761}
762 762
763DEFINE_PROTO_INUSE(raw)
764
763struct proto raw_prot = { 765struct proto raw_prot = {
764 .name = "RAW", 766 .name = "RAW",
765 .owner = THIS_MODULE, 767 .owner = THIS_MODULE,
@@ -781,6 +783,7 @@ struct proto raw_prot = {
781 .compat_setsockopt = compat_raw_setsockopt, 783 .compat_setsockopt = compat_raw_setsockopt,
782 .compat_getsockopt = compat_raw_getsockopt, 784 .compat_getsockopt = compat_raw_getsockopt,
783#endif 785#endif
786 REF_PROTO_INUSE(raw)
784}; 787};
785 788
786#ifdef CONFIG_PROC_FS 789#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 21b12de9e653..c426dec6d579 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -578,6 +578,9 @@ static void rt_check_expire(struct work_struct *work)
578 i = (i + 1) & rt_hash_mask; 578 i = (i + 1) & rt_hash_mask;
579 rthp = &rt_hash_table[i].chain; 579 rthp = &rt_hash_table[i].chain;
580 580
581 if (need_resched())
582 cond_resched();
583
581 if (*rthp == NULL) 584 if (*rthp == NULL)
582 continue; 585 continue;
583 spin_lock_bh(rt_hash_lock_addr(i)); 586 spin_lock_bh(rt_hash_lock_addr(i));
@@ -851,9 +854,7 @@ restart:
851 */ 854 */
852 rcu_assign_pointer(rt_hash_table[hash].chain, rth); 855 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
853 856
854 rth->u.dst.__use++; 857 dst_use(&rth->u.dst, now);
855 dst_hold(&rth->u.dst);
856 rth->u.dst.lastuse = now;
857 spin_unlock_bh(rt_hash_lock_addr(hash)); 858 spin_unlock_bh(rt_hash_lock_addr(hash));
858 859
859 rt_drop(rt); 860 rt_drop(rt);
@@ -1813,11 +1814,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1813 goto martian_destination; 1814 goto martian_destination;
1814 1815
1815 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos); 1816 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1816 if (err == -ENOBUFS)
1817 goto e_nobufs;
1818 if (err == -EINVAL)
1819 goto e_inval;
1820
1821done: 1817done:
1822 in_dev_put(in_dev); 1818 in_dev_put(in_dev);
1823 if (free_res) 1819 if (free_res)
@@ -1935,9 +1931,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1935 rth->fl.oif == 0 && 1931 rth->fl.oif == 0 &&
1936 rth->fl.mark == skb->mark && 1932 rth->fl.mark == skb->mark &&
1937 rth->fl.fl4_tos == tos) { 1933 rth->fl.fl4_tos == tos) {
1938 rth->u.dst.lastuse = jiffies; 1934 dst_use(&rth->u.dst, jiffies);
1939 dst_hold(&rth->u.dst);
1940 rth->u.dst.__use++;
1941 RT_CACHE_STAT_INC(in_hit); 1935 RT_CACHE_STAT_INC(in_hit);
1942 rcu_read_unlock(); 1936 rcu_read_unlock();
1943 skb->dst = (struct dst_entry*)rth; 1937 skb->dst = (struct dst_entry*)rth;
@@ -2331,9 +2325,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2331 rth->fl.mark == flp->mark && 2325 rth->fl.mark == flp->mark &&
2332 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2326 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2333 (IPTOS_RT_MASK | RTO_ONLINK))) { 2327 (IPTOS_RT_MASK | RTO_ONLINK))) {
2334 rth->u.dst.lastuse = jiffies; 2328 dst_use(&rth->u.dst, jiffies);
2335 dst_hold(&rth->u.dst);
2336 rth->u.dst.__use++;
2337 RT_CACHE_STAT_INC(out_hit); 2329 RT_CACHE_STAT_INC(out_hit);
2338 rcu_read_unlock_bh(); 2330 rcu_read_unlock_bh();
2339 *rp = rth; 2331 *rp = rth;
@@ -2896,18 +2888,14 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
2896 offset /= sizeof(u32); 2888 offset /= sizeof(u32);
2897 2889
2898 if (length > 0) { 2890 if (length > 0) {
2899 u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
2900 u32 *dst = (u32 *) buffer; 2891 u32 *dst = (u32 *) buffer;
2901 2892
2902 /* Copy first cpu. */
2903 *start = buffer; 2893 *start = buffer;
2904 memcpy(dst, src, length); 2894 memset(dst, 0, length);
2905 2895
2906 /* Add the other cpus in, one int at a time */
2907 for_each_possible_cpu(i) { 2896 for_each_possible_cpu(i) {
2908 unsigned int j; 2897 unsigned int j;
2909 2898 u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
2910 src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
2911 2899
2912 for (j = 0; j < length/4; j++) 2900 for (j = 0; j < length/4; j++)
2913 dst[j] += src[j]; 2901 dst[j] += src[j];
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ffddd2b45352..bec6fe880657 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -191,7 +191,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
191 191
192 tcp_get_default_congestion_control(val); 192 tcp_get_default_congestion_control(val);
193 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen); 193 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
194 if (ret == 0 && newval && newlen) 194 if (ret == 1 && newval && newlen)
195 ret = tcp_set_default_congestion_control(val); 195 ret = tcp_set_default_congestion_control(val);
196 return ret; 196 return ret;
197} 197}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2e6ad6dbba6c..8e65182f7af1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2453,14 +2453,14 @@ void __init tcp_init(void)
2453 0, 2453 0,
2454 &tcp_hashinfo.ehash_size, 2454 &tcp_hashinfo.ehash_size,
2455 NULL, 2455 NULL,
2456 0); 2456 thash_entries ? 0 : 512 * 1024);
2457 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size; 2457 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2458 for (i = 0; i < tcp_hashinfo.ehash_size; i++) { 2458 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2459 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2460 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain); 2459 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2461 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain); 2460 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
2462 } 2461 }
2463 2462 if (inet_ehash_locks_alloc(&tcp_hashinfo))
2463 panic("TCP: failed to alloc ehash_locks");
2464 tcp_hashinfo.bhash = 2464 tcp_hashinfo.bhash =
2465 alloc_large_system_hash("TCP bind", 2465 alloc_large_system_hash("TCP bind",
2466 sizeof(struct inet_bind_hashbucket), 2466 sizeof(struct inet_bind_hashbucket),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 69d8c38ccd39..0f0c1c9829a1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1269,6 +1269,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1269 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1269 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1270 return 0; 1270 return 0;
1271 1271
1272 if (!tp->packets_out)
1273 goto out;
1274
1272 /* SACK fastpath: 1275 /* SACK fastpath:
1273 * if the only SACK change is the increase of the end_seq of 1276 * if the only SACK change is the increase of the end_seq of
1274 * the first block then only apply that SACK block 1277 * the first block then only apply that SACK block
@@ -1330,12 +1333,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1330 cached_fack_count = 0; 1333 cached_fack_count = 0;
1331 } 1334 }
1332 1335
1333 for (i=0; i<num_sacks; i++, sp++) { 1336 for (i = 0; i < num_sacks; i++) {
1334 struct sk_buff *skb; 1337 struct sk_buff *skb;
1335 __u32 start_seq = ntohl(sp->start_seq); 1338 __u32 start_seq = ntohl(sp->start_seq);
1336 __u32 end_seq = ntohl(sp->end_seq); 1339 __u32 end_seq = ntohl(sp->end_seq);
1337 int fack_count; 1340 int fack_count;
1338 int dup_sack = (found_dup_sack && (i == first_sack_index)); 1341 int dup_sack = (found_dup_sack && (i == first_sack_index));
1342 int next_dup = (found_dup_sack && (i+1 == first_sack_index));
1343
1344 sp++;
1339 1345
1340 if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) { 1346 if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
1341 if (dup_sack) { 1347 if (dup_sack) {
@@ -1361,7 +1367,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1361 flag |= FLAG_DATA_LOST; 1367 flag |= FLAG_DATA_LOST;
1362 1368
1363 tcp_for_write_queue_from(skb, sk) { 1369 tcp_for_write_queue_from(skb, sk) {
1364 int in_sack; 1370 int in_sack = 0;
1365 u8 sacked; 1371 u8 sacked;
1366 1372
1367 if (skb == tcp_send_head(sk)) 1373 if (skb == tcp_send_head(sk))
@@ -1380,11 +1386,25 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1380 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 1386 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1381 break; 1387 break;
1382 1388
1383 in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); 1389 dup_sack = (found_dup_sack && (i == first_sack_index));
1384 if (in_sack < 0) 1390
1385 break; 1391 /* Due to sorting DSACK may reside within this SACK block! */
1392 if (next_dup) {
1393 u32 dup_start = ntohl(sp->start_seq);
1394 u32 dup_end = ntohl(sp->end_seq);
1386 1395
1387 fack_count += tcp_skb_pcount(skb); 1396 if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
1397 in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
1398 if (in_sack > 0)
1399 dup_sack = 1;
1400 }
1401 }
1402
1403 /* DSACK info lost if out-of-mem, try SACK still */
1404 if (in_sack <= 0)
1405 in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
1406 if (unlikely(in_sack < 0))
1407 break;
1388 1408
1389 sacked = TCP_SKB_CB(skb)->sacked; 1409 sacked = TCP_SKB_CB(skb)->sacked;
1390 1410
@@ -1400,19 +1420,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1400 if ((dup_sack && in_sack) && 1420 if ((dup_sack && in_sack) &&
1401 (sacked&TCPCB_SACKED_ACKED)) 1421 (sacked&TCPCB_SACKED_ACKED))
1402 reord = min(fack_count, reord); 1422 reord = min(fack_count, reord);
1403 } else {
1404 /* If it was in a hole, we detected reordering. */
1405 if (fack_count < prior_fackets &&
1406 !(sacked&TCPCB_SACKED_ACKED))
1407 reord = min(fack_count, reord);
1408 } 1423 }
1409 1424
1410 /* Nothing to do; acked frame is about to be dropped. */ 1425 /* Nothing to do; acked frame is about to be dropped. */
1426 fack_count += tcp_skb_pcount(skb);
1411 continue; 1427 continue;
1412 } 1428 }
1413 1429
1414 if (!in_sack) 1430 if (!in_sack) {
1431 fack_count += tcp_skb_pcount(skb);
1415 continue; 1432 continue;
1433 }
1416 1434
1417 if (!(sacked&TCPCB_SACKED_ACKED)) { 1435 if (!(sacked&TCPCB_SACKED_ACKED)) {
1418 if (sacked & TCPCB_SACKED_RETRANS) { 1436 if (sacked & TCPCB_SACKED_RETRANS) {
@@ -1429,12 +1447,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1429 tp->retransmit_skb_hint = NULL; 1447 tp->retransmit_skb_hint = NULL;
1430 } 1448 }
1431 } else { 1449 } else {
1432 /* New sack for not retransmitted frame, 1450 if (!(sacked & TCPCB_RETRANS)) {
1433 * which was in hole. It is reordering. 1451 /* New sack for not retransmitted frame,
1434 */ 1452 * which was in hole. It is reordering.
1435 if (!(sacked & TCPCB_RETRANS) && 1453 */
1436 fack_count < prior_fackets) 1454 if (fack_count < prior_fackets)
1437 reord = min(fack_count, reord); 1455 reord = min(fack_count, reord);
1456
1457 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
1458 if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
1459 flag |= FLAG_ONLY_ORIG_SACKED;
1460 }
1438 1461
1439 if (sacked & TCPCB_LOST) { 1462 if (sacked & TCPCB_LOST) {
1440 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1463 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -1443,24 +1466,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1443 /* clear lost hint */ 1466 /* clear lost hint */
1444 tp->retransmit_skb_hint = NULL; 1467 tp->retransmit_skb_hint = NULL;
1445 } 1468 }
1446 /* SACK enhanced F-RTO detection.
1447 * Set flag if and only if non-rexmitted
1448 * segments below frto_highmark are
1449 * SACKed (RFC4138; Appendix B).
1450 * Clearing correct due to in-order walk
1451 */
1452 if (after(end_seq, tp->frto_highmark)) {
1453 flag &= ~FLAG_ONLY_ORIG_SACKED;
1454 } else {
1455 if (!(sacked & TCPCB_RETRANS))
1456 flag |= FLAG_ONLY_ORIG_SACKED;
1457 }
1458 } 1469 }
1459 1470
1460 TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED; 1471 TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
1461 flag |= FLAG_DATA_SACKED; 1472 flag |= FLAG_DATA_SACKED;
1462 tp->sacked_out += tcp_skb_pcount(skb); 1473 tp->sacked_out += tcp_skb_pcount(skb);
1463 1474
1475 fack_count += tcp_skb_pcount(skb);
1464 if (fack_count > tp->fackets_out) 1476 if (fack_count > tp->fackets_out)
1465 tp->fackets_out = fack_count; 1477 tp->fackets_out = fack_count;
1466 1478
@@ -1471,6 +1483,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1471 } else { 1483 } else {
1472 if (dup_sack && (sacked&TCPCB_RETRANS)) 1484 if (dup_sack && (sacked&TCPCB_RETRANS))
1473 reord = min(fack_count, reord); 1485 reord = min(fack_count, reord);
1486
1487 fack_count += tcp_skb_pcount(skb);
1474 } 1488 }
1475 1489
1476 /* D-SACK. We can detect redundant retransmission 1490 /* D-SACK. We can detect redundant retransmission
@@ -1485,6 +1499,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1485 tp->retransmit_skb_hint = NULL; 1499 tp->retransmit_skb_hint = NULL;
1486 } 1500 }
1487 } 1501 }
1502
1503 /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
1504 * due to in-order walk
1505 */
1506 if (after(end_seq, tp->frto_highmark))
1507 flag &= ~FLAG_ONLY_ORIG_SACKED;
1488 } 1508 }
1489 1509
1490 if (tp->retrans_out && 1510 if (tp->retrans_out &&
@@ -1496,7 +1516,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1496 1516
1497 if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss && 1517 if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
1498 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) 1518 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
1499 tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0); 1519 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
1520
1521out:
1500 1522
1501#if FASTRETRANS_DEBUG > 0 1523#if FASTRETRANS_DEBUG > 0
1502 BUG_TRAP((int)tp->sacked_out >= 0); 1524 BUG_TRAP((int)tp->sacked_out >= 0);
@@ -1652,6 +1674,9 @@ void tcp_enter_frto(struct sock *sk)
1652 } 1674 }
1653 tcp_verify_left_out(tp); 1675 tcp_verify_left_out(tp);
1654 1676
1677 /* Too bad if TCP was application limited */
1678 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
1679
1655 /* Earlier loss recovery underway (see RFC4138; Appendix B). 1680 /* Earlier loss recovery underway (see RFC4138; Appendix B).
1656 * The last condition is necessary at least in tp->frto_counter case. 1681 * The last condition is necessary at least in tp->frto_counter case.
1657 */ 1682 */
@@ -1684,6 +1709,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1684 tcp_for_write_queue(skb, sk) { 1709 tcp_for_write_queue(skb, sk) {
1685 if (skb == tcp_send_head(sk)) 1710 if (skb == tcp_send_head(sk))
1686 break; 1711 break;
1712
1713 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1687 /* 1714 /*
1688 * Count the retransmission made on RTO correctly (only when 1715 * Count the retransmission made on RTO correctly (only when
1689 * waiting for the first ACK and did not get it)... 1716 * waiting for the first ACK and did not get it)...
@@ -1697,7 +1724,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1697 } else { 1724 } else {
1698 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1725 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
1699 tp->undo_marker = 0; 1726 tp->undo_marker = 0;
1700 TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1727 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1701 } 1728 }
1702 1729
1703 /* Don't lost mark skbs that were fwd transmitted after RTO */ 1730 /* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -2059,7 +2086,7 @@ static void tcp_update_scoreboard(struct sock *sk)
2059 if (!tcp_skb_timedout(sk, skb)) 2086 if (!tcp_skb_timedout(sk, skb))
2060 break; 2087 break;
2061 2088
2062 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 2089 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
2063 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2090 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2064 tp->lost_out += tcp_skb_pcount(skb); 2091 tp->lost_out += tcp_skb_pcount(skb);
2065 tcp_verify_retransmit_hint(tp, skb); 2092 tcp_verify_retransmit_hint(tp, skb);
@@ -2611,7 +2638,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
2611 * is before the ack sequence we can discard it as it's confirmed to have 2638 * is before the ack sequence we can discard it as it's confirmed to have
2612 * arrived at the other end. 2639 * arrived at the other end.
2613 */ 2640 */
2614static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p) 2641static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
2642 int prior_fackets)
2615{ 2643{
2616 struct tcp_sock *tp = tcp_sk(sk); 2644 struct tcp_sock *tp = tcp_sk(sk);
2617 const struct inet_connection_sock *icsk = inet_csk(sk); 2645 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2620,6 +2648,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
2620 int fully_acked = 1; 2648 int fully_acked = 1;
2621 int flag = 0; 2649 int flag = 0;
2622 int prior_packets = tp->packets_out; 2650 int prior_packets = tp->packets_out;
2651 u32 cnt = 0;
2652 u32 reord = tp->packets_out;
2623 s32 seq_rtt = -1; 2653 s32 seq_rtt = -1;
2624 ktime_t last_ackt = net_invalid_timestamp(); 2654 ktime_t last_ackt = net_invalid_timestamp();
2625 2655
@@ -2660,10 +2690,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
2660 if ((flag & FLAG_DATA_ACKED) || 2690 if ((flag & FLAG_DATA_ACKED) ||
2661 (packets_acked > 1)) 2691 (packets_acked > 1))
2662 flag |= FLAG_NONHEAD_RETRANS_ACKED; 2692 flag |= FLAG_NONHEAD_RETRANS_ACKED;
2663 } else if (seq_rtt < 0) { 2693 } else {
2664 seq_rtt = now - scb->when; 2694 if (seq_rtt < 0) {
2665 if (fully_acked) 2695 seq_rtt = now - scb->when;
2666 last_ackt = skb->tstamp; 2696 if (fully_acked)
2697 last_ackt = skb->tstamp;
2698 }
2699 if (!(sacked & TCPCB_SACKED_ACKED))
2700 reord = min(cnt, reord);
2667 } 2701 }
2668 2702
2669 if (sacked & TCPCB_SACKED_ACKED) 2703 if (sacked & TCPCB_SACKED_ACKED)
@@ -2674,12 +2708,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
2674 if ((sacked & TCPCB_URG) && tp->urg_mode && 2708 if ((sacked & TCPCB_URG) && tp->urg_mode &&
2675 !before(end_seq, tp->snd_up)) 2709 !before(end_seq, tp->snd_up))
2676 tp->urg_mode = 0; 2710 tp->urg_mode = 0;
2677 } else if (seq_rtt < 0) { 2711 } else {
2678 seq_rtt = now - scb->when; 2712 if (seq_rtt < 0) {
2679 if (fully_acked) 2713 seq_rtt = now - scb->when;
2680 last_ackt = skb->tstamp; 2714 if (fully_acked)
2715 last_ackt = skb->tstamp;
2716 }
2717 reord = min(cnt, reord);
2681 } 2718 }
2682 tp->packets_out -= packets_acked; 2719 tp->packets_out -= packets_acked;
2720 cnt += packets_acked;
2683 2721
2684 /* Initial outgoing SYN's get put onto the write_queue 2722 /* Initial outgoing SYN's get put onto the write_queue
2685 * just like anything else we transmit. It is not 2723 * just like anything else we transmit. It is not
@@ -2711,13 +2749,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
2711 tcp_ack_update_rtt(sk, flag, seq_rtt); 2749 tcp_ack_update_rtt(sk, flag, seq_rtt);
2712 tcp_rearm_rto(sk); 2750 tcp_rearm_rto(sk);
2713 2751
2752 if (tcp_is_reno(tp)) {
2753 tcp_remove_reno_sacks(sk, pkts_acked);
2754 } else {
2755 /* Non-retransmitted hole got filled? That's reordering */
2756 if (reord < prior_fackets)
2757 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
2758 }
2759
2714 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 2760 tp->fackets_out -= min(pkts_acked, tp->fackets_out);
2715 /* hint's skb might be NULL but we don't need to care */ 2761 /* hint's skb might be NULL but we don't need to care */
2716 tp->fastpath_cnt_hint -= min_t(u32, pkts_acked, 2762 tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
2717 tp->fastpath_cnt_hint); 2763 tp->fastpath_cnt_hint);
2718 if (tcp_is_reno(tp))
2719 tcp_remove_reno_sacks(sk, pkts_acked);
2720
2721 if (ca_ops->pkts_acked) { 2764 if (ca_ops->pkts_acked) {
2722 s32 rtt_us = -1; 2765 s32 rtt_us = -1;
2723 2766
@@ -3000,6 +3043,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3000 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3043 u32 ack_seq = TCP_SKB_CB(skb)->seq;
3001 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3044 u32 ack = TCP_SKB_CB(skb)->ack_seq;
3002 u32 prior_in_flight; 3045 u32 prior_in_flight;
3046 u32 prior_fackets;
3003 s32 seq_rtt; 3047 s32 seq_rtt;
3004 int prior_packets; 3048 int prior_packets;
3005 int frto_cwnd = 0; 3049 int frto_cwnd = 0;
@@ -3024,6 +3068,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3024 tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache); 3068 tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
3025 } 3069 }
3026 3070
3071 prior_fackets = tp->fackets_out;
3072
3027 if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3073 if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
3028 /* Window is constant, pure forward advance. 3074 /* Window is constant, pure forward advance.
3029 * No more checks are required. 3075 * No more checks are required.
@@ -3065,13 +3111,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3065 prior_in_flight = tcp_packets_in_flight(tp); 3111 prior_in_flight = tcp_packets_in_flight(tp);
3066 3112
3067 /* See if we can take anything off of the retransmit queue. */ 3113 /* See if we can take anything off of the retransmit queue. */
3068 flag |= tcp_clean_rtx_queue(sk, &seq_rtt); 3114 flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
3069 3115
3116 if (tp->frto_counter)
3117 frto_cwnd = tcp_process_frto(sk, flag);
3070 /* Guarantee sacktag reordering detection against wrap-arounds */ 3118 /* Guarantee sacktag reordering detection against wrap-arounds */
3071 if (before(tp->frto_highmark, tp->snd_una)) 3119 if (before(tp->frto_highmark, tp->snd_una))
3072 tp->frto_highmark = 0; 3120 tp->frto_highmark = 0;
3073 if (tp->frto_counter)
3074 frto_cwnd = tcp_process_frto(sk, flag);
3075 3121
3076 if (tcp_ack_is_dubious(sk, flag)) { 3122 if (tcp_ack_is_dubious(sk, flag)) {
3077 /* Advance CWND, if state allows this. */ 3123 /* Advance CWND, if state allows this. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad759f1c3777..652c32368ccc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -858,16 +858,16 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
858 u8 *newkey, u8 newkeylen) 858 u8 *newkey, u8 newkeylen)
859{ 859{
860 /* Add Key to the list */ 860 /* Add Key to the list */
861 struct tcp4_md5sig_key *key; 861 struct tcp_md5sig_key *key;
862 struct tcp_sock *tp = tcp_sk(sk); 862 struct tcp_sock *tp = tcp_sk(sk);
863 struct tcp4_md5sig_key *keys; 863 struct tcp4_md5sig_key *keys;
864 864
865 key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); 865 key = tcp_v4_md5_do_lookup(sk, addr);
866 if (key) { 866 if (key) {
867 /* Pre-existing entry - just update that one. */ 867 /* Pre-existing entry - just update that one. */
868 kfree(key->base.key); 868 kfree(key->key);
869 key->base.key = newkey; 869 key->key = newkey;
870 key->base.keylen = newkeylen; 870 key->keylen = newkeylen;
871 } else { 871 } else {
872 struct tcp_md5sig_info *md5sig; 872 struct tcp_md5sig_info *md5sig;
873 873
@@ -900,8 +900,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
900 sizeof(*keys) * md5sig->entries4); 900 sizeof(*keys) * md5sig->entries4);
901 901
902 /* Free old key list, and reference new one */ 902 /* Free old key list, and reference new one */
903 if (md5sig->keys4) 903 kfree(md5sig->keys4);
904 kfree(md5sig->keys4);
905 md5sig->keys4 = keys; 904 md5sig->keys4 = keys;
906 md5sig->alloced4++; 905 md5sig->alloced4++;
907 } 906 }
@@ -939,10 +938,10 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
939 tp->md5sig_info->alloced4 = 0; 938 tp->md5sig_info->alloced4 = 0;
940 } else if (tp->md5sig_info->entries4 != i) { 939 } else if (tp->md5sig_info->entries4 != i) {
941 /* Need to do some manipulation */ 940 /* Need to do some manipulation */
942 memcpy(&tp->md5sig_info->keys4[i], 941 memmove(&tp->md5sig_info->keys4[i],
943 &tp->md5sig_info->keys4[i+1], 942 &tp->md5sig_info->keys4[i+1],
944 (tp->md5sig_info->entries4 - i) * 943 (tp->md5sig_info->entries4 - i) *
945 sizeof(struct tcp4_md5sig_key)); 944 sizeof(struct tcp4_md5sig_key));
946 } 945 }
947 tcp_free_md5sig_pool(); 946 tcp_free_md5sig_pool();
948 return 0; 947 return 0;
@@ -1083,7 +1082,7 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1083 sg_set_buf(&sg[block++], key->key, key->keylen); 1082 sg_set_buf(&sg[block++], key->key, key->keylen);
1084 nbytes += key->keylen; 1083 nbytes += key->keylen;
1085 1084
1086 sg_mark_end(sg, block); 1085 sg_mark_end(&sg[block - 1]);
1087 1086
1088 /* Now store the Hash into the packet */ 1087 /* Now store the Hash into the packet */
1089 err = crypto_hash_init(desc); 1088 err = crypto_hash_init(desc);
@@ -2049,8 +2048,9 @@ static void *established_get_first(struct seq_file *seq)
2049 struct sock *sk; 2048 struct sock *sk;
2050 struct hlist_node *node; 2049 struct hlist_node *node;
2051 struct inet_timewait_sock *tw; 2050 struct inet_timewait_sock *tw;
2051 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2052 2052
2053 read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock); 2053 read_lock_bh(lock);
2054 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 2054 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2055 if (sk->sk_family != st->family) { 2055 if (sk->sk_family != st->family) {
2056 continue; 2056 continue;
@@ -2067,7 +2067,7 @@ static void *established_get_first(struct seq_file *seq)
2067 rc = tw; 2067 rc = tw;
2068 goto out; 2068 goto out;
2069 } 2069 }
2070 read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); 2070 read_unlock_bh(lock);
2071 st->state = TCP_SEQ_STATE_ESTABLISHED; 2071 st->state = TCP_SEQ_STATE_ESTABLISHED;
2072 } 2072 }
2073out: 2073out:
@@ -2094,11 +2094,11 @@ get_tw:
2094 cur = tw; 2094 cur = tw;
2095 goto out; 2095 goto out;
2096 } 2096 }
2097 read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); 2097 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2098 st->state = TCP_SEQ_STATE_ESTABLISHED; 2098 st->state = TCP_SEQ_STATE_ESTABLISHED;
2099 2099
2100 if (++st->bucket < tcp_hashinfo.ehash_size) { 2100 if (++st->bucket < tcp_hashinfo.ehash_size) {
2101 read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock); 2101 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2102 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); 2102 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2103 } else { 2103 } else {
2104 cur = NULL; 2104 cur = NULL;
@@ -2206,7 +2206,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2206 case TCP_SEQ_STATE_TIME_WAIT: 2206 case TCP_SEQ_STATE_TIME_WAIT:
2207 case TCP_SEQ_STATE_ESTABLISHED: 2207 case TCP_SEQ_STATE_ESTABLISHED:
2208 if (v) 2208 if (v)
2209 read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock); 2209 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2210 break; 2210 break;
2211 } 2211 }
2212} 2212}
@@ -2417,6 +2417,8 @@ void tcp4_proc_exit(void)
2417} 2417}
2418#endif /* CONFIG_PROC_FS */ 2418#endif /* CONFIG_PROC_FS */
2419 2419
2420DEFINE_PROTO_INUSE(tcp)
2421
2420struct proto tcp_prot = { 2422struct proto tcp_prot = {
2421 .name = "TCP", 2423 .name = "TCP",
2422 .owner = THIS_MODULE, 2424 .owner = THIS_MODULE,
@@ -2451,6 +2453,7 @@ struct proto tcp_prot = {
2451 .compat_setsockopt = compat_tcp_setsockopt, 2453 .compat_setsockopt = compat_tcp_setsockopt,
2452 .compat_getsockopt = compat_tcp_getsockopt, 2454 .compat_getsockopt = compat_tcp_getsockopt,
2453#endif 2455#endif
2456 REF_PROTO_INUSE(tcp)
2454}; 2457};
2455 2458
2456void __init tcp_v4_init(struct net_proto_family *ops) 2459void __init tcp_v4_init(struct net_proto_family *ops)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 324b4207254a..e5130a7fe181 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1295,6 +1295,7 @@ static int tcp_mtu_probe(struct sock *sk)
1295 struct sk_buff *skb, *nskb, *next; 1295 struct sk_buff *skb, *nskb, *next;
1296 int len; 1296 int len;
1297 int probe_size; 1297 int probe_size;
1298 int size_needed;
1298 unsigned int pif; 1299 unsigned int pif;
1299 int copy; 1300 int copy;
1300 int mss_now; 1301 int mss_now;
@@ -1313,27 +1314,20 @@ static int tcp_mtu_probe(struct sock *sk)
1313 /* Very simple search strategy: just double the MSS. */ 1314 /* Very simple search strategy: just double the MSS. */
1314 mss_now = tcp_current_mss(sk, 0); 1315 mss_now = tcp_current_mss(sk, 0);
1315 probe_size = 2*tp->mss_cache; 1316 probe_size = 2*tp->mss_cache;
1317 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
1316 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 1318 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1317 /* TODO: set timer for probe_converge_event */ 1319 /* TODO: set timer for probe_converge_event */
1318 return -1; 1320 return -1;
1319 } 1321 }
1320 1322
1321 /* Have enough data in the send queue to probe? */ 1323 /* Have enough data in the send queue to probe? */
1322 len = 0; 1324 if (tp->write_seq - tp->snd_nxt < size_needed)
1323 if ((skb = tcp_send_head(sk)) == NULL)
1324 return -1;
1325 while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1326 skb = tcp_write_queue_next(sk, skb);
1327 if (len < probe_size)
1328 return -1; 1325 return -1;
1329 1326
1330 /* Receive window check. */ 1327 if (tp->snd_wnd < size_needed)
1331 if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) { 1328 return -1;
1332 if (tp->snd_wnd < probe_size) 1329 if (after(tp->snd_nxt + size_needed, tp->snd_una + tp->snd_wnd))
1333 return -1; 1330 return 0;
1334 else
1335 return 0;
1336 }
1337 1331
1338 /* Do we need to wait to drain cwnd? */ 1332 /* Do we need to wait to drain cwnd? */
1339 pif = tcp_packets_in_flight(tp); 1333 pif = tcp_packets_in_flight(tp);
@@ -1352,7 +1346,6 @@ static int tcp_mtu_probe(struct sock *sk)
1352 1346
1353 skb = tcp_send_head(sk); 1347 skb = tcp_send_head(sk);
1354 tcp_insert_write_queue_before(nskb, skb, sk); 1348 tcp_insert_write_queue_before(nskb, skb, sk);
1355 tcp_advance_send_head(sk, skb);
1356 1349
1357 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1350 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1358 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1351 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index b49dedcda52d..007304e99842 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -266,26 +266,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
266 */ 266 */
267 diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; 267 diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
268 268
269 if (tp->snd_cwnd <= tp->snd_ssthresh) { 269 if (diff > gamma && tp->snd_ssthresh > 2 ) {
270 /* Slow start. */ 270 /* Going too fast. Time to slow down
271 if (diff > gamma) { 271 * and switch to congestion avoidance.
272 /* Going too fast. Time to slow down 272 */
273 * and switch to congestion avoidance. 273 tp->snd_ssthresh = 2;
274 */ 274
275 tp->snd_ssthresh = 2; 275 /* Set cwnd to match the actual rate
276 276 * exactly:
277 /* Set cwnd to match the actual rate 277 * cwnd = (actual rate) * baseRTT
278 * exactly: 278 * Then we add 1 because the integer
279 * cwnd = (actual rate) * baseRTT 279 * truncation robs us of full link
280 * Then we add 1 because the integer 280 * utilization.
281 * truncation robs us of full link 281 */
282 * utilization. 282 tp->snd_cwnd = min(tp->snd_cwnd,
283 */ 283 (target_cwnd >>
284 tp->snd_cwnd = min(tp->snd_cwnd, 284 V_PARAM_SHIFT)+1);
285 (target_cwnd >>
286 V_PARAM_SHIFT)+1);
287 285
288 } 286 } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
287 /* Slow start. */
289 tcp_slow_start(tp); 288 tcp_slow_start(tp);
290 } else { 289 } else {
291 /* Congestion avoidance. */ 290 /* Congestion avoidance. */
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index a794a8ca8b4f..978b3fd61e65 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -17,6 +17,11 @@ static struct xfrm_tunnel *tunnel4_handlers;
17static struct xfrm_tunnel *tunnel64_handlers; 17static struct xfrm_tunnel *tunnel64_handlers;
18static DEFINE_MUTEX(tunnel4_mutex); 18static DEFINE_MUTEX(tunnel4_mutex);
19 19
20static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
21{
22 return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
23}
24
20int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family) 25int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
21{ 26{
22 struct xfrm_tunnel **pprev; 27 struct xfrm_tunnel **pprev;
@@ -25,8 +30,7 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
25 30
26 mutex_lock(&tunnel4_mutex); 31 mutex_lock(&tunnel4_mutex);
27 32
28 for (pprev = (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers; 33 for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
29 *pprev; pprev = &(*pprev)->next) {
30 if ((*pprev)->priority > priority) 34 if ((*pprev)->priority > priority)
31 break; 35 break;
32 if ((*pprev)->priority == priority) 36 if ((*pprev)->priority == priority)
@@ -53,8 +57,7 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
53 57
54 mutex_lock(&tunnel4_mutex); 58 mutex_lock(&tunnel4_mutex);
55 59
56 for (pprev = (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers; 60 for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
57 *pprev; pprev = &(*pprev)->next) {
58 if (*pprev == handler) { 61 if (*pprev == handler) {
59 *pprev = handler->next; 62 *pprev = handler->next;
60 ret = 0; 63 ret = 0;
@@ -118,6 +121,17 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
118 break; 121 break;
119} 122}
120 123
124#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
125static void tunnel64_err(struct sk_buff *skb, u32 info)
126{
127 struct xfrm_tunnel *handler;
128
129 for (handler = tunnel64_handlers; handler; handler = handler->next)
130 if (!handler->err_handler(skb, info))
131 break;
132}
133#endif
134
121static struct net_protocol tunnel4_protocol = { 135static struct net_protocol tunnel4_protocol = {
122 .handler = tunnel4_rcv, 136 .handler = tunnel4_rcv,
123 .err_handler = tunnel4_err, 137 .err_handler = tunnel4_err,
@@ -127,7 +141,7 @@ static struct net_protocol tunnel4_protocol = {
127#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 141#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
128static struct net_protocol tunnel64_protocol = { 142static struct net_protocol tunnel64_protocol = {
129 .handler = tunnel64_rcv, 143 .handler = tunnel64_rcv,
130 .err_handler = tunnel4_err, 144 .err_handler = tunnel64_err,
131 .no_policy = 1, 145 .no_policy = 1,
132}; 146};
133#endif 147#endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4bc25b46f33f..03c400ca14c5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1430,6 +1430,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1430 1430
1431} 1431}
1432 1432
1433DEFINE_PROTO_INUSE(udp)
1434
1433struct proto udp_prot = { 1435struct proto udp_prot = {
1434 .name = "UDP", 1436 .name = "UDP",
1435 .owner = THIS_MODULE, 1437 .owner = THIS_MODULE,
@@ -1452,6 +1454,7 @@ struct proto udp_prot = {
1452 .compat_setsockopt = compat_udp_setsockopt, 1454 .compat_setsockopt = compat_udp_setsockopt,
1453 .compat_getsockopt = compat_udp_getsockopt, 1455 .compat_getsockopt = compat_udp_getsockopt,
1454#endif 1456#endif
1457 REF_PROTO_INUSE(udp)
1455}; 1458};
1456 1459
1457/* ------------------------------------------------------------------------ */ 1460/* ------------------------------------------------------------------------ */
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 94977205abb4..f5baeb3e8b85 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -44,6 +44,8 @@ static struct net_protocol udplite_protocol = {
44 .no_policy = 1, 44 .no_policy = 1,
45}; 45};
46 46
47DEFINE_PROTO_INUSE(udplite)
48
47struct proto udplite_prot = { 49struct proto udplite_prot = {
48 .name = "UDP-Lite", 50 .name = "UDP-Lite",
49 .owner = THIS_MODULE, 51 .owner = THIS_MODULE,
@@ -67,6 +69,7 @@ struct proto udplite_prot = {
67 .compat_setsockopt = compat_udp_setsockopt, 69 .compat_setsockopt = compat_udp_setsockopt,
68 .compat_getsockopt = compat_udp_getsockopt, 70 .compat_getsockopt = compat_udp_getsockopt,
69#endif 71#endif
72 REF_PROTO_INUSE(udplite)
70}; 73};
71 74
72static struct inet_protosw udplite4_protosw = { 75static struct inet_protosw udplite4_protosw = {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 348bd8d06112..567664eac463 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -967,7 +967,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
967 if (unlikely(score.addr_type == IPV6_ADDR_ANY || 967 if (unlikely(score.addr_type == IPV6_ADDR_ANY ||
968 score.addr_type & IPV6_ADDR_MULTICAST)) { 968 score.addr_type & IPV6_ADDR_MULTICAST)) {
969 LIMIT_NETDEBUG(KERN_DEBUG 969 LIMIT_NETDEBUG(KERN_DEBUG
970 "ADDRCONF: unspecified / multicast address" 970 "ADDRCONF: unspecified / multicast address "
971 "assigned as unicast address on %s", 971 "assigned as unicast address on %s",
972 dev->name); 972 dev->name);
973 continue; 973 continue;
@@ -4288,8 +4288,4 @@ void __exit addrconf_cleanup(void)
4288 del_timer(&addr_chk_timer); 4288 del_timer(&addr_chk_timer);
4289 4289
4290 rtnl_unlock(); 4290 rtnl_unlock();
4291
4292#ifdef CONFIG_PROC_FS
4293 proc_net_remove(&init_net, "if_inet6");
4294#endif
4295} 4291}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 1b1caf3aa1c1..ecbd38894fdd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -162,7 +162,7 @@ lookup_protocol:
162 BUG_TRAP(answer_prot->slab != NULL); 162 BUG_TRAP(answer_prot->slab != NULL);
163 163
164 err = -ENOBUFS; 164 err = -ENOBUFS;
165 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, 1); 165 sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
166 if (sk == NULL) 166 if (sk == NULL)
167 goto out; 167 goto out;
168 168
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 66a9139d46e9..4eaf55072b1b 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -35,7 +35,6 @@
35#include <net/ipv6.h> 35#include <net/ipv6.h>
36#include <net/protocol.h> 36#include <net/protocol.h>
37#include <net/xfrm.h> 37#include <net/xfrm.h>
38#include <asm/scatterlist.h>
39 38
40static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr) 39static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
41{ 40{
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index ab17b5e62355..7db66f10e00d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -110,9 +110,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
110 goto unlock; 110 goto unlock;
111 } 111 }
112 sg_init_table(sg, nfrags); 112 sg_init_table(sg, nfrags);
113 sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data + 113 skb_to_sgvec(skb, sg,
114 esp->conf.ivlen - 114 esph->enc_data +
115 skb->data, clen)); 115 esp->conf.ivlen -
116 skb->data, clen);
116 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); 117 err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
117 if (unlikely(sg != &esp->sgbuf[0])) 118 if (unlikely(sg != &esp->sgbuf[0]))
118 kfree(sg); 119 kfree(sg);
@@ -209,9 +210,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
209 } 210 }
210 } 211 }
211 sg_init_table(sg, nfrags); 212 sg_init_table(sg, nfrags);
212 sg_mark_end(sg, skb_to_sgvec(skb, sg, 213 skb_to_sgvec(skb, sg,
213 sizeof(*esph) + esp->conf.ivlen, 214 sizeof(*esph) + esp->conf.ivlen,
214 elen)); 215 elen);
215 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); 216 ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
216 if (unlikely(sg != &esp->sgbuf[0])) 217 if (unlikely(sg != &esp->sgbuf[0]))
217 kfree(sg); 218 kfree(sg);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 706622af206f..428c6b0e26d8 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -31,25 +31,6 @@ struct fib6_rule
31 31
32static struct fib_rules_ops fib6_rules_ops; 32static struct fib_rules_ops fib6_rules_ops;
33 33
34static struct fib6_rule main_rule = {
35 .common = {
36 .refcnt = ATOMIC_INIT(2),
37 .pref = 0x7FFE,
38 .action = FR_ACT_TO_TBL,
39 .table = RT6_TABLE_MAIN,
40 },
41};
42
43static struct fib6_rule local_rule = {
44 .common = {
45 .refcnt = ATOMIC_INIT(2),
46 .pref = 0,
47 .action = FR_ACT_TO_TBL,
48 .table = RT6_TABLE_LOCAL,
49 .flags = FIB_RULE_PERMANENT,
50 },
51};
52
53struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, 34struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags,
54 pol_lookup_t lookup) 35 pol_lookup_t lookup)
55{ 36{
@@ -270,11 +251,23 @@ static struct fib_rules_ops fib6_rules_ops = {
270 .owner = THIS_MODULE, 251 .owner = THIS_MODULE,
271}; 252};
272 253
273void __init fib6_rules_init(void) 254static int __init fib6_default_rules_init(void)
274{ 255{
275 list_add_tail(&local_rule.common.list, &fib6_rules_ops.rules_list); 256 int err;
276 list_add_tail(&main_rule.common.list, &fib6_rules_ops.rules_list); 257
258 err = fib_default_rule_add(&fib6_rules_ops, 0,
259 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT);
260 if (err < 0)
261 return err;
262 err = fib_default_rule_add(&fib6_rules_ops, 0x7FFE, RT6_TABLE_MAIN, 0);
263 if (err < 0)
264 return err;
265 return 0;
266}
277 267
268void __init fib6_rules_init(void)
269{
270 BUG_ON(fib6_default_rules_init());
278 fib_rules_register(&fib6_rules_ops); 271 fib_rules_register(&fib6_rules_ops);
279} 272}
280 273
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index d6f1026f1943..adc73adadfae 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -37,9 +37,8 @@ void __inet6_hash(struct inet_hashinfo *hashinfo,
37 } else { 37 } else {
38 unsigned int hash; 38 unsigned int hash;
39 sk->sk_hash = hash = inet6_sk_ehashfn(sk); 39 sk->sk_hash = hash = inet6_sk_ehashfn(sk);
40 hash &= (hashinfo->ehash_size - 1); 40 list = &inet_ehash_bucket(hashinfo, hash)->chain;
41 list = &hashinfo->ehash[hash].chain; 41 lock = inet_ehash_lockp(hashinfo, hash);
42 lock = &hashinfo->ehash[hash].lock;
43 write_lock(lock); 42 write_lock(lock);
44 } 43 }
45 44
@@ -70,9 +69,10 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
70 */ 69 */
71 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); 70 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); 71 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
72 rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
73 73
74 prefetch(head->chain.first); 74 prefetch(head->chain.first);
75 read_lock(&head->lock); 75 read_lock(lock);
76 sk_for_each(sk, node, &head->chain) { 76 sk_for_each(sk, node, &head->chain) {
77 /* For IPV6 do the cheaper port and family tests first. */ 77 /* For IPV6 do the cheaper port and family tests first. */
78 if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif)) 78 if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
@@ -92,12 +92,12 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
92 goto hit; 92 goto hit;
93 } 93 }
94 } 94 }
95 read_unlock(&head->lock); 95 read_unlock(lock);
96 return NULL; 96 return NULL;
97 97
98hit: 98hit:
99 sock_hold(sk); 99 sock_hold(sk);
100 read_unlock(&head->lock); 100 read_unlock(lock);
101 return sk; 101 return sk;
102} 102}
103EXPORT_SYMBOL(__inet6_lookup_established); 103EXPORT_SYMBOL(__inet6_lookup_established);
@@ -175,12 +175,13 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
175 const unsigned int hash = inet6_ehashfn(daddr, lport, saddr, 175 const unsigned int hash = inet6_ehashfn(daddr, lport, saddr,
176 inet->dport); 176 inet->dport);
177 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 177 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
178 rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
178 struct sock *sk2; 179 struct sock *sk2;
179 const struct hlist_node *node; 180 const struct hlist_node *node;
180 struct inet_timewait_sock *tw; 181 struct inet_timewait_sock *tw;
181 182
182 prefetch(head->chain.first); 183 prefetch(head->chain.first);
183 write_lock(&head->lock); 184 write_lock(lock);
184 185
185 /* Check TIME-WAIT sockets first. */ 186 /* Check TIME-WAIT sockets first. */
186 sk_for_each(sk2, node, &head->twchain) { 187 sk_for_each(sk2, node, &head->twchain) {
@@ -216,7 +217,7 @@ unique:
216 __sk_add_node(sk, &head->chain); 217 __sk_add_node(sk, &head->chain);
217 sk->sk_hash = hash; 218 sk->sk_hash = hash;
218 sock_prot_inc_use(sk->sk_prot); 219 sock_prot_inc_use(sk->sk_prot);
219 write_unlock(&head->lock); 220 write_unlock(lock);
220 221
221 if (twp != NULL) { 222 if (twp != NULL) {
222 *twp = tw; 223 *twp = tw;
@@ -231,7 +232,7 @@ unique:
231 return 0; 232 return 0;
232 233
233not_unique: 234not_unique:
234 write_unlock(&head->lock); 235 write_unlock(lock);
235 return -EADDRNOTAVAIL; 236 return -EADDRNOTAVAIL;
236} 237}
237 238
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 653fc0a8235b..86e1835ce4e4 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1339,6 +1339,19 @@ error:
1339 return err; 1339 return err;
1340} 1340}
1341 1341
1342static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1343{
1344 inet->cork.flags &= ~IPCORK_OPT;
1345 kfree(np->cork.opt);
1346 np->cork.opt = NULL;
1347 if (np->cork.rt) {
1348 dst_release(&np->cork.rt->u.dst);
1349 np->cork.rt = NULL;
1350 inet->cork.flags &= ~IPCORK_ALLFRAG;
1351 }
1352 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1353}
1354
1342int ip6_push_pending_frames(struct sock *sk) 1355int ip6_push_pending_frames(struct sock *sk)
1343{ 1356{
1344 struct sk_buff *skb, *tmp_skb; 1357 struct sk_buff *skb, *tmp_skb;
@@ -1415,15 +1428,7 @@ int ip6_push_pending_frames(struct sock *sk)
1415 } 1428 }
1416 1429
1417out: 1430out:
1418 inet->cork.flags &= ~IPCORK_OPT; 1431 ip6_cork_release(inet, np);
1419 kfree(np->cork.opt);
1420 np->cork.opt = NULL;
1421 if (np->cork.rt) {
1422 dst_release(&np->cork.rt->u.dst);
1423 np->cork.rt = NULL;
1424 inet->cork.flags &= ~IPCORK_ALLFRAG;
1425 }
1426 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1427 return err; 1432 return err;
1428error: 1433error:
1429 goto out; 1434 goto out;
@@ -1431,8 +1436,6 @@ error:
1431 1436
1432void ip6_flush_pending_frames(struct sock *sk) 1437void ip6_flush_pending_frames(struct sock *sk)
1433{ 1438{
1434 struct inet_sock *inet = inet_sk(sk);
1435 struct ipv6_pinfo *np = inet6_sk(sk);
1436 struct sk_buff *skb; 1439 struct sk_buff *skb;
1437 1440
1438 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { 1441 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
@@ -1442,14 +1445,5 @@ void ip6_flush_pending_frames(struct sock *sk)
1442 kfree_skb(skb); 1445 kfree_skb(skb);
1443 } 1446 }
1444 1447
1445 inet->cork.flags &= ~IPCORK_OPT; 1448 ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1446
1447 kfree(np->cork.opt);
1448 np->cork.opt = NULL;
1449 if (np->cork.rt) {
1450 dst_release(&np->cork.rt->u.dst);
1451 np->cork.rt = NULL;
1452 inet->cork.flags &= ~IPCORK_ALLFRAG;
1453 }
1454 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1455} 1449}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 80ef2a1d39fd..0cd4056f9127 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -34,9 +34,9 @@
34#include <net/ip.h> 34#include <net/ip.h>
35#include <net/xfrm.h> 35#include <net/xfrm.h>
36#include <net/ipcomp.h> 36#include <net/ipcomp.h>
37#include <asm/scatterlist.h>
38#include <asm/semaphore.h> 37#include <asm/semaphore.h>
39#include <linux/crypto.h> 38#include <linux/crypto.h>
39#include <linux/err.h>
40#include <linux/pfkeyv2.h> 40#include <linux/pfkeyv2.h>
41#include <linux/random.h> 41#include <linux/random.h>
42#include <linux/percpu.h> 42#include <linux/percpu.h>
@@ -359,7 +359,7 @@ static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
359 for_each_possible_cpu(cpu) { 359 for_each_possible_cpu(cpu) {
360 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 360 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
361 CRYPTO_ALG_ASYNC); 361 CRYPTO_ALG_ASYNC);
362 if (!tfm) 362 if (IS_ERR(tfm))
363 goto error; 363 goto error;
364 *per_cpu_ptr(tfms, cpu) = tfm; 364 *per_cpu_ptr(tfms, cpu) = tfm;
365 } 365 }
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 20cfc90d5597..67997a74ddce 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1037,6 +1037,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
1037 1037
1038 ndmsg = nlmsg_data(nlh); 1038 ndmsg = nlmsg_data(nlh);
1039 ndmsg->nduseropt_family = AF_INET6; 1039 ndmsg->nduseropt_family = AF_INET6;
1040 ndmsg->nduseropt_ifindex = ra->dev->ifindex;
1040 ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type; 1041 ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type;
1041 ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code; 1042 ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code;
1042 ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3; 1043 ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3;
@@ -1670,7 +1671,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, struct file * f
1670 filp, buffer, lenp, ppos); 1671 filp, buffer, lenp, ppos);
1671 1672
1672 else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) || 1673 else if ((strcmp(ctl->procname, "retrans_time_ms") == 0) ||
1673 (strcmp(ctl->procname, "base_reacable_time_ms") == 0)) 1674 (strcmp(ctl->procname, "base_reachable_time_ms") == 0))
1674 ret = proc_dointvec_ms_jiffies(ctl, write, 1675 ret = proc_dointvec_ms_jiffies(ctl, write,
1675 filp, buffer, lenp, ppos); 1676 filp, buffer, lenp, ppos);
1676 else 1677 else
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 4513eab77397..e789ec44d23b 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,25 +4,29 @@
4 4
5# Link order matters here. 5# Link order matters here.
6obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o 6obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
7obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
8obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
9obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
10obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
11obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
12obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
13obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
14obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o 7obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
15obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o 8obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
16obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
17obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o 9obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
18obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
19obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o 10obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
20obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
21obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
22obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
23 11
24# objects for l3 independent conntrack 12# objects for l3 independent conntrack
25nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o 13nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
26 14
27# l3 independent conntrack 15# l3 independent conntrack
28obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o 16obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
17
18# matches
19obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
20obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
21obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
22obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
23obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
24obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
25obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
26obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
27obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
28
29# targets
30obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
31obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
32obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 6413a30d9f68..e273605eef85 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -23,6 +23,7 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/sysctl.h> 24#include <linux/sysctl.h>
25#include <linux/proc_fs.h> 25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
27#include <net/net_namespace.h> 28#include <net/net_namespace.h>
28#include <net/sock.h> 29#include <net/sock.h>
@@ -596,15 +597,11 @@ static ctl_table ipq_root_table[] = {
596 { .ctl_name = 0 } 597 { .ctl_name = 0 }
597}; 598};
598 599
599#ifdef CONFIG_PROC_FS 600static int ip6_queue_show(struct seq_file *m, void *v)
600static int
601ipq_get_info(char *buffer, char **start, off_t offset, int length)
602{ 601{
603 int len;
604
605 read_lock_bh(&queue_lock); 602 read_lock_bh(&queue_lock);
606 603
607 len = sprintf(buffer, 604 seq_printf(m,
608 "Peer PID : %d\n" 605 "Peer PID : %d\n"
609 "Copy mode : %hu\n" 606 "Copy mode : %hu\n"
610 "Copy range : %u\n" 607 "Copy range : %u\n"
@@ -621,16 +618,21 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
621 queue_user_dropped); 618 queue_user_dropped);
622 619
623 read_unlock_bh(&queue_lock); 620 read_unlock_bh(&queue_lock);
621 return 0;
622}
624 623
625 *start = buffer + offset; 624static int ip6_queue_open(struct inode *inode, struct file *file)
626 len -= offset; 625{
627 if (len > length) 626 return single_open(file, ip6_queue_show, NULL);
628 len = length;
629 else if (len < 0)
630 len = 0;
631 return len;
632} 627}
633#endif /* CONFIG_PROC_FS */ 628
629static const struct file_operations ip6_queue_proc_fops = {
630 .open = ip6_queue_open,
631 .read = seq_read,
632 .llseek = seq_lseek,
633 .release = single_release,
634 .owner = THIS_MODULE,
635};
634 636
635static struct nf_queue_handler nfqh = { 637static struct nf_queue_handler nfqh = {
636 .name = "ip6_queue", 638 .name = "ip6_queue",
@@ -650,10 +652,11 @@ static int __init ip6_queue_init(void)
650 goto cleanup_netlink_notifier; 652 goto cleanup_netlink_notifier;
651 } 653 }
652 654
653 proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info); 655 proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
654 if (proc) 656 if (proc) {
655 proc->owner = THIS_MODULE; 657 proc->owner = THIS_MODULE;
656 else { 658 proc->proc_fops = &ip6_queue_proc_fops;
659 } else {
657 printk(KERN_ERR "ip6_queue: failed to create proc entry\n"); 660 printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
658 goto cleanup_ipqnl; 661 goto cleanup_ipqnl;
659 } 662 }
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index be526ad92543..8631ed7fe8a9 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -32,27 +32,16 @@
32 32
33static struct proc_dir_entry *proc_net_devsnmp6; 33static struct proc_dir_entry *proc_net_devsnmp6;
34 34
35static int fold_prot_inuse(struct proto *proto)
36{
37 int res = 0;
38 int cpu;
39
40 for_each_possible_cpu(cpu)
41 res += proto->stats[cpu].inuse;
42
43 return res;
44}
45
46static int sockstat6_seq_show(struct seq_file *seq, void *v) 35static int sockstat6_seq_show(struct seq_file *seq, void *v)
47{ 36{
48 seq_printf(seq, "TCP6: inuse %d\n", 37 seq_printf(seq, "TCP6: inuse %d\n",
49 fold_prot_inuse(&tcpv6_prot)); 38 sock_prot_inuse(&tcpv6_prot));
50 seq_printf(seq, "UDP6: inuse %d\n", 39 seq_printf(seq, "UDP6: inuse %d\n",
51 fold_prot_inuse(&udpv6_prot)); 40 sock_prot_inuse(&udpv6_prot));
52 seq_printf(seq, "UDPLITE6: inuse %d\n", 41 seq_printf(seq, "UDPLITE6: inuse %d\n",
53 fold_prot_inuse(&udplitev6_prot)); 42 sock_prot_inuse(&udplitev6_prot));
54 seq_printf(seq, "RAW6: inuse %d\n", 43 seq_printf(seq, "RAW6: inuse %d\n",
55 fold_prot_inuse(&rawv6_prot)); 44 sock_prot_inuse(&rawv6_prot));
56 seq_printf(seq, "FRAG6: inuse %d memory %d\n", 45 seq_printf(seq, "FRAG6: inuse %d memory %d\n",
57 ip6_frag_nqueues(), ip6_frag_mem()); 46 ip6_frag_nqueues(), ip6_frag_mem());
58 return 0; 47 return 0;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ca24ef19cd8f..807260d03586 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1144,6 +1144,8 @@ static int rawv6_init_sk(struct sock *sk)
1144 return(0); 1144 return(0);
1145} 1145}
1146 1146
1147DEFINE_PROTO_INUSE(rawv6)
1148
1147struct proto rawv6_prot = { 1149struct proto rawv6_prot = {
1148 .name = "RAWv6", 1150 .name = "RAWv6",
1149 .owner = THIS_MODULE, 1151 .owner = THIS_MODULE,
@@ -1166,6 +1168,7 @@ struct proto rawv6_prot = {
1166 .compat_setsockopt = compat_rawv6_setsockopt, 1168 .compat_setsockopt = compat_rawv6_setsockopt,
1167 .compat_getsockopt = compat_rawv6_getsockopt, 1169 .compat_getsockopt = compat_rawv6_getsockopt,
1168#endif 1170#endif
1171 REF_PROTO_INUSE(rawv6)
1169}; 1172};
1170 1173
1171#ifdef CONFIG_PROC_FS 1174#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 95f8e4a62f68..6ecb5e6fae2e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -38,12 +38,8 @@
38#include <linux/in6.h> 38#include <linux/in6.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/if_arp.h> 40#include <linux/if_arp.h>
41
42#ifdef CONFIG_PROC_FS
43#include <linux/proc_fs.h> 41#include <linux/proc_fs.h>
44#include <linux/seq_file.h> 42#include <linux/seq_file.h>
45#endif
46
47#include <net/net_namespace.h> 43#include <net/net_namespace.h>
48#include <net/snmp.h> 44#include <net/snmp.h>
49#include <net/ipv6.h> 45#include <net/ipv6.h>
@@ -548,12 +544,8 @@ restart:
548 rt = rt6_device_match(rt, fl->oif, flags); 544 rt = rt6_device_match(rt, fl->oif, flags);
549 BACKTRACK(&fl->fl6_src); 545 BACKTRACK(&fl->fl6_src);
550out: 546out:
551 dst_hold(&rt->u.dst); 547 dst_use(&rt->u.dst, jiffies);
552 read_unlock_bh(&table->tb6_lock); 548 read_unlock_bh(&table->tb6_lock);
553
554 rt->u.dst.lastuse = jiffies;
555 rt->u.dst.__use++;
556
557 return rt; 549 return rt;
558 550
559} 551}
@@ -2288,71 +2280,50 @@ struct rt6_proc_arg
2288 2280
2289static int rt6_info_route(struct rt6_info *rt, void *p_arg) 2281static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2290{ 2282{
2291 struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg; 2283 struct seq_file *m = p_arg;
2292
2293 if (arg->skip < arg->offset / RT6_INFO_LEN) {
2294 arg->skip++;
2295 return 0;
2296 }
2297
2298 if (arg->len >= arg->length)
2299 return 0;
2300 2284
2301 arg->len += sprintf(arg->buffer + arg->len, 2285 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr),
2302 NIP6_SEQFMT " %02x ", 2286 rt->rt6i_dst.plen);
2303 NIP6(rt->rt6i_dst.addr),
2304 rt->rt6i_dst.plen);
2305 2287
2306#ifdef CONFIG_IPV6_SUBTREES 2288#ifdef CONFIG_IPV6_SUBTREES
2307 arg->len += sprintf(arg->buffer + arg->len, 2289 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr),
2308 NIP6_SEQFMT " %02x ", 2290 rt->rt6i_src.plen);
2309 NIP6(rt->rt6i_src.addr),
2310 rt->rt6i_src.plen);
2311#else 2291#else
2312 arg->len += sprintf(arg->buffer + arg->len, 2292 seq_puts(m, "00000000000000000000000000000000 00 ");
2313 "00000000000000000000000000000000 00 ");
2314#endif 2293#endif
2315 2294
2316 if (rt->rt6i_nexthop) { 2295 if (rt->rt6i_nexthop) {
2317 arg->len += sprintf(arg->buffer + arg->len, 2296 seq_printf(m, NIP6_SEQFMT,
2318 NIP6_SEQFMT, 2297 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2319 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2320 } else { 2298 } else {
2321 arg->len += sprintf(arg->buffer + arg->len, 2299 seq_puts(m, "00000000000000000000000000000000");
2322 "00000000000000000000000000000000");
2323 } 2300 }
2324 arg->len += sprintf(arg->buffer + arg->len, 2301 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2325 " %08x %08x %08x %08x %8s\n", 2302 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2326 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), 2303 rt->u.dst.__use, rt->rt6i_flags,
2327 rt->u.dst.__use, rt->rt6i_flags, 2304 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2328 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2329 return 0; 2305 return 0;
2330} 2306}
2331 2307
2332static int rt6_proc_info(char *buffer, char **start, off_t offset, int length) 2308static int ipv6_route_show(struct seq_file *m, void *v)
2333{ 2309{
2334 struct rt6_proc_arg arg = { 2310 fib6_clean_all(rt6_info_route, 0, m);
2335 .buffer = buffer, 2311 return 0;
2336 .offset = offset, 2312}
2337 .length = length,
2338 };
2339
2340 fib6_clean_all(rt6_info_route, 0, &arg);
2341
2342 *start = buffer;
2343 if (offset)
2344 *start += offset % RT6_INFO_LEN;
2345
2346 arg.len -= offset % RT6_INFO_LEN;
2347
2348 if (arg.len > length)
2349 arg.len = length;
2350 if (arg.len < 0)
2351 arg.len = 0;
2352 2313
2353 return arg.len; 2314static int ipv6_route_open(struct inode *inode, struct file *file)
2315{
2316 return single_open(file, ipv6_route_show, NULL);
2354} 2317}
2355 2318
2319static const struct file_operations ipv6_route_proc_fops = {
2320 .owner = THIS_MODULE,
2321 .open = ipv6_route_open,
2322 .read = seq_read,
2323 .llseek = seq_lseek,
2324 .release = single_release,
2325};
2326
2356static int rt6_stats_seq_show(struct seq_file *seq, void *v) 2327static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2357{ 2328{
2358 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", 2329 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
@@ -2489,22 +2460,14 @@ ctl_table ipv6_route_table[] = {
2489 2460
2490void __init ip6_route_init(void) 2461void __init ip6_route_init(void)
2491{ 2462{
2492#ifdef CONFIG_PROC_FS
2493 struct proc_dir_entry *p;
2494#endif
2495 ip6_dst_ops.kmem_cachep = 2463 ip6_dst_ops.kmem_cachep =
2496 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 2464 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2497 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2465 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2498 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; 2466 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep;
2499 2467
2500 fib6_init(); 2468 fib6_init();
2501#ifdef CONFIG_PROC_FS 2469 proc_net_fops_create(&init_net, "ipv6_route", 0, &ipv6_route_proc_fops);
2502 p = proc_net_create(&init_net, "ipv6_route", 0, rt6_proc_info);
2503 if (p)
2504 p->owner = THIS_MODULE;
2505
2506 proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); 2470 proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2507#endif
2508#ifdef CONFIG_XFRM 2471#ifdef CONFIG_XFRM
2509 xfrm6_init(); 2472 xfrm6_init();
2510#endif 2473#endif
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 85208026278b..93980c3b83e6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -561,16 +561,16 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
561 char *newkey, u8 newkeylen) 561 char *newkey, u8 newkeylen)
562{ 562{
563 /* Add key to the list */ 563 /* Add key to the list */
564 struct tcp6_md5sig_key *key; 564 struct tcp_md5sig_key *key;
565 struct tcp_sock *tp = tcp_sk(sk); 565 struct tcp_sock *tp = tcp_sk(sk);
566 struct tcp6_md5sig_key *keys; 566 struct tcp6_md5sig_key *keys;
567 567
568 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); 568 key = tcp_v6_md5_do_lookup(sk, peer);
569 if (key) { 569 if (key) {
570 /* modify existing entry - just update that one */ 570 /* modify existing entry - just update that one */
571 kfree(key->base.key); 571 kfree(key->key);
572 key->base.key = newkey; 572 key->key = newkey;
573 key->base.keylen = newkeylen; 573 key->keylen = newkeylen;
574 } else { 574 } else {
575 /* reallocate new list if current one is full. */ 575 /* reallocate new list if current one is full. */
576 if (!tp->md5sig_info) { 576 if (!tp->md5sig_info) {
@@ -581,7 +581,10 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
581 } 581 }
582 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 582 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
583 } 583 }
584 tcp_alloc_md5sig_pool(); 584 if (tcp_alloc_md5sig_pool() == NULL) {
585 kfree(newkey);
586 return -ENOMEM;
587 }
585 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { 588 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
586 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * 589 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
587 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); 590 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
@@ -634,10 +637,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
634 kfree(tp->md5sig_info->keys6); 637 kfree(tp->md5sig_info->keys6);
635 tp->md5sig_info->keys6 = NULL; 638 tp->md5sig_info->keys6 = NULL;
636 tp->md5sig_info->alloced6 = 0; 639 tp->md5sig_info->alloced6 = 0;
637
638 tcp_free_md5sig_pool();
639
640 return 0;
641 } else { 640 } else {
642 /* shrink the database */ 641 /* shrink the database */
643 if (tp->md5sig_info->entries6 != i) 642 if (tp->md5sig_info->entries6 != i)
@@ -646,6 +645,8 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
646 (tp->md5sig_info->entries6 - i) 645 (tp->md5sig_info->entries6 - i)
647 * sizeof (tp->md5sig_info->keys6[0])); 646 * sizeof (tp->md5sig_info->keys6[0]));
648 } 647 }
648 tcp_free_md5sig_pool();
649 return 0;
649 } 650 }
650 } 651 }
651 return -ENOENT; 652 return -ENOENT;
@@ -781,7 +782,7 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
781 sg_set_buf(&sg[block++], key->key, key->keylen); 782 sg_set_buf(&sg[block++], key->key, key->keylen);
782 nbytes += key->keylen; 783 nbytes += key->keylen;
783 784
784 sg_mark_end(sg, block); 785 sg_mark_end(&sg[block - 1]);
785 786
786 /* Now store the hash into the packet */ 787 /* Now store the hash into the packet */
787 err = crypto_hash_init(desc); 788 err = crypto_hash_init(desc);
@@ -2107,6 +2108,8 @@ void tcp6_proc_exit(void)
2107} 2108}
2108#endif 2109#endif
2109 2110
2111DEFINE_PROTO_INUSE(tcpv6)
2112
2110struct proto tcpv6_prot = { 2113struct proto tcpv6_prot = {
2111 .name = "TCPv6", 2114 .name = "TCPv6",
2112 .owner = THIS_MODULE, 2115 .owner = THIS_MODULE,
@@ -2141,6 +2144,7 @@ struct proto tcpv6_prot = {
2141 .compat_setsockopt = compat_tcp_setsockopt, 2144 .compat_setsockopt = compat_tcp_setsockopt,
2142 .compat_getsockopt = compat_tcp_getsockopt, 2145 .compat_getsockopt = compat_tcp_getsockopt,
2143#endif 2146#endif
2147 REF_PROTO_INUSE(tcpv6)
2144}; 2148};
2145 2149
2146static struct inet6_protocol tcpv6_protocol = { 2150static struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index caebad6ee510..ee1cc3f8599f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -205,12 +205,11 @@ out:
205 return err; 205 return err;
206 206
207csum_copy_err: 207csum_copy_err:
208 UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
208 skb_kill_datagram(sk, skb, flags); 209 skb_kill_datagram(sk, skb, flags);
209 210
210 if (flags & MSG_DONTWAIT) { 211 if (flags & MSG_DONTWAIT)
211 UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
212 return -EAGAIN; 212 return -EAGAIN;
213 }
214 goto try_again; 213 goto try_again;
215} 214}
216 215
@@ -971,6 +970,8 @@ void udp6_proc_exit(void) {
971 970
972/* ------------------------------------------------------------------------ */ 971/* ------------------------------------------------------------------------ */
973 972
973DEFINE_PROTO_INUSE(udpv6)
974
974struct proto udpv6_prot = { 975struct proto udpv6_prot = {
975 .name = "UDPv6", 976 .name = "UDPv6",
976 .owner = THIS_MODULE, 977 .owner = THIS_MODULE,
@@ -992,6 +993,7 @@ struct proto udpv6_prot = {
992 .compat_setsockopt = compat_udpv6_setsockopt, 993 .compat_setsockopt = compat_udpv6_setsockopt,
993 .compat_getsockopt = compat_udpv6_getsockopt, 994 .compat_getsockopt = compat_udpv6_getsockopt,
994#endif 995#endif
996 REF_PROTO_INUSE(udpv6)
995}; 997};
996 998
997static struct inet_protosw udpv6_protosw = { 999static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 766566f7de47..5a0379f71415 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -40,6 +40,8 @@ static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
40 return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal); 40 return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
41} 41}
42 42
43DEFINE_PROTO_INUSE(udplitev6)
44
43struct proto udplitev6_prot = { 45struct proto udplitev6_prot = {
44 .name = "UDPLITEv6", 46 .name = "UDPLITEv6",
45 .owner = THIS_MODULE, 47 .owner = THIS_MODULE,
@@ -62,6 +64,7 @@ struct proto udplitev6_prot = {
62 .compat_setsockopt = compat_udpv6_setsockopt, 64 .compat_setsockopt = compat_udpv6_setsockopt,
63 .compat_getsockopt = compat_udpv6_getsockopt, 65 .compat_getsockopt = compat_udpv6_getsockopt,
64#endif 66#endif
67 REF_PROTO_INUSE(udplitev6)
65}; 68};
66 69
67static struct inet_protosw udplite6_protosw = { 70static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 29b063d43120..c76a9523091b 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -92,11 +92,6 @@ extern int ipxrtr_route_skb(struct sk_buff *skb);
92extern struct ipx_route *ipxrtr_lookup(__be32 net); 92extern struct ipx_route *ipxrtr_lookup(__be32 net);
93extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg); 93extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
94 94
95#undef IPX_REFCNT_DEBUG
96#ifdef IPX_REFCNT_DEBUG
97atomic_t ipx_sock_nr;
98#endif
99
100struct ipx_interface *ipx_interfaces_head(void) 95struct ipx_interface *ipx_interfaces_head(void)
101{ 96{
102 struct ipx_interface *rc = NULL; 97 struct ipx_interface *rc = NULL;
@@ -151,14 +146,7 @@ static void ipx_destroy_socket(struct sock *sk)
151{ 146{
152 ipx_remove_socket(sk); 147 ipx_remove_socket(sk);
153 skb_queue_purge(&sk->sk_receive_queue); 148 skb_queue_purge(&sk->sk_receive_queue);
154#ifdef IPX_REFCNT_DEBUG 149 sk_refcnt_debug_dec(sk);
155 atomic_dec(&ipx_sock_nr);
156 printk(KERN_DEBUG "IPX socket %p released, %d are still alive\n", sk,
157 atomic_read(&ipx_sock_nr));
158 if (atomic_read(&sk->sk_refcnt) != 1)
159 printk(KERN_DEBUG "Destruction sock ipx %p delayed, cnt=%d\n",
160 sk, atomic_read(&sk->sk_refcnt));
161#endif
162 sock_put(sk); 150 sock_put(sk);
163} 151}
164 152
@@ -1381,14 +1369,11 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol)
1381 goto out; 1369 goto out;
1382 1370
1383 rc = -ENOMEM; 1371 rc = -ENOMEM;
1384 sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, 1); 1372 sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
1385 if (!sk) 1373 if (!sk)
1386 goto out; 1374 goto out;
1387#ifdef IPX_REFCNT_DEBUG 1375
1388 atomic_inc(&ipx_sock_nr); 1376 sk_refcnt_debug_inc(sk);
1389 printk(KERN_DEBUG "IPX socket %p created, now we have %d alive\n", sk,
1390 atomic_read(&ipx_sock_nr));
1391#endif
1392 sock_init_data(sock, sk); 1377 sock_init_data(sock, sk);
1393 sk->sk_no_check = 1; /* Checksum off by default */ 1378 sk->sk_no_check = 1; /* Checksum off by default */
1394 sock->ops = &ipx_dgram_ops; 1379 sock->ops = &ipx_dgram_ops;
@@ -1409,6 +1394,7 @@ static int ipx_release(struct socket *sock)
1409 1394
1410 sock_set_flag(sk, SOCK_DEAD); 1395 sock_set_flag(sk, SOCK_DEAD);
1411 sock->sk = NULL; 1396 sock->sk = NULL;
1397 sk_refcnt_debug_release(sk);
1412 ipx_destroy_socket(sk); 1398 ipx_destroy_socket(sk);
1413out: 1399out:
1414 return 0; 1400 return 0;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 0328ae2654f4..48ce59a6e026 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1078,7 +1078,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
1078 } 1078 }
1079 1079
1080 /* Allocate networking socket */ 1080 /* Allocate networking socket */
1081 sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto, 1); 1081 sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto);
1082 if (sk == NULL) 1082 if (sk == NULL)
1083 return -ENOMEM; 1083 return -ENOMEM;
1084 1084
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index dc5e34a01620..a86a5d83786b 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -928,7 +928,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
928 928
929 opcode = fp[0]; 929 opcode = fp[0];
930 if (~opcode & 0x80) { 930 if (~opcode & 0x80) {
931 IRDA_WARNING("%s: IrIAS multiframe commands or results" 931 IRDA_WARNING("%s: IrIAS multiframe commands or results "
932 "is not implemented yet!\n", __FUNCTION__); 932 "is not implemented yet!\n", __FUNCTION__);
933 return; 933 return;
934 } 934 }
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 7f9c8542e5fc..c68220773d28 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -296,6 +296,7 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
296 */ 296 */
297void irlan_eth_send_gratuitous_arp(struct net_device *dev) 297void irlan_eth_send_gratuitous_arp(struct net_device *dev)
298{ 298{
299#ifdef CONFIG_INET
299 struct in_device *in_dev; 300 struct in_device *in_dev;
300 301
301 /* 302 /*
@@ -303,7 +304,6 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev)
303 * is useful if we have changed access points on the same 304 * is useful if we have changed access points on the same
304 * subnet. 305 * subnet.
305 */ 306 */
306#ifdef CONFIG_INET
307 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); 307 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
308 rcu_read_lock(); 308 rcu_read_lock();
309 in_dev = __in_dev_get_rcu(dev); 309 in_dev = __in_dev_get_rcu(dev);
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 2f9f8dce5a69..e0eab5927c4f 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -731,15 +731,25 @@ dev_irnet_ioctl(struct inode * inode,
731 /* Get termios */ 731 /* Get termios */
732 case TCGETS: 732 case TCGETS:
733 DEBUG(FS_INFO, "Get termios.\n"); 733 DEBUG(FS_INFO, "Get termios.\n");
734#ifndef TCGETS2
734 if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios)) 735 if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
735 break; 736 break;
737#else
738 if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
739 break;
740#endif
736 err = 0; 741 err = 0;
737 break; 742 break;
738 /* Set termios */ 743 /* Set termios */
739 case TCSETSF: 744 case TCSETSF:
740 DEBUG(FS_INFO, "Set termios.\n"); 745 DEBUG(FS_INFO, "Set termios.\n");
746#ifndef TCGETS2
741 if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp)) 747 if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
742 break; 748 break;
749#else
750 if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
751 break;
752#endif
743 err = 0; 753 err = 0;
744 break; 754 break;
745 755
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 43e01c8d382b..aef664580355 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -216,7 +216,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
216{ 216{
217 struct sock *sk; 217 struct sock *sk;
218 218
219 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1); 219 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
220 if (!sk) 220 if (!sk)
221 return NULL; 221 return NULL;
222 222
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a2f5a6ea3895..7698f6c459d6 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -97,7 +97,7 @@ struct iucv_irq_list {
97 struct iucv_irq_data data; 97 struct iucv_irq_data data;
98}; 98};
99 99
100static struct iucv_irq_data *iucv_irq_data; 100static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
101static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE; 101static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
102static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE; 102static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
103 103
@@ -277,7 +277,7 @@ union iucv_param {
277/* 277/*
278 * Anchor for per-cpu IUCV command parameter block. 278 * Anchor for per-cpu IUCV command parameter block.
279 */ 279 */
280static union iucv_param *iucv_param; 280static union iucv_param *iucv_param[NR_CPUS];
281 281
282/** 282/**
283 * iucv_call_b2f0 283 * iucv_call_b2f0
@@ -356,7 +356,7 @@ static void iucv_allow_cpu(void *data)
356 * 0x10 - Flag to allow priority message completion interrupts 356 * 0x10 - Flag to allow priority message completion interrupts
357 * 0x08 - Flag to allow IUCV control interrupts 357 * 0x08 - Flag to allow IUCV control interrupts
358 */ 358 */
359 parm = percpu_ptr(iucv_param, smp_processor_id()); 359 parm = iucv_param[cpu];
360 memset(parm, 0, sizeof(union iucv_param)); 360 memset(parm, 0, sizeof(union iucv_param));
361 parm->set_mask.ipmask = 0xf8; 361 parm->set_mask.ipmask = 0xf8;
362 iucv_call_b2f0(IUCV_SETMASK, parm); 362 iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -377,7 +377,7 @@ static void iucv_block_cpu(void *data)
377 union iucv_param *parm; 377 union iucv_param *parm;
378 378
379 /* Disable all iucv interrupts. */ 379 /* Disable all iucv interrupts. */
380 parm = percpu_ptr(iucv_param, smp_processor_id()); 380 parm = iucv_param[cpu];
381 memset(parm, 0, sizeof(union iucv_param)); 381 memset(parm, 0, sizeof(union iucv_param));
382 iucv_call_b2f0(IUCV_SETMASK, parm); 382 iucv_call_b2f0(IUCV_SETMASK, parm);
383 383
@@ -401,9 +401,9 @@ static void iucv_declare_cpu(void *data)
401 return; 401 return;
402 402
403 /* Declare interrupt buffer. */ 403 /* Declare interrupt buffer. */
404 parm = percpu_ptr(iucv_param, cpu); 404 parm = iucv_param[cpu];
405 memset(parm, 0, sizeof(union iucv_param)); 405 memset(parm, 0, sizeof(union iucv_param));
406 parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu)); 406 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
407 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 407 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
408 if (rc) { 408 if (rc) {
409 char *err = "Unknown"; 409 char *err = "Unknown";
@@ -458,7 +458,7 @@ static void iucv_retrieve_cpu(void *data)
458 iucv_block_cpu(NULL); 458 iucv_block_cpu(NULL);
459 459
460 /* Retrieve interrupt buffer. */ 460 /* Retrieve interrupt buffer. */
461 parm = percpu_ptr(iucv_param, cpu); 461 parm = iucv_param[cpu];
462 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); 462 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
463 463
464 /* Clear indication that an iucv buffer exists for this cpu. */ 464 /* Clear indication that an iucv buffer exists for this cpu. */
@@ -558,22 +558,23 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
558 switch (action) { 558 switch (action) {
559 case CPU_UP_PREPARE: 559 case CPU_UP_PREPARE:
560 case CPU_UP_PREPARE_FROZEN: 560 case CPU_UP_PREPARE_FROZEN:
561 if (!percpu_populate(iucv_irq_data, 561 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
562 sizeof(struct iucv_irq_data), 562 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
563 GFP_KERNEL|GFP_DMA, cpu)) 563 if (!iucv_irq_data[cpu])
564 return NOTIFY_BAD; 564 return NOTIFY_BAD;
565 if (!percpu_populate(iucv_param, sizeof(union iucv_param), 565 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
566 GFP_KERNEL|GFP_DMA, cpu)) { 566 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
567 percpu_depopulate(iucv_irq_data, cpu); 567 if (!iucv_param[cpu])
568 return NOTIFY_BAD; 568 return NOTIFY_BAD;
569 }
570 break; 569 break;
571 case CPU_UP_CANCELED: 570 case CPU_UP_CANCELED:
572 case CPU_UP_CANCELED_FROZEN: 571 case CPU_UP_CANCELED_FROZEN:
573 case CPU_DEAD: 572 case CPU_DEAD:
574 case CPU_DEAD_FROZEN: 573 case CPU_DEAD_FROZEN:
575 percpu_depopulate(iucv_param, cpu); 574 kfree(iucv_param[cpu]);
576 percpu_depopulate(iucv_irq_data, cpu); 575 iucv_param[cpu] = NULL;
576 kfree(iucv_irq_data[cpu]);
577 iucv_irq_data[cpu] = NULL;
577 break; 578 break;
578 case CPU_ONLINE: 579 case CPU_ONLINE:
579 case CPU_ONLINE_FROZEN: 580 case CPU_ONLINE_FROZEN:
@@ -612,7 +613,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
612{ 613{
613 union iucv_param *parm; 614 union iucv_param *parm;
614 615
615 parm = percpu_ptr(iucv_param, smp_processor_id()); 616 parm = iucv_param[smp_processor_id()];
616 memset(parm, 0, sizeof(union iucv_param)); 617 memset(parm, 0, sizeof(union iucv_param));
617 if (userdata) 618 if (userdata)
618 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 619 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -755,7 +756,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
755 756
756 local_bh_disable(); 757 local_bh_disable();
757 /* Prepare parameter block. */ 758 /* Prepare parameter block. */
758 parm = percpu_ptr(iucv_param, smp_processor_id()); 759 parm = iucv_param[smp_processor_id()];
759 memset(parm, 0, sizeof(union iucv_param)); 760 memset(parm, 0, sizeof(union iucv_param));
760 parm->ctrl.ippathid = path->pathid; 761 parm->ctrl.ippathid = path->pathid;
761 parm->ctrl.ipmsglim = path->msglim; 762 parm->ctrl.ipmsglim = path->msglim;
@@ -799,7 +800,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
799 BUG_ON(in_atomic()); 800 BUG_ON(in_atomic());
800 spin_lock_bh(&iucv_table_lock); 801 spin_lock_bh(&iucv_table_lock);
801 iucv_cleanup_queue(); 802 iucv_cleanup_queue();
802 parm = percpu_ptr(iucv_param, smp_processor_id()); 803 parm = iucv_param[smp_processor_id()];
803 memset(parm, 0, sizeof(union iucv_param)); 804 memset(parm, 0, sizeof(union iucv_param));
804 parm->ctrl.ipmsglim = path->msglim; 805 parm->ctrl.ipmsglim = path->msglim;
805 parm->ctrl.ipflags1 = path->flags; 806 parm->ctrl.ipflags1 = path->flags;
@@ -854,7 +855,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
854 int rc; 855 int rc;
855 856
856 local_bh_disable(); 857 local_bh_disable();
857 parm = percpu_ptr(iucv_param, smp_processor_id()); 858 parm = iucv_param[smp_processor_id()];
858 memset(parm, 0, sizeof(union iucv_param)); 859 memset(parm, 0, sizeof(union iucv_param));
859 if (userdata) 860 if (userdata)
860 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 861 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -881,7 +882,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
881 int rc; 882 int rc;
882 883
883 local_bh_disable(); 884 local_bh_disable();
884 parm = percpu_ptr(iucv_param, smp_processor_id()); 885 parm = iucv_param[smp_processor_id()];
885 memset(parm, 0, sizeof(union iucv_param)); 886 memset(parm, 0, sizeof(union iucv_param));
886 if (userdata) 887 if (userdata)
887 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 888 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -936,7 +937,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
936 int rc; 937 int rc;
937 938
938 local_bh_disable(); 939 local_bh_disable();
939 parm = percpu_ptr(iucv_param, smp_processor_id()); 940 parm = iucv_param[smp_processor_id()];
940 memset(parm, 0, sizeof(union iucv_param)); 941 memset(parm, 0, sizeof(union iucv_param));
941 parm->purge.ippathid = path->pathid; 942 parm->purge.ippathid = path->pathid;
942 parm->purge.ipmsgid = msg->id; 943 parm->purge.ipmsgid = msg->id;
@@ -1003,7 +1004,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
1003 } 1004 }
1004 1005
1005 local_bh_disable(); 1006 local_bh_disable();
1006 parm = percpu_ptr(iucv_param, smp_processor_id()); 1007 parm = iucv_param[smp_processor_id()];
1007 memset(parm, 0, sizeof(union iucv_param)); 1008 memset(parm, 0, sizeof(union iucv_param));
1008 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1009 parm->db.ipbfadr1 = (u32)(addr_t) buffer;
1009 parm->db.ipbfln1f = (u32) size; 1010 parm->db.ipbfln1f = (u32) size;
@@ -1040,7 +1041,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1040 int rc; 1041 int rc;
1041 1042
1042 local_bh_disable(); 1043 local_bh_disable();
1043 parm = percpu_ptr(iucv_param, smp_processor_id()); 1044 parm = iucv_param[smp_processor_id()];
1044 memset(parm, 0, sizeof(union iucv_param)); 1045 memset(parm, 0, sizeof(union iucv_param));
1045 parm->db.ippathid = path->pathid; 1046 parm->db.ippathid = path->pathid;
1046 parm->db.ipmsgid = msg->id; 1047 parm->db.ipmsgid = msg->id;
@@ -1074,7 +1075,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1074 int rc; 1075 int rc;
1075 1076
1076 local_bh_disable(); 1077 local_bh_disable();
1077 parm = percpu_ptr(iucv_param, smp_processor_id()); 1078 parm = iucv_param[smp_processor_id()];
1078 memset(parm, 0, sizeof(union iucv_param)); 1079 memset(parm, 0, sizeof(union iucv_param));
1079 if (flags & IUCV_IPRMDATA) { 1080 if (flags & IUCV_IPRMDATA) {
1080 parm->dpl.ippathid = path->pathid; 1081 parm->dpl.ippathid = path->pathid;
@@ -1118,7 +1119,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1118 int rc; 1119 int rc;
1119 1120
1120 local_bh_disable(); 1121 local_bh_disable();
1121 parm = percpu_ptr(iucv_param, smp_processor_id()); 1122 parm = iucv_param[smp_processor_id()];
1122 memset(parm, 0, sizeof(union iucv_param)); 1123 memset(parm, 0, sizeof(union iucv_param));
1123 if (flags & IUCV_IPRMDATA) { 1124 if (flags & IUCV_IPRMDATA) {
1124 /* Message of 8 bytes can be placed into the parameter list. */ 1125 /* Message of 8 bytes can be placed into the parameter list. */
@@ -1172,7 +1173,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1172 int rc; 1173 int rc;
1173 1174
1174 local_bh_disable(); 1175 local_bh_disable();
1175 parm = percpu_ptr(iucv_param, smp_processor_id()); 1176 parm = iucv_param[smp_processor_id()];
1176 memset(parm, 0, sizeof(union iucv_param)); 1177 memset(parm, 0, sizeof(union iucv_param));
1177 if (flags & IUCV_IPRMDATA) { 1178 if (flags & IUCV_IPRMDATA) {
1178 parm->dpl.ippathid = path->pathid; 1179 parm->dpl.ippathid = path->pathid;
@@ -1559,7 +1560,7 @@ static void iucv_external_interrupt(u16 code)
1559 struct iucv_irq_data *p; 1560 struct iucv_irq_data *p;
1560 struct iucv_irq_list *work; 1561 struct iucv_irq_list *work;
1561 1562
1562 p = percpu_ptr(iucv_irq_data, smp_processor_id()); 1563 p = iucv_irq_data[smp_processor_id()];
1563 if (p->ippathid >= iucv_max_pathid) { 1564 if (p->ippathid >= iucv_max_pathid) {
1564 printk(KERN_WARNING "iucv_do_int: Got interrupt with " 1565 printk(KERN_WARNING "iucv_do_int: Got interrupt with "
1565 "pathid %d > max_connections (%ld)\n", 1566 "pathid %d > max_connections (%ld)\n",
@@ -1598,6 +1599,7 @@ static void iucv_external_interrupt(u16 code)
1598static int __init iucv_init(void) 1599static int __init iucv_init(void)
1599{ 1600{
1600 int rc; 1601 int rc;
1602 int cpu;
1601 1603
1602 if (!MACHINE_IS_VM) { 1604 if (!MACHINE_IS_VM) {
1603 rc = -EPROTONOSUPPORT; 1605 rc = -EPROTONOSUPPORT;
@@ -1617,19 +1619,23 @@ static int __init iucv_init(void)
1617 rc = PTR_ERR(iucv_root); 1619 rc = PTR_ERR(iucv_root);
1618 goto out_bus; 1620 goto out_bus;
1619 } 1621 }
1620 /* Note: GFP_DMA used to get memory below 2G */ 1622
1621 iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data), 1623 for_each_online_cpu(cpu) {
1622 GFP_KERNEL|GFP_DMA); 1624 /* Note: GFP_DMA used to get memory below 2G */
1623 if (!iucv_irq_data) { 1625 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
1624 rc = -ENOMEM; 1626 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
1625 goto out_root; 1627 if (!iucv_irq_data[cpu]) {
1626 } 1628 rc = -ENOMEM;
1627 /* Allocate parameter blocks. */ 1629 goto out_free;
1628 iucv_param = percpu_alloc(sizeof(union iucv_param), 1630 }
1629 GFP_KERNEL|GFP_DMA); 1631
1630 if (!iucv_param) { 1632 /* Allocate parameter blocks. */
1631 rc = -ENOMEM; 1633 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
1632 goto out_extint; 1634 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
1635 if (!iucv_param[cpu]) {
1636 rc = -ENOMEM;
1637 goto out_free;
1638 }
1633 } 1639 }
1634 register_hotcpu_notifier(&iucv_cpu_notifier); 1640 register_hotcpu_notifier(&iucv_cpu_notifier);
1635 ASCEBC(iucv_error_no_listener, 16); 1641 ASCEBC(iucv_error_no_listener, 16);
@@ -1638,9 +1644,13 @@ static int __init iucv_init(void)
1638 iucv_available = 1; 1644 iucv_available = 1;
1639 return 0; 1645 return 0;
1640 1646
1641out_extint: 1647out_free:
1642 percpu_free(iucv_irq_data); 1648 for_each_possible_cpu(cpu) {
1643out_root: 1649 kfree(iucv_param[cpu]);
1650 iucv_param[cpu] = NULL;
1651 kfree(iucv_irq_data[cpu]);
1652 iucv_irq_data[cpu] = NULL;
1653 }
1644 s390_root_dev_unregister(iucv_root); 1654 s390_root_dev_unregister(iucv_root);
1645out_bus: 1655out_bus:
1646 bus_unregister(&iucv_bus); 1656 bus_unregister(&iucv_bus);
@@ -1658,6 +1668,7 @@ out:
1658static void __exit iucv_exit(void) 1668static void __exit iucv_exit(void)
1659{ 1669{
1660 struct iucv_irq_list *p, *n; 1670 struct iucv_irq_list *p, *n;
1671 int cpu;
1661 1672
1662 spin_lock_irq(&iucv_queue_lock); 1673 spin_lock_irq(&iucv_queue_lock);
1663 list_for_each_entry_safe(p, n, &iucv_task_queue, list) 1674 list_for_each_entry_safe(p, n, &iucv_task_queue, list)
@@ -1666,8 +1677,12 @@ static void __exit iucv_exit(void)
1666 kfree(p); 1677 kfree(p);
1667 spin_unlock_irq(&iucv_queue_lock); 1678 spin_unlock_irq(&iucv_queue_lock);
1668 unregister_hotcpu_notifier(&iucv_cpu_notifier); 1679 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1669 percpu_free(iucv_param); 1680 for_each_possible_cpu(cpu) {
1670 percpu_free(iucv_irq_data); 1681 kfree(iucv_param[cpu]);
1682 iucv_param[cpu] = NULL;
1683 kfree(iucv_irq_data[cpu]);
1684 iucv_irq_data[cpu] = NULL;
1685 }
1671 s390_root_dev_unregister(iucv_root); 1686 s390_root_dev_unregister(iucv_root);
1672 bus_unregister(&iucv_bus); 1687 bus_unregister(&iucv_bus);
1673 unregister_external_interrupt(0x4000, iucv_external_interrupt); 1688 unregister_external_interrupt(0x4000, iucv_external_interrupt);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7969f8a716df..878039b9557d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -152,7 +152,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol)
152 return -EPROTONOSUPPORT; 152 return -EPROTONOSUPPORT;
153 153
154 err = -ENOMEM; 154 err = -ENOMEM;
155 sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, 1); 155 sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
156 if (sk == NULL) 156 if (sk == NULL)
157 goto out; 157 goto out;
158 158
@@ -395,9 +395,9 @@ static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx)
395static inline int verify_sec_ctx_len(void *p) 395static inline int verify_sec_ctx_len(void *p)
396{ 396{
397 struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p; 397 struct sadb_x_sec_ctx *sec_ctx = (struct sadb_x_sec_ctx *)p;
398 int len; 398 int len = sec_ctx->sadb_x_ctx_len;
399 399
400 if (sec_ctx->sadb_x_ctx_len > PAGE_SIZE) 400 if (len > PAGE_SIZE)
401 return -EINVAL; 401 return -EINVAL;
402 402
403 len = pfkey_sec_ctx_len(sec_ctx); 403 len = pfkey_sec_ctx_len(sec_ctx);
@@ -1015,9 +1015,7 @@ static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x)
1015{ 1015{
1016 struct sk_buff *skb; 1016 struct sk_buff *skb;
1017 1017
1018 spin_lock_bh(&x->lock);
1019 skb = __pfkey_xfrm_state2msg(x, 1, 3); 1018 skb = __pfkey_xfrm_state2msg(x, 1, 3);
1020 spin_unlock_bh(&x->lock);
1021 1019
1022 return skb; 1020 return skb;
1023} 1021}
@@ -1552,7 +1550,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
1552 1550
1553 out_hdr = (struct sadb_msg *) out_skb->data; 1551 out_hdr = (struct sadb_msg *) out_skb->data;
1554 out_hdr->sadb_msg_version = hdr->sadb_msg_version; 1552 out_hdr->sadb_msg_version = hdr->sadb_msg_version;
1555 out_hdr->sadb_msg_type = SADB_DUMP; 1553 out_hdr->sadb_msg_type = SADB_GET;
1556 out_hdr->sadb_msg_satype = pfkey_proto2satype(proto); 1554 out_hdr->sadb_msg_satype = pfkey_proto2satype(proto);
1557 out_hdr->sadb_msg_errno = 0; 1555 out_hdr->sadb_msg_errno = 0;
1558 out_hdr->sadb_msg_reserved = 0; 1556 out_hdr->sadb_msg_reserved = 0;
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8ebc2769dfda..5c0b484237c8 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -869,7 +869,7 @@ static void llc_sk_init(struct sock* sk)
869 */ 869 */
870struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot) 870struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
871{ 871{
872 struct sock *sk = sk_alloc(net, family, priority, prot, 1); 872 struct sock *sk = sk_alloc(net, family, priority, prot);
873 873
874 if (!sk) 874 if (!sk)
875 goto out; 875 goto out;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 6fffb3845ab6..ce176e691afe 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -13,6 +13,18 @@ config MAC80211
13 This option enables the hardware independent IEEE 802.11 13 This option enables the hardware independent IEEE 802.11
14 networking stack. 14 networking stack.
15 15
16config MAC80211_RCSIMPLE
17 bool "'simple' rate control algorithm" if EMBEDDED
18 default y
19 depends on MAC80211
20 help
21 This option allows you to turn off the 'simple' rate
22 control algorithm in mac80211. If you do turn it off,
23 you absolutely need another rate control algorithm.
24
25 Say Y unless you know you will have another algorithm
26 available.
27
16config MAC80211_LEDS 28config MAC80211_LEDS
17 bool "Enable LED triggers" 29 bool "Enable LED triggers"
18 depends on MAC80211 && LEDS_TRIGGERS 30 depends on MAC80211 && LEDS_TRIGGERS
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 219cd9f9341f..1e6237b34846 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -1,8 +1,9 @@
1obj-$(CONFIG_MAC80211) += mac80211.o rc80211_simple.o 1obj-$(CONFIG_MAC80211) += mac80211.o
2 2
3mac80211-objs-$(CONFIG_MAC80211_LEDS) += ieee80211_led.o 3mac80211-objs-$(CONFIG_MAC80211_LEDS) += ieee80211_led.o
4mac80211-objs-$(CONFIG_MAC80211_DEBUGFS) += debugfs.o debugfs_sta.o debugfs_netdev.o debugfs_key.o 4mac80211-objs-$(CONFIG_MAC80211_DEBUGFS) += debugfs.o debugfs_sta.o debugfs_netdev.o debugfs_key.o
5mac80211-objs-$(CONFIG_NET_SCHED) += wme.o 5mac80211-objs-$(CONFIG_NET_SCHED) += wme.o
6mac80211-objs-$(CONFIG_MAC80211_RCSIMPLE) += rc80211_simple.o
6 7
7mac80211-objs := \ 8mac80211-objs := \
8 ieee80211.o \ 9 ieee80211.o \
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index bf7ba128b963..e62fe55944b8 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -11,7 +11,6 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <asm/scatterlist.h>
15 14
16#include <net/mac80211.h> 15#include <net/mac80211.h>
17#include "ieee80211_key.h" 16#include "ieee80211_key.h"
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index f484ca7ade9c..59350b8727ec 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -267,6 +267,17 @@ static int ieee80211_open(struct net_device *dev)
267 tasklet_enable(&local->tasklet); 267 tasklet_enable(&local->tasklet);
268 } 268 }
269 269
270 /*
271 * set_multicast_list will be invoked by the networking core
272 * which will check whether any increments here were done in
273 * error and sync them down to the hardware as filter flags.
274 */
275 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
276 atomic_inc(&local->iff_allmultis);
277
278 if (sdata->flags & IEEE80211_SDATA_PROMISC)
279 atomic_inc(&local->iff_promiscs);
280
270 local->open_count++; 281 local->open_count++;
271 282
272 netif_start_queue(dev); 283 netif_start_queue(dev);
@@ -284,6 +295,18 @@ static int ieee80211_stop(struct net_device *dev)
284 295
285 netif_stop_queue(dev); 296 netif_stop_queue(dev);
286 297
298 /*
299 * Don't count this interface for promisc/allmulti while it
300 * is down. dev_mc_unsync() will invoke set_multicast_list
301 * on the master interface which will sync these down to the
302 * hardware as filter flags.
303 */
304 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
305 atomic_dec(&local->iff_allmultis);
306
307 if (sdata->flags & IEEE80211_SDATA_PROMISC)
308 atomic_dec(&local->iff_promiscs);
309
287 dev_mc_unsync(local->mdev, dev); 310 dev_mc_unsync(local->mdev, dev);
288 311
289 /* down all dependent devices, that is VLANs */ 312 /* down all dependent devices, that is VLANs */
@@ -366,8 +389,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
366 389
367 allmulti = !!(dev->flags & IFF_ALLMULTI); 390 allmulti = !!(dev->flags & IFF_ALLMULTI);
368 promisc = !!(dev->flags & IFF_PROMISC); 391 promisc = !!(dev->flags & IFF_PROMISC);
369 sdata_allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI; 392 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
370 sdata_promisc = sdata->flags & IEEE80211_SDATA_PROMISC; 393 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
371 394
372 if (allmulti != sdata_allmulti) { 395 if (allmulti != sdata_allmulti) {
373 if (dev->flags & IFF_ALLMULTI) 396 if (dev->flags & IFF_ALLMULTI)
@@ -1072,7 +1095,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1072 ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev)); 1095 ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
1073 ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP); 1096 ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);
1074 1097
1075 result = ieee80211_init_rate_ctrl_alg(local, NULL); 1098 result = ieee80211_init_rate_ctrl_alg(local,
1099 hw->rate_control_algorithm);
1076 if (result < 0) { 1100 if (result < 0) {
1077 printk(KERN_DEBUG "%s: Failed to initialize rate control " 1101 printk(KERN_DEBUG "%s: Failed to initialize rate control "
1078 "algorithm\n", wiphy_name(local->hw.wiphy)); 1102 "algorithm\n", wiphy_name(local->hw.wiphy));
@@ -1233,8 +1257,17 @@ static int __init ieee80211_init(void)
1233 1257
1234 BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb)); 1258 BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb));
1235 1259
1260#ifdef CONFIG_MAC80211_RCSIMPLE
1261 ret = ieee80211_rate_control_register(&mac80211_rcsimple);
1262 if (ret)
1263 return ret;
1264#endif
1265
1236 ret = ieee80211_wme_register(); 1266 ret = ieee80211_wme_register();
1237 if (ret) { 1267 if (ret) {
1268#ifdef CONFIG_MAC80211_RCSIMPLE
1269 ieee80211_rate_control_unregister(&mac80211_rcsimple);
1270#endif
1238 printk(KERN_DEBUG "ieee80211_init: failed to " 1271 printk(KERN_DEBUG "ieee80211_init: failed to "
1239 "initialize WME (err=%d)\n", ret); 1272 "initialize WME (err=%d)\n", ret);
1240 return ret; 1273 return ret;
@@ -1248,6 +1281,10 @@ static int __init ieee80211_init(void)
1248 1281
1249static void __exit ieee80211_exit(void) 1282static void __exit ieee80211_exit(void)
1250{ 1283{
1284#ifdef CONFIG_MAC80211_RCSIMPLE
1285 ieee80211_rate_control_unregister(&mac80211_rcsimple);
1286#endif
1287
1251 ieee80211_wme_unregister(); 1288 ieee80211_wme_unregister();
1252 ieee80211_debugfs_netdev_exit(); 1289 ieee80211_debugfs_netdev_exit();
1253} 1290}
diff --git a/net/mac80211/ieee80211_common.h b/net/mac80211/ieee80211_common.h
deleted file mode 100644
index c15295d43d87..000000000000
--- a/net/mac80211/ieee80211_common.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * IEEE 802.11 driver (80211.o) -- hostapd interface
3 * Copyright 2002-2004, Instant802 Networks, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef IEEE80211_COMMON_H
11#define IEEE80211_COMMON_H
12
13#include <linux/types.h>
14
15/*
16 * This is common header information with user space. It is used on all
17 * frames sent to wlan#ap interface.
18 */
19
20#define IEEE80211_FI_VERSION 0x80211001
21
22struct ieee80211_frame_info {
23 __be32 version;
24 __be32 length;
25 __be64 mactime;
26 __be64 hosttime;
27 __be32 phytype;
28 __be32 channel;
29 __be32 datarate;
30 __be32 antenna;
31 __be32 priority;
32 __be32 ssi_type;
33 __be32 ssi_signal;
34 __be32 ssi_noise;
35 __be32 preamble;
36 __be32 encoding;
37
38 /* Note: this structure is otherwise identical to capture format used
39 * in linux-wlan-ng, but this additional field is used to provide meta
40 * data about the frame to hostapd. This was the easiest method for
41 * providing this information, but this might change in the future. */
42 __be32 msg_type;
43} __attribute__ ((packed));
44
45
46enum ieee80211_msg_type {
47 ieee80211_msg_normal = 0,
48 ieee80211_msg_tx_callback_ack = 1,
49 ieee80211_msg_tx_callback_fail = 2,
50 /* hole at 3, was ieee80211_msg_passive_scan but unused */
51 /* hole at 4, was ieee80211_msg_wep_frame_unknown_key but now unused */
52 ieee80211_msg_michael_mic_failure = 5,
53 /* hole at 6, was monitor but never sent to userspace */
54 ieee80211_msg_sta_not_assoc = 7,
55 /* 8 was ieee80211_msg_set_aid_for_sta */
56 /* 9 was ieee80211_msg_key_threshold_notification */
57 /* 11 was ieee80211_msg_radar */
58};
59
60struct ieee80211_msg_key_notification {
61 int tx_rx_count;
62 char ifname[IFNAMSIZ];
63 u8 addr[ETH_ALEN]; /* ff:ff:ff:ff:ff:ff for broadcast keys */
64};
65
66
67enum ieee80211_phytype {
68 ieee80211_phytype_fhss_dot11_97 = 1,
69 ieee80211_phytype_dsss_dot11_97 = 2,
70 ieee80211_phytype_irbaseband = 3,
71 ieee80211_phytype_dsss_dot11_b = 4,
72 ieee80211_phytype_pbcc_dot11_b = 5,
73 ieee80211_phytype_ofdm_dot11_g = 6,
74 ieee80211_phytype_pbcc_dot11_g = 7,
75 ieee80211_phytype_ofdm_dot11_a = 8,
76};
77
78enum ieee80211_ssi_type {
79 ieee80211_ssi_none = 0,
80 ieee80211_ssi_norm = 1, /* normalized, 0-1000 */
81 ieee80211_ssi_dbm = 2,
82 ieee80211_ssi_raw = 3, /* raw SSI */
83};
84
85struct ieee80211_radar_info {
86 int channel;
87 int radar;
88 int radar_type;
89};
90
91#endif /* IEEE80211_COMMON_H */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4b4ed2a5803c..72e1c93dd87e 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -230,6 +230,7 @@ struct ieee80211_if_vlan {
230#define IEEE80211_STA_AUTO_SSID_SEL BIT(10) 230#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
231#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 231#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
232#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 232#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
233#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
233struct ieee80211_if_sta { 234struct ieee80211_if_sta {
234 enum { 235 enum {
235 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE, 236 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
@@ -241,6 +242,8 @@ struct ieee80211_if_sta {
241 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 242 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
242 u8 ssid[IEEE80211_MAX_SSID_LEN]; 243 u8 ssid[IEEE80211_MAX_SSID_LEN];
243 size_t ssid_len; 244 size_t ssid_len;
245 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
246 size_t scan_ssid_len;
244 u16 aid; 247 u16 aid;
245 u16 ap_capab, capab; 248 u16 ap_capab, capab;
246 u8 *extra_ie; /* to be added to the end of AssocReq */ 249 u8 *extra_ie; /* to be added to the end of AssocReq */
@@ -259,7 +262,6 @@ struct ieee80211_if_sta {
259 unsigned long request; 262 unsigned long request;
260 struct sk_buff_head skb_queue; 263 struct sk_buff_head skb_queue;
261 264
262 int key_management_enabled;
263 unsigned long last_probe; 265 unsigned long last_probe;
264 266
265#define IEEE80211_AUTH_ALG_OPEN BIT(0) 267#define IEEE80211_AUTH_ALG_OPEN BIT(0)
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index 6caa3ec2cff7..7027eed4d4ae 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -917,7 +917,6 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
917 struct iw_request_info *info, 917 struct iw_request_info *info,
918 struct iw_param *data, char *extra) 918 struct iw_param *data, char *extra)
919{ 919{
920 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
921 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 920 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
922 int ret = 0; 921 int ret = 0;
923 922
@@ -927,18 +926,21 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
927 case IW_AUTH_CIPHER_GROUP: 926 case IW_AUTH_CIPHER_GROUP:
928 case IW_AUTH_WPA_ENABLED: 927 case IW_AUTH_WPA_ENABLED:
929 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 928 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
930 break;
931 case IW_AUTH_KEY_MGMT: 929 case IW_AUTH_KEY_MGMT:
930 break;
931 case IW_AUTH_PRIVACY_INVOKED:
932 if (sdata->type != IEEE80211_IF_TYPE_STA) 932 if (sdata->type != IEEE80211_IF_TYPE_STA)
933 ret = -EINVAL; 933 ret = -EINVAL;
934 else { 934 else {
935 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
935 /* 936 /*
936 * Key management was set by wpa_supplicant, 937 * Privacy invoked by wpa_supplicant, store the
937 * we only need this to associate to a network 938 * value and allow associating to a protected
938 * that has privacy enabled regardless of not 939 * network without having a key up front.
939 * having a key.
940 */ 940 */
941 sdata->u.sta.key_management_enabled = !!data->value; 941 if (data->value)
942 sdata->u.sta.flags |=
943 IEEE80211_STA_PRIVACY_INVOKED;
942 } 944 }
943 break; 945 break;
944 case IW_AUTH_80211_AUTH_ALG: 946 case IW_AUTH_80211_AUTH_ALG:
@@ -948,11 +950,6 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
948 else 950 else
949 ret = -EOPNOTSUPP; 951 ret = -EOPNOTSUPP;
950 break; 952 break;
951 case IW_AUTH_PRIVACY_INVOKED:
952 if (local->ops->set_privacy_invoked)
953 ret = local->ops->set_privacy_invoked(
954 local_to_hw(local), data->value);
955 break;
956 default: 953 default:
957 ret = -EOPNOTSUPP; 954 ret = -EOPNOTSUPP;
958 break; 955 break;
diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/ieee80211_rate.c
index 93abb8fff141..7254bd609839 100644
--- a/net/mac80211/ieee80211_rate.c
+++ b/net/mac80211/ieee80211_rate.c
@@ -25,13 +25,25 @@ int ieee80211_rate_control_register(struct rate_control_ops *ops)
25{ 25{
26 struct rate_control_alg *alg; 26 struct rate_control_alg *alg;
27 27
28 if (!ops->name)
29 return -EINVAL;
30
31 mutex_lock(&rate_ctrl_mutex);
32 list_for_each_entry(alg, &rate_ctrl_algs, list) {
33 if (!strcmp(alg->ops->name, ops->name)) {
34 /* don't register an algorithm twice */
35 WARN_ON(1);
36 return -EALREADY;
37 }
38 }
39
28 alg = kzalloc(sizeof(*alg), GFP_KERNEL); 40 alg = kzalloc(sizeof(*alg), GFP_KERNEL);
29 if (alg == NULL) { 41 if (alg == NULL) {
42 mutex_unlock(&rate_ctrl_mutex);
30 return -ENOMEM; 43 return -ENOMEM;
31 } 44 }
32 alg->ops = ops; 45 alg->ops = ops;
33 46
34 mutex_lock(&rate_ctrl_mutex);
35 list_add_tail(&alg->list, &rate_ctrl_algs); 47 list_add_tail(&alg->list, &rate_ctrl_algs);
36 mutex_unlock(&rate_ctrl_mutex); 48 mutex_unlock(&rate_ctrl_mutex);
37 49
@@ -61,9 +73,12 @@ ieee80211_try_rate_control_ops_get(const char *name)
61 struct rate_control_alg *alg; 73 struct rate_control_alg *alg;
62 struct rate_control_ops *ops = NULL; 74 struct rate_control_ops *ops = NULL;
63 75
76 if (!name)
77 return NULL;
78
64 mutex_lock(&rate_ctrl_mutex); 79 mutex_lock(&rate_ctrl_mutex);
65 list_for_each_entry(alg, &rate_ctrl_algs, list) { 80 list_for_each_entry(alg, &rate_ctrl_algs, list) {
66 if (!name || !strcmp(alg->ops->name, name)) 81 if (!strcmp(alg->ops->name, name))
67 if (try_module_get(alg->ops->module)) { 82 if (try_module_get(alg->ops->module)) {
68 ops = alg->ops; 83 ops = alg->ops;
69 break; 84 break;
@@ -80,9 +95,12 @@ ieee80211_rate_control_ops_get(const char *name)
80{ 95{
81 struct rate_control_ops *ops; 96 struct rate_control_ops *ops;
82 97
98 if (!name)
99 name = "simple";
100
83 ops = ieee80211_try_rate_control_ops_get(name); 101 ops = ieee80211_try_rate_control_ops_get(name);
84 if (!ops) { 102 if (!ops) {
85 request_module("rc80211_%s", name ? name : "default"); 103 request_module("rc80211_%s", name);
86 ops = ieee80211_try_rate_control_ops_get(name); 104 ops = ieee80211_try_rate_control_ops_get(name);
87 } 105 }
88 return ops; 106 return ops;
diff --git a/net/mac80211/ieee80211_rate.h b/net/mac80211/ieee80211_rate.h
index 7cd1ebab4f83..23688139ffb3 100644
--- a/net/mac80211/ieee80211_rate.h
+++ b/net/mac80211/ieee80211_rate.h
@@ -65,6 +65,9 @@ struct rate_control_ref {
65 struct kref kref; 65 struct kref kref;
66}; 66};
67 67
68/* default 'simple' algorithm */
69extern struct rate_control_ops mac80211_rcsimple;
70
68int ieee80211_rate_control_register(struct rate_control_ops *ops); 71int ieee80211_rate_control_register(struct rate_control_ops *ops);
69void ieee80211_rate_control_unregister(struct rate_control_ops *ops); 72void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
70 73
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index fda0e06453e8..16afd24d4f6b 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -704,10 +704,11 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
704{ 704{
705 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 705 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
706 struct ieee80211_sta_bss *bss; 706 struct ieee80211_sta_bss *bss;
707 int res = 0; 707 int bss_privacy;
708 int wep_privacy;
709 int privacy_invoked;
708 710
709 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL) || 711 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
710 ifsta->key_management_enabled)
711 return 0; 712 return 0;
712 713
713 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, 714 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel,
@@ -715,13 +716,16 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
715 if (!bss) 716 if (!bss)
716 return 0; 717 return 0;
717 718
718 if (ieee80211_sta_wep_configured(dev) != 719 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
719 !!(bss->capability & WLAN_CAPABILITY_PRIVACY)) 720 wep_privacy = !!ieee80211_sta_wep_configured(dev);
720 res = 1; 721 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
721 722
722 ieee80211_rx_bss_put(dev, bss); 723 ieee80211_rx_bss_put(dev, bss);
723 724
724 return res; 725 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked))
726 return 0;
727
728 return 1;
725} 729}
726 730
727 731
@@ -1998,7 +2002,10 @@ void ieee80211_sta_work(struct work_struct *work)
1998 if (ifsta->state != IEEE80211_AUTHENTICATE && 2002 if (ifsta->state != IEEE80211_AUTHENTICATE &&
1999 ifsta->state != IEEE80211_ASSOCIATE && 2003 ifsta->state != IEEE80211_ASSOCIATE &&
2000 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { 2004 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
2001 ieee80211_sta_start_scan(dev, NULL, 0); 2005 if (ifsta->scan_ssid_len)
2006 ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len);
2007 else
2008 ieee80211_sta_start_scan(dev, NULL, 0);
2002 return; 2009 return;
2003 } 2010 }
2004 2011
@@ -2640,7 +2647,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
2640 local->sta_scanning = 0; 2647 local->sta_scanning = 0;
2641 2648
2642 if (ieee80211_hw_config(local)) 2649 if (ieee80211_hw_config(local))
2643 printk(KERN_DEBUG "%s: failed to restore operational" 2650 printk(KERN_DEBUG "%s: failed to restore operational "
2644 "channel after scan\n", dev->name); 2651 "channel after scan\n", dev->name);
2645 2652
2646 2653
@@ -2868,6 +2875,9 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
2868 return -EBUSY; 2875 return -EBUSY;
2869 } 2876 }
2870 2877
2878 ifsta->scan_ssid_len = ssid_len;
2879 if (ssid_len)
2880 memcpy(ifsta->scan_ssid, ssid, ssid_len);
2871 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request); 2881 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
2872 queue_work(local->hw.workqueue, &ifsta->work); 2882 queue_work(local->hw.workqueue, &ifsta->work);
2873 return 0; 2883 return 0;
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index 314b8de88862..da72737364e4 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -7,7 +7,6 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/module.h>
11#include <linux/init.h> 10#include <linux/init.h>
12#include <linux/netdevice.h> 11#include <linux/netdevice.h>
13#include <linux/types.h> 12#include <linux/types.h>
@@ -29,8 +28,6 @@
29#define RATE_CONTROL_INTERVAL (HZ / 20) 28#define RATE_CONTROL_INTERVAL (HZ / 20)
30#define RATE_CONTROL_MIN_TX 10 29#define RATE_CONTROL_MIN_TX 10
31 30
32MODULE_ALIAS("rc80211_default");
33
34static void rate_control_rate_inc(struct ieee80211_local *local, 31static void rate_control_rate_inc(struct ieee80211_local *local,
35 struct sta_info *sta) 32 struct sta_info *sta)
36{ 33{
@@ -394,8 +391,7 @@ static void rate_control_simple_remove_sta_debugfs(void *priv, void *priv_sta)
394} 391}
395#endif 392#endif
396 393
397static struct rate_control_ops rate_control_simple = { 394struct rate_control_ops mac80211_rcsimple = {
398 .module = THIS_MODULE,
399 .name = "simple", 395 .name = "simple",
400 .tx_status = rate_control_simple_tx_status, 396 .tx_status = rate_control_simple_tx_status,
401 .get_rate = rate_control_simple_get_rate, 397 .get_rate = rate_control_simple_get_rate,
@@ -410,22 +406,3 @@ static struct rate_control_ops rate_control_simple = {
410 .remove_sta_debugfs = rate_control_simple_remove_sta_debugfs, 406 .remove_sta_debugfs = rate_control_simple_remove_sta_debugfs,
411#endif 407#endif
412}; 408};
413
414
415static int __init rate_control_simple_init(void)
416{
417 return ieee80211_rate_control_register(&rate_control_simple);
418}
419
420
421static void __exit rate_control_simple_exit(void)
422{
423 ieee80211_rate_control_unregister(&rate_control_simple);
424}
425
426
427subsys_initcall(rate_control_simple_init);
428module_exit(rate_control_simple_exit);
429
430MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
431MODULE_LICENSE("GPL");
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index ece77766ea2b..428a9fcf57d6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -509,9 +509,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
509 rx->key->tx_rx_count++; 509 rx->key->tx_rx_count++;
510 /* TODO: add threshold stuff again */ 510 /* TODO: add threshold stuff again */
511 } else { 511 } else {
512#ifdef CONFIG_MAC80211_DEBUG
512 if (net_ratelimit()) 513 if (net_ratelimit())
513 printk(KERN_DEBUG "%s: RX protected frame," 514 printk(KERN_DEBUG "%s: RX protected frame,"
514 " but have no key\n", rx->dev->name); 515 " but have no key\n", rx->dev->name);
516#endif /* CONFIG_MAC80211_DEBUG */
515 return TXRX_DROP; 517 return TXRX_DROP;
516 } 518 }
517 519
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a84a23310ff4..9bf0e1cc530a 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -314,9 +314,11 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx)
314 314
315 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { 315 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) {
316 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { 316 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) {
317#ifdef CONFIG_MAC80211_DEBUG
317 if (net_ratelimit()) 318 if (net_ratelimit())
318 printk(KERN_DEBUG "%s: RX WEP frame, decrypt " 319 printk(KERN_DEBUG "%s: RX WEP frame, decrypt "
319 "failed\n", rx->dev->name); 320 "failed\n", rx->dev->name);
321#endif /* CONFIG_MAC80211_DEBUG */
320 return TXRX_DROP; 322 return TXRX_DROP;
321 } 323 }
322 } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) { 324 } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) {
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 6695efba57ec..20cec1cb956f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -323,9 +323,12 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
323 &rx->u.rx.tkip_iv32, 323 &rx->u.rx.tkip_iv32,
324 &rx->u.rx.tkip_iv16); 324 &rx->u.rx.tkip_iv16);
325 if (res != TKIP_DECRYPT_OK || wpa_test) { 325 if (res != TKIP_DECRYPT_OK || wpa_test) {
326 printk(KERN_DEBUG "%s: TKIP decrypt failed for RX frame from " 326#ifdef CONFIG_MAC80211_DEBUG
327 "%s (res=%d)\n", 327 if (net_ratelimit())
328 rx->dev->name, print_mac(mac, rx->sta->addr), res); 328 printk(KERN_DEBUG "%s: TKIP decrypt failed for RX "
329 "frame from %s (res=%d)\n", rx->dev->name,
330 print_mac(mac, rx->sta->addr), res);
331#endif /* CONFIG_MAC80211_DEBUG */
329 return TXRX_DROP; 332 return TXRX_DROP;
330 } 333 }
331 334
@@ -594,9 +597,12 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
594 skb->data + hdrlen + CCMP_HDR_LEN, data_len, 597 skb->data + hdrlen + CCMP_HDR_LEN, data_len,
595 skb->data + skb->len - CCMP_MIC_LEN, 598 skb->data + skb->len - CCMP_MIC_LEN,
596 skb->data + hdrlen + CCMP_HDR_LEN)) { 599 skb->data + hdrlen + CCMP_HDR_LEN)) {
597 printk(KERN_DEBUG "%s: CCMP decrypt failed for RX " 600#ifdef CONFIG_MAC80211_DEBUG
598 "frame from %s\n", rx->dev->name, 601 if (net_ratelimit())
599 print_mac(mac, rx->sta->addr)); 602 printk(KERN_DEBUG "%s: CCMP decrypt failed "
603 "for RX frame from %s\n", rx->dev->name,
604 print_mac(mac, rx->sta->addr));
605#endif /* CONFIG_MAC80211_DEBUG */
600 return TXRX_DROP; 606 return TXRX_DROP;
601 } 607 }
602 } 608 }
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index d7a600a5720a..21a9fcc03796 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -363,7 +363,7 @@ config NETFILTER_XT_TARGET_TRACE
363 the tables, chains, rules. 363 the tables, chains, rules.
364 364
365 If you want to compile it as a module, say M here and read 365 If you want to compile it as a module, say M here and read
366 <file:Documentation/modules.txt>. If unsure, say `N'. 366 <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
367 367
368config NETFILTER_XT_TARGET_SECMARK 368config NETFILTER_XT_TARGET_SECMARK
369 tristate '"SECMARK" target support' 369 tristate '"SECMARK" target support'
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 93c58f973831..ad0e36ebea3d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -40,15 +40,15 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
40# targets 40# targets
41obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o 41obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
42obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o 42obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
43obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
43obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 44obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
44obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o 45obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o 46obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o 48obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o 49obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
50obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
51obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 51obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
52 52
53# matches 53# matches
54obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o 54obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
@@ -59,22 +59,22 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
59obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o 59obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
60obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o 60obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
61obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o 61obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
62obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
62obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o 63obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
63obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o 64obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
64obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o 65obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
65obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o 66obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
66obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o 67obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
67obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o 68obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
68obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o 69obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
69obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o 70obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
71obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
70obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o 72obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
71obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o 73obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
72obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o 74obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
73obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o 75obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
74obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o 76obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
75obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o 77obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
76obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
77obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o 78obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
78obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o 79obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
79obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o 80obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
80obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4d6171bc0829..000c2fb462d0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -999,7 +999,7 @@ struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
999 *vmalloced = 0; 999 *vmalloced = 0;
1000 1000
1001 size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head)); 1001 size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
1002 hash = (void*)__get_free_pages(GFP_KERNEL, 1002 hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
1003 get_order(sizeof(struct hlist_head) 1003 get_order(sizeof(struct hlist_head)
1004 * size)); 1004 * size));
1005 if (!hash) { 1005 if (!hash) {
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index a1a65a1313b3..cf6ba6659a80 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -109,7 +109,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
109 rcu_read_lock(); 109 rcu_read_lock();
110 t = rcu_dereference(nf_ct_ext_types[i]); 110 t = rcu_dereference(nf_ct_ext_types[i]);
111 if (t && t->move) 111 if (t && t->move)
112 t->move(ct, ct->ext + ct->ext->offset[id]); 112 t->move(ct, ct->ext + ct->ext->offset[i]);
113 rcu_read_unlock(); 113 rcu_read_unlock();
114 } 114 }
115 kfree(ct->ext); 115 kfree(ct->ext);
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index aa2831587b82..3dd4b3c76d81 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -23,14 +23,13 @@ static inline int overlap(int min1, int max1, int min2, int max2)
23/* Functions to register sockopt ranges (exclusive). */ 23/* Functions to register sockopt ranges (exclusive). */
24int nf_register_sockopt(struct nf_sockopt_ops *reg) 24int nf_register_sockopt(struct nf_sockopt_ops *reg)
25{ 25{
26 struct list_head *i; 26 struct nf_sockopt_ops *ops;
27 int ret = 0; 27 int ret = 0;
28 28
29 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) 29 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
30 return -EINTR; 30 return -EINTR;
31 31
32 list_for_each(i, &nf_sockopts) { 32 list_for_each_entry(ops, &nf_sockopts, list) {
33 struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
34 if (ops->pf == reg->pf 33 if (ops->pf == reg->pf
35 && (overlap(ops->set_optmin, ops->set_optmax, 34 && (overlap(ops->set_optmin, ops->set_optmax,
36 reg->set_optmin, reg->set_optmax) 35 reg->set_optmin, reg->set_optmax)
@@ -61,48 +60,57 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
61} 60}
62EXPORT_SYMBOL(nf_unregister_sockopt); 61EXPORT_SYMBOL(nf_unregister_sockopt);
63 62
64/* Call get/setsockopt() */ 63static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf,
65static int nf_sockopt(struct sock *sk, int pf, int val, 64 int val, int get)
66 char __user *opt, int *len, int get)
67{ 65{
68 struct list_head *i;
69 struct nf_sockopt_ops *ops; 66 struct nf_sockopt_ops *ops;
70 int ret;
71 67
72 if (sk->sk_net != &init_net) 68 if (sk->sk_net != &init_net)
73 return -ENOPROTOOPT; 69 return ERR_PTR(-ENOPROTOOPT);
74 70
75 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) 71 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
76 return -EINTR; 72 return ERR_PTR(-EINTR);
77 73
78 list_for_each(i, &nf_sockopts) { 74 list_for_each_entry(ops, &nf_sockopts, list) {
79 ops = (struct nf_sockopt_ops *)i;
80 if (ops->pf == pf) { 75 if (ops->pf == pf) {
81 if (!try_module_get(ops->owner)) 76 if (!try_module_get(ops->owner))
82 goto out_nosup; 77 goto out_nosup;
78
83 if (get) { 79 if (get) {
84 if (val >= ops->get_optmin 80 if (val >= ops->get_optmin &&
85 && val < ops->get_optmax) { 81 val < ops->get_optmax)
86 mutex_unlock(&nf_sockopt_mutex);
87 ret = ops->get(sk, val, opt, len);
88 goto out; 82 goto out;
89 }
90 } else { 83 } else {
91 if (val >= ops->set_optmin 84 if (val >= ops->set_optmin &&
92 && val < ops->set_optmax) { 85 val < ops->set_optmax)
93 mutex_unlock(&nf_sockopt_mutex);
94 ret = ops->set(sk, val, opt, *len);
95 goto out; 86 goto out;
96 }
97 } 87 }
98 module_put(ops->owner); 88 module_put(ops->owner);
99 } 89 }
100 } 90 }
101 out_nosup: 91out_nosup:
92 ops = ERR_PTR(-ENOPROTOOPT);
93out:
102 mutex_unlock(&nf_sockopt_mutex); 94 mutex_unlock(&nf_sockopt_mutex);
103 return -ENOPROTOOPT; 95 return ops;
96}
97
98/* Call get/setsockopt() */
99static int nf_sockopt(struct sock *sk, int pf, int val,
100 char __user *opt, int *len, int get)
101{
102 struct nf_sockopt_ops *ops;
103 int ret;
104
105 ops = nf_sockopt_find(sk, pf, val, get);
106 if (IS_ERR(ops))
107 return PTR_ERR(ops);
108
109 if (get)
110 ret = ops->get(sk, val, opt, len);
111 else
112 ret = ops->set(sk, val, opt, *len);
104 113
105 out:
106 module_put(ops->owner); 114 module_put(ops->owner);
107 return ret; 115 return ret;
108} 116}
@@ -124,56 +132,25 @@ EXPORT_SYMBOL(nf_getsockopt);
124static int compat_nf_sockopt(struct sock *sk, int pf, int val, 132static int compat_nf_sockopt(struct sock *sk, int pf, int val,
125 char __user *opt, int *len, int get) 133 char __user *opt, int *len, int get)
126{ 134{
127 struct list_head *i;
128 struct nf_sockopt_ops *ops; 135 struct nf_sockopt_ops *ops;
129 int ret; 136 int ret;
130 137
131 if (sk->sk_net != &init_net) 138 ops = nf_sockopt_find(sk, pf, val, get);
132 return -ENOPROTOOPT; 139 if (IS_ERR(ops))
133 140 return PTR_ERR(ops);
134 141
135 if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) 142 if (get) {
136 return -EINTR; 143 if (ops->compat_get)
137 144 ret = ops->compat_get(sk, val, opt, len);
138 list_for_each(i, &nf_sockopts) { 145 else
139 ops = (struct nf_sockopt_ops *)i; 146 ret = ops->get(sk, val, opt, len);
140 if (ops->pf == pf) { 147 } else {
141 if (!try_module_get(ops->owner)) 148 if (ops->compat_set)
142 goto out_nosup; 149 ret = ops->compat_set(sk, val, opt, *len);
143 150 else
144 if (get) { 151 ret = ops->set(sk, val, opt, *len);
145 if (val >= ops->get_optmin
146 && val < ops->get_optmax) {
147 mutex_unlock(&nf_sockopt_mutex);
148 if (ops->compat_get)
149 ret = ops->compat_get(sk,
150 val, opt, len);
151 else
152 ret = ops->get(sk,
153 val, opt, len);
154 goto out;
155 }
156 } else {
157 if (val >= ops->set_optmin
158 && val < ops->set_optmax) {
159 mutex_unlock(&nf_sockopt_mutex);
160 if (ops->compat_set)
161 ret = ops->compat_set(sk,
162 val, opt, *len);
163 else
164 ret = ops->set(sk,
165 val, opt, *len);
166 goto out;
167 }
168 }
169 module_put(ops->owner);
170 }
171 } 152 }
172 out_nosup:
173 mutex_unlock(&nf_sockopt_mutex);
174 return -ENOPROTOOPT;
175 153
176 out:
177 module_put(ops->owner); 154 module_put(ops->owner);
178 return ret; 155 return ret;
179} 156}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 06cff1d13690..d7becf08a93a 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -4,7 +4,8 @@
4 * (c) 2000 Gerd Knorr <kraxel@bytesex.org> 4 * (c) 2000 Gerd Knorr <kraxel@bytesex.org>
5 * Nov 2002: Martin Bene <martin.bene@icomedias.com>: 5 * Nov 2002: Martin Bene <martin.bene@icomedias.com>:
6 * only ignore TIME_WAIT or gone connections 6 * only ignore TIME_WAIT or gone connections
7 * Copyright © Jan Engelhardt <jengelh@gmx.de>, 2007 7 * (C) CC Computer Consultants GmbH, 2007
8 * Contact: <jengelh@computergmbh.de>
8 * 9 *
9 * based on ... 10 * based on ...
10 * 11 *
@@ -306,7 +307,7 @@ static void __exit xt_connlimit_exit(void)
306 307
307module_init(xt_connlimit_init); 308module_init(xt_connlimit_init);
308module_exit(xt_connlimit_exit); 309module_exit(xt_connlimit_exit);
309MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>"); 310MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
310MODULE_DESCRIPTION("netfilter xt_connlimit match module"); 311MODULE_DESCRIPTION("netfilter xt_connlimit match module");
311MODULE_LICENSE("GPL"); 312MODULE_LICENSE("GPL");
312MODULE_ALIAS("ipt_connlimit"); 313MODULE_ALIAS("ipt_connlimit");
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index ef48bbd93573..f9c55dcd894b 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * xt_time 2 * xt_time
3 * Copyright © Jan Engelhardt <jengelh@computergmbh.de>, 2007 3 * Copyright © CC Computer Consultants GmbH, 2007
4 * Contact: <jengelh@computergmbh.de>
4 * 5 *
5 * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org> 6 * based on ipt_time by Fabrice MARIE <fabrice@netfilter.org>
6 * This is a module which is used for time matching 7 * This is a module which is used for time matching
@@ -169,7 +170,7 @@ static bool xt_time_match(const struct sk_buff *skb,
169 if (skb->tstamp.tv64 == 0) 170 if (skb->tstamp.tv64 == 0)
170 __net_timestamp((struct sk_buff *)skb); 171 __net_timestamp((struct sk_buff *)skb);
171 172
172 stamp = skb->tstamp.tv64; 173 stamp = ktime_to_ns(skb->tstamp);
173 do_div(stamp, NSEC_PER_SEC); 174 do_div(stamp, NSEC_PER_SEC);
174 175
175 if (info->flags & XT_TIME_LOCAL_TZ) 176 if (info->flags & XT_TIME_LOCAL_TZ)
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index bec427915b30..af75b8c3f20b 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -2,7 +2,8 @@
2 * xt_u32 - kernel module to match u32 packet content 2 * xt_u32 - kernel module to match u32 packet content
3 * 3 *
4 * Original author: Don Cohen <don@isis.cs3-inc.com> 4 * Original author: Don Cohen <don@isis.cs3-inc.com>
5 * © Jan Engelhardt <jengelh@gmx.de>, 2007 5 * (C) CC Computer Consultants GmbH, 2007
6 * Contact: <jengelh@computergmbh.de>
6 */ 7 */
7 8
8#include <linux/module.h> 9#include <linux/module.h>
@@ -129,7 +130,7 @@ static void __exit xt_u32_exit(void)
129 130
130module_init(xt_u32_init); 131module_init(xt_u32_init);
131module_exit(xt_u32_exit); 132module_exit(xt_u32_exit);
132MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>"); 133MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
133MODULE_DESCRIPTION("netfilter u32 match module"); 134MODULE_DESCRIPTION("netfilter u32 match module");
134MODULE_LICENSE("GPL"); 135MODULE_LICENSE("GPL");
135MODULE_ALIAS("ipt_u32"); 136MODULE_ALIAS("ipt_u32");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4f994c0fb3f8..de3988ba1f46 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -396,7 +396,7 @@ static int __netlink_create(struct net *net, struct socket *sock,
396 396
397 sock->ops = &netlink_ops; 397 sock->ops = &netlink_ops;
398 398
399 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1); 399 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
400 if (!sk) 400 if (!sk)
401 return -ENOMEM; 401 return -ENOMEM;
402 402
@@ -752,7 +752,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
752 * 1: repeat lookup - reference dropped while waiting for socket memory. 752 * 1: repeat lookup - reference dropped while waiting for socket memory.
753 */ 753 */
754int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, 754int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
755 long timeo, struct sock *ssk) 755 long *timeo, struct sock *ssk)
756{ 756{
757 struct netlink_sock *nlk; 757 struct netlink_sock *nlk;
758 758
@@ -761,7 +761,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
761 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 761 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
762 test_bit(0, &nlk->state)) { 762 test_bit(0, &nlk->state)) {
763 DECLARE_WAITQUEUE(wait, current); 763 DECLARE_WAITQUEUE(wait, current);
764 if (!timeo) { 764 if (!*timeo) {
765 if (!ssk || netlink_is_kernel(ssk)) 765 if (!ssk || netlink_is_kernel(ssk))
766 netlink_overrun(sk); 766 netlink_overrun(sk);
767 sock_put(sk); 767 sock_put(sk);
@@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
775 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 775 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
776 test_bit(0, &nlk->state)) && 776 test_bit(0, &nlk->state)) &&
777 !sock_flag(sk, SOCK_DEAD)) 777 !sock_flag(sk, SOCK_DEAD))
778 timeo = schedule_timeout(timeo); 778 *timeo = schedule_timeout(*timeo);
779 779
780 __set_current_state(TASK_RUNNING); 780 __set_current_state(TASK_RUNNING);
781 remove_wait_queue(&nlk->wait, &wait); 781 remove_wait_queue(&nlk->wait, &wait);
@@ -783,7 +783,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
783 783
784 if (signal_pending(current)) { 784 if (signal_pending(current)) {
785 kfree_skb(skb); 785 kfree_skb(skb);
786 return sock_intr_errno(timeo); 786 return sock_intr_errno(*timeo);
787 } 787 }
788 return 1; 788 return 1;
789 } 789 }
@@ -877,7 +877,7 @@ retry:
877 if (netlink_is_kernel(sk)) 877 if (netlink_is_kernel(sk))
878 return netlink_unicast_kernel(sk, skb); 878 return netlink_unicast_kernel(sk, skb);
879 879
880 err = netlink_attachskb(sk, skb, nonblock, timeo, ssk); 880 err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
881 if (err == 1) 881 if (err == 1)
882 goto retry; 882 goto retry;
883 if (err) 883 if (err)
@@ -1888,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net)
1888#endif 1888#endif
1889} 1889}
1890 1890
1891static struct pernet_operations netlink_net_ops = { 1891static struct pernet_operations __net_initdata netlink_net_ops = {
1892 .init = netlink_net_init, 1892 .init = netlink_net_init,
1893 .exit = netlink_net_exit, 1893 .exit = netlink_net_exit,
1894}; 1894};
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 3a4d479ea64e..972250c974f1 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -423,7 +423,8 @@ static int nr_create(struct net *net, struct socket *sock, int protocol)
423 if (sock->type != SOCK_SEQPACKET || protocol != 0) 423 if (sock->type != SOCK_SEQPACKET || protocol != 0)
424 return -ESOCKTNOSUPPORT; 424 return -ESOCKTNOSUPPORT;
425 425
426 if ((sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, 1)) == NULL) 426 sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
427 if (sk == NULL)
427 return -ENOMEM; 428 return -ENOMEM;
428 429
429 nr = nr_sk(sk); 430 nr = nr_sk(sk);
@@ -465,7 +466,8 @@ static struct sock *nr_make_new(struct sock *osk)
465 if (osk->sk_type != SOCK_SEQPACKET) 466 if (osk->sk_type != SOCK_SEQPACKET)
466 return NULL; 467 return NULL;
467 468
468 if ((sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) 469 sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot);
470 if (sk == NULL)
469 return NULL; 471 return NULL;
470 472
471 nr = nr_sk(sk); 473 nr = nr_sk(sk);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d0936506b731..8a7807dbba01 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -139,9 +139,6 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
139static HLIST_HEAD(packet_sklist); 139static HLIST_HEAD(packet_sklist);
140static DEFINE_RWLOCK(packet_sklist_lock); 140static DEFINE_RWLOCK(packet_sklist_lock);
141 141
142static atomic_t packet_socks_nr;
143
144
145/* Private packet socket structures. */ 142/* Private packet socket structures. */
146 143
147struct packet_mclist 144struct packet_mclist
@@ -236,10 +233,7 @@ static void packet_sock_destruct(struct sock *sk)
236 return; 233 return;
237 } 234 }
238 235
239 atomic_dec(&packet_socks_nr); 236 sk_refcnt_debug_dec(sk);
240#ifdef PACKET_REFCNT_DEBUG
241 printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
242#endif
243} 237}
244 238
245 239
@@ -515,7 +509,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
515 sll->sll_hatype = dev->type; 509 sll->sll_hatype = dev->type;
516 sll->sll_protocol = skb->protocol; 510 sll->sll_protocol = skb->protocol;
517 sll->sll_pkttype = skb->pkt_type; 511 sll->sll_pkttype = skb->pkt_type;
518 if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST) 512 if (unlikely(po->origdev))
519 sll->sll_ifindex = orig_dev->ifindex; 513 sll->sll_ifindex = orig_dev->ifindex;
520 else 514 else
521 sll->sll_ifindex = dev->ifindex; 515 sll->sll_ifindex = dev->ifindex;
@@ -661,7 +655,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
661 sll->sll_hatype = dev->type; 655 sll->sll_hatype = dev->type;
662 sll->sll_protocol = skb->protocol; 656 sll->sll_protocol = skb->protocol;
663 sll->sll_pkttype = skb->pkt_type; 657 sll->sll_pkttype = skb->pkt_type;
664 if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST) 658 if (unlikely(po->origdev))
665 sll->sll_ifindex = orig_dev->ifindex; 659 sll->sll_ifindex = orig_dev->ifindex;
666 else 660 else
667 sll->sll_ifindex = dev->ifindex; 661 sll->sll_ifindex = dev->ifindex;
@@ -849,6 +843,7 @@ static int packet_release(struct socket *sock)
849 /* Purge queues */ 843 /* Purge queues */
850 844
851 skb_queue_purge(&sk->sk_receive_queue); 845 skb_queue_purge(&sk->sk_receive_queue);
846 sk_refcnt_debug_release(sk);
852 847
853 sock_put(sk); 848 sock_put(sk);
854 return 0; 849 return 0;
@@ -886,20 +881,14 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
886 if (protocol == 0) 881 if (protocol == 0)
887 goto out_unlock; 882 goto out_unlock;
888 883
889 if (dev) { 884 if (!dev || (dev->flags & IFF_UP)) {
890 if (dev->flags&IFF_UP) {
891 dev_add_pack(&po->prot_hook);
892 sock_hold(sk);
893 po->running = 1;
894 } else {
895 sk->sk_err = ENETDOWN;
896 if (!sock_flag(sk, SOCK_DEAD))
897 sk->sk_error_report(sk);
898 }
899 } else {
900 dev_add_pack(&po->prot_hook); 885 dev_add_pack(&po->prot_hook);
901 sock_hold(sk); 886 sock_hold(sk);
902 po->running = 1; 887 po->running = 1;
888 } else {
889 sk->sk_err = ENETDOWN;
890 if (!sock_flag(sk, SOCK_DEAD))
891 sk->sk_error_report(sk);
903 } 892 }
904 893
905out_unlock: 894out_unlock:
@@ -995,7 +984,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
995 sock->state = SS_UNCONNECTED; 984 sock->state = SS_UNCONNECTED;
996 985
997 err = -ENOBUFS; 986 err = -ENOBUFS;
998 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, 1); 987 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
999 if (sk == NULL) 988 if (sk == NULL)
1000 goto out; 989 goto out;
1001 990
@@ -1010,7 +999,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
1010 po->num = proto; 999 po->num = proto;
1011 1000
1012 sk->sk_destruct = packet_sock_destruct; 1001 sk->sk_destruct = packet_sock_destruct;
1013 atomic_inc(&packet_socks_nr); 1002 sk_refcnt_debug_inc(sk);
1014 1003
1015 /* 1004 /*
1016 * Attach a protocol block 1005 * Attach a protocol block
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 51d151c0e962..73d60a307129 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -27,6 +27,10 @@
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/rfkill.h> 28#include <linux/rfkill.h>
29 29
30/* Get declaration of rfkill_switch_all() to shut up sparse. */
31#include "rfkill-input.h"
32
33
30MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); 34MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
31MODULE_VERSION("1.0"); 35MODULE_VERSION("1.0");
32MODULE_DESCRIPTION("RF switch support"); 36MODULE_DESCRIPTION("RF switch support");
@@ -276,21 +280,17 @@ static struct class rfkill_class = {
276 280
277static int rfkill_add_switch(struct rfkill *rfkill) 281static int rfkill_add_switch(struct rfkill *rfkill)
278{ 282{
279 int retval; 283 int error;
280
281 retval = mutex_lock_interruptible(&rfkill_mutex);
282 if (retval)
283 return retval;
284 284
285 retval = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]); 285 mutex_lock(&rfkill_mutex);
286 if (retval)
287 goto out;
288 286
289 list_add_tail(&rfkill->node, &rfkill_list); 287 error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]);
288 if (!error)
289 list_add_tail(&rfkill->node, &rfkill_list);
290 290
291 out:
292 mutex_unlock(&rfkill_mutex); 291 mutex_unlock(&rfkill_mutex);
293 return retval; 292
293 return error;
294} 294}
295 295
296static void rfkill_remove_switch(struct rfkill *rfkill) 296static void rfkill_remove_switch(struct rfkill *rfkill)
@@ -387,20 +387,23 @@ int rfkill_register(struct rfkill *rfkill)
387 387
388 if (!rfkill->toggle_radio) 388 if (!rfkill->toggle_radio)
389 return -EINVAL; 389 return -EINVAL;
390 if (rfkill->type >= RFKILL_TYPE_MAX)
391 return -EINVAL;
392
393 snprintf(dev->bus_id, sizeof(dev->bus_id),
394 "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
395
396 rfkill_led_trigger_register(rfkill);
390 397
391 error = rfkill_add_switch(rfkill); 398 error = rfkill_add_switch(rfkill);
392 if (error) 399 if (error)
393 return error; 400 return error;
394 401
395 snprintf(dev->bus_id, sizeof(dev->bus_id),
396 "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
397
398 error = device_add(dev); 402 error = device_add(dev);
399 if (error) { 403 if (error) {
400 rfkill_remove_switch(rfkill); 404 rfkill_remove_switch(rfkill);
401 return error; 405 return error;
402 } 406 }
403 rfkill_led_trigger_register(rfkill);
404 407
405 return 0; 408 return 0;
406} 409}
@@ -416,9 +419,9 @@ EXPORT_SYMBOL(rfkill_register);
416 */ 419 */
417void rfkill_unregister(struct rfkill *rfkill) 420void rfkill_unregister(struct rfkill *rfkill)
418{ 421{
419 rfkill_led_trigger_unregister(rfkill);
420 device_del(&rfkill->dev); 422 device_del(&rfkill->dev);
421 rfkill_remove_switch(rfkill); 423 rfkill_remove_switch(rfkill);
424 rfkill_led_trigger_unregister(rfkill);
422 put_device(&rfkill->dev); 425 put_device(&rfkill->dev);
423} 426}
424EXPORT_SYMBOL(rfkill_unregister); 427EXPORT_SYMBOL(rfkill_unregister);
@@ -448,5 +451,5 @@ static void __exit rfkill_exit(void)
448 class_unregister(&rfkill_class); 451 class_unregister(&rfkill_class);
449} 452}
450 453
451module_init(rfkill_init); 454subsys_initcall(rfkill_init);
452module_exit(rfkill_exit); 455module_exit(rfkill_exit);
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 509defe53ee5..ed2d65cd8010 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -513,7 +513,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol)
513 if (sock->type != SOCK_SEQPACKET || protocol != 0) 513 if (sock->type != SOCK_SEQPACKET || protocol != 0)
514 return -ESOCKTNOSUPPORT; 514 return -ESOCKTNOSUPPORT;
515 515
516 if ((sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) 516 sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
517 if (sk == NULL)
517 return -ENOMEM; 518 return -ENOMEM;
518 519
519 rose = rose_sk(sk); 520 rose = rose_sk(sk);
@@ -551,7 +552,8 @@ static struct sock *rose_make_new(struct sock *osk)
551 if (osk->sk_type != SOCK_SEQPACKET) 552 if (osk->sk_type != SOCK_SEQPACKET)
552 return NULL; 553 return NULL;
553 554
554 if ((sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) 555 sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto);
556 if (sk == NULL)
555 return NULL; 557 return NULL;
556 558
557 rose = rose_sk(sk); 559 rose = rose_sk(sk);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c680017f5c8e..d6389450c4bf 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -627,7 +627,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
627 sock->ops = &rxrpc_rpc_ops; 627 sock->ops = &rxrpc_rpc_ops;
628 sock->state = SS_UNCONNECTED; 628 sock->state = SS_UNCONNECTED;
629 629
630 sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1); 630 sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
631 if (!sk) 631 if (!sk)
632 return -ENOMEM; 632 return -ENOMEM;
633 633
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index fe03f71f17da..f3a2bd747a8f 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -114,7 +114,7 @@ static int rxrpc_create_local(struct rxrpc_local *local)
114 return 0; 114 return 0;
115 115
116error: 116error:
117 local->socket->ops->shutdown(local->socket, 2); 117 kernel_sock_shutdown(local->socket, SHUT_RDWR);
118 local->socket->sk->sk_user_data = NULL; 118 local->socket->sk->sk_user_data = NULL;
119 sock_release(local->socket); 119 sock_release(local->socket);
120 local->socket = NULL; 120 local->socket = NULL;
@@ -267,7 +267,7 @@ static void rxrpc_destroy_local(struct work_struct *work)
267 /* finish cleaning up the local descriptor */ 267 /* finish cleaning up the local descriptor */
268 rxrpc_purge_queue(&local->accept_queue); 268 rxrpc_purge_queue(&local->accept_queue);
269 rxrpc_purge_queue(&local->reject_queue); 269 rxrpc_purge_queue(&local->reject_queue);
270 local->socket->ops->shutdown(local->socket, 2); 270 kernel_sock_shutdown(local->socket, SHUT_RDWR);
271 sock_release(local->socket); 271 sock_release(local->socket);
272 272
273 up_read(&rxrpc_local_sem); 273 up_read(&rxrpc_local_sem);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index eebefb6ef139..e09a95aa68ff 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -237,7 +237,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
237 len = data_size + call->conn->size_align - 1; 237 len = data_size + call->conn->size_align - 1;
238 len &= ~(call->conn->size_align - 1); 238 len &= ~(call->conn->size_align - 1);
239 239
240 sg_init_table(sg, skb_to_sgvec(skb, sg, 0, len)); 240 sg_init_table(sg, nsg);
241 skb_to_sgvec(skb, sg, 0, len);
241 crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); 242 crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
242 243
243 _leave(" = 0"); 244 _leave(" = 0");
@@ -344,7 +345,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
344 goto nomem; 345 goto nomem;
345 346
346 sg_init_table(sg, nsg); 347 sg_init_table(sg, nsg);
347 sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, 8)); 348 skb_to_sgvec(skb, sg, 0, 8);
348 349
349 /* start the decryption afresh */ 350 /* start the decryption afresh */
350 memset(&iv, 0, sizeof(iv)); 351 memset(&iv, 0, sizeof(iv));
@@ -426,7 +427,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
426 } 427 }
427 428
428 sg_init_table(sg, nsg); 429 sg_init_table(sg, nsg);
429 sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, skb->len)); 430 skb_to_sgvec(skb, sg, 0, skb->len);
430 431
431 /* decrypt from the session key */ 432 /* decrypt from the session key */
432 payload = call->conn->key->payload.data; 433 payload = call->conn->key->payload.data;
@@ -701,7 +702,7 @@ static void rxkad_sg_set_buf2(struct scatterlist sg[2],
701 nsg++; 702 nsg++;
702 } 703 }
703 704
704 sg_mark_end(sg, nsg); 705 sg_mark_end(&sg[nsg - 1]);
705 706
706 ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); 707 ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
707} 708}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 9e98c6e567dd..c39008209164 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -91,7 +91,7 @@ static struct tc_u_common *u32_list;
91 91
92static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift) 92static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
93{ 93{
94 unsigned h = (key & sel->hmask)>>fshift; 94 unsigned h = ntohl(key & sel->hmask)>>fshift;
95 95
96 return h; 96 return h;
97} 97}
@@ -613,17 +613,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
613 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); 613 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
614 n->ht_up = ht; 614 n->ht_up = ht;
615 n->handle = handle; 615 n->handle = handle;
616{ 616 n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
617 u8 i = 0;
618 u32 mask = s->hmask;
619 if (mask) {
620 while (!(mask & 1)) {
621 i++;
622 mask>>=1;
623 }
624 }
625 n->fshift = i;
626}
627 617
628#ifdef CONFIG_CLS_U32_MARK 618#ifdef CONFIG_CLS_U32_MARK
629 if (tb[TCA_U32_MARK-1]) { 619 if (tb[TCA_U32_MARK-1]) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fa1a6f45dc41..e595e6570ce0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,7 +134,7 @@ static inline int qdisc_restart(struct net_device *dev)
134{ 134{
135 struct Qdisc *q = dev->qdisc; 135 struct Qdisc *q = dev->qdisc;
136 struct sk_buff *skb; 136 struct sk_buff *skb;
137 int ret; 137 int ret = NETDEV_TX_BUSY;
138 138
139 /* Dequeue packet */ 139 /* Dequeue packet */
140 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) 140 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -145,7 +145,8 @@ static inline int qdisc_restart(struct net_device *dev)
145 spin_unlock(&dev->queue_lock); 145 spin_unlock(&dev->queue_lock);
146 146
147 HARD_TX_LOCK(dev, smp_processor_id()); 147 HARD_TX_LOCK(dev, smp_processor_id());
148 ret = dev_hard_start_xmit(skb, dev); 148 if (!netif_subqueue_stopped(dev, skb))
149 ret = dev_hard_start_xmit(skb, dev);
149 HARD_TX_UNLOCK(dev); 150 HARD_TX_UNLOCK(dev);
150 151
151 spin_lock(&dev->queue_lock); 152 spin_lock(&dev->queue_lock);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 421281d9dd1d..c0ed06d4a504 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -252,6 +252,9 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
252static inline int teql_resolve(struct sk_buff *skb, 252static inline int teql_resolve(struct sk_buff *skb,
253 struct sk_buff *skb_res, struct net_device *dev) 253 struct sk_buff *skb_res, struct net_device *dev)
254{ 254{
255 if (dev->qdisc == &noop_qdisc)
256 return -ENODEV;
257
255 if (dev->header_ops == NULL || 258 if (dev->header_ops == NULL ||
256 skb->dst == NULL || 259 skb->dst == NULL ||
257 skb->dst->neighbour == NULL) 260 skb->dst->neighbour == NULL)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 03158e3665da..013e3d3ab0f1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
262 */ 262 */
263 asoc->peer.sack_needed = 1; 263 asoc->peer.sack_needed = 1;
264 264
265 /* Assume that the peer recongizes ASCONF until reported otherwise 265 /* Assume that the peer will tell us if he recognizes ASCONF
266 * via an ERROR chunk. 266 * as part of INIT exchange.
267 * The sctp_addip_noauth option is there for backward compatibilty
268 * and will revert old behavior.
267 */ 269 */
268 asoc->peer.asconf_capable = 1; 270 asoc->peer.asconf_capable = 0;
271 if (sctp_addip_noauth)
272 asoc->peer.asconf_capable = 1;
269 273
270 /* Create an input queue. */ 274 /* Create an input queue. */
271 sctp_inq_init(&asoc->base.inqueue); 275 sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 8af1004abefe..6d5fa6bb371b 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -556,7 +556,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
556 return &sctp_hmac_list[id]; 556 return &sctp_hmac_list[id];
557} 557}
558 558
559static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) 559static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id)
560{ 560{
561 int found = 0; 561 int found = 0;
562 int i; 562 int i;
@@ -573,7 +573,7 @@ static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id)
573 573
574/* See if the HMAC_ID is one that we claim as supported */ 574/* See if the HMAC_ID is one that we claim as supported */
575int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, 575int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
576 __u16 hmac_id) 576 __be16 hmac_id)
577{ 577{
578 struct sctp_hmac_algo_param *hmacs; 578 struct sctp_hmac_algo_param *hmacs;
579 __u16 n_elt; 579 __u16 n_elt;
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index dfffa94fb9f6..cae95af9a8cc 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
180/* Delete an address from the bind address list in the SCTP_bind_addr 180/* Delete an address from the bind address list in the SCTP_bind_addr
181 * structure. 181 * structure.
182 */ 182 */
183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr, 183int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
184 void fastcall (*rcu_call)(struct rcu_head *head,
185 void (*func)(struct rcu_head *head)))
186{ 184{
187 struct sctp_sockaddr_entry *addr, *temp; 185 struct sctp_sockaddr_entry *addr, *temp;
188 186
@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
198 } 196 }
199 } 197 }
200 198
201 /* Call the rcu callback provided in the args. This function is
202 * called by both BH packet processing and user side socket option
203 * processing, but it works on different lists in those 2 contexts.
204 * Each context provides it's own callback, whether call_rcu_bh()
205 * or call_rcu(), to make sure that we wait for an appropriate time.
206 */
207 if (addr && !addr->valid) { 199 if (addr && !addr->valid) {
208 rcu_call(&addr->rcu, sctp_local_addr_free); 200 call_rcu(&addr->rcu, sctp_local_addr_free);
209 SCTP_DBG_OBJCNT_DEC(addr); 201 SCTP_DBG_OBJCNT_DEC(addr);
202 return 0;
210 } 203 }
211 204
212 return -EINVAL; 205 return -EINVAL;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2d2d81ef4a69..de6f505d6ff8 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
328 const union sctp_addr *paddr, 328 const union sctp_addr *paddr,
329 struct sctp_transport **transport) 329 struct sctp_transport **transport)
330{ 330{
331 struct sctp_association *asoc = NULL;
332 struct sctp_transport *t = NULL;
333 struct sctp_hashbucket *head;
334 struct sctp_ep_common *epb;
335 struct hlist_node *node;
336 int hash;
331 int rport; 337 int rport;
332 struct sctp_association *asoc;
333 struct list_head *pos;
334 338
339 *transport = NULL;
335 rport = ntohs(paddr->v4.sin_port); 340 rport = ntohs(paddr->v4.sin_port);
336 341
337 list_for_each(pos, &ep->asocs) { 342 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
338 asoc = list_entry(pos, struct sctp_association, asocs); 343 head = &sctp_assoc_hashtable[hash];
339 if (rport == asoc->peer.port) { 344 read_lock(&head->lock);
340 *transport = sctp_assoc_lookup_paddr(asoc, paddr); 345 sctp_for_each_hentry(epb, node, &head->chain) {
341 346 asoc = sctp_assoc(epb);
342 if (*transport) 347 if (asoc->ep != ep || rport != asoc->peer.port)
343 return asoc; 348 goto next;
349
350 t = sctp_assoc_lookup_paddr(asoc, paddr);
351 if (t) {
352 *transport = t;
353 break;
344 } 354 }
355next:
356 asoc = NULL;
345 } 357 }
346 358 read_unlock(&head->lock);
347 *transport = NULL; 359 return asoc;
348 return NULL;
349} 360}
350 361
351/* Lookup association on an endpoint based on a peer address. BH-safe. */ 362/* Lookup association on an endpoint based on a peer address. BH-safe. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 86503e7fa21e..91ae463b079b 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -656,7 +656,6 @@ discard:
656/* Insert endpoint into the hash table. */ 656/* Insert endpoint into the hash table. */
657static void __sctp_hash_endpoint(struct sctp_endpoint *ep) 657static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
658{ 658{
659 struct sctp_ep_common **epp;
660 struct sctp_ep_common *epb; 659 struct sctp_ep_common *epb;
661 struct sctp_hashbucket *head; 660 struct sctp_hashbucket *head;
662 661
@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
666 head = &sctp_ep_hashtable[epb->hashent]; 665 head = &sctp_ep_hashtable[epb->hashent];
667 666
668 sctp_write_lock(&head->lock); 667 sctp_write_lock(&head->lock);
669 epp = &head->chain; 668 hlist_add_head(&epb->node, &head->chain);
670 epb->next = *epp;
671 if (epb->next)
672 (*epp)->pprev = &epb->next;
673 *epp = epb;
674 epb->pprev = epp;
675 sctp_write_unlock(&head->lock); 669 sctp_write_unlock(&head->lock);
676} 670}
677 671
@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
691 685
692 epb = &ep->base; 686 epb = &ep->base;
693 687
688 if (hlist_unhashed(&epb->node))
689 return;
690
694 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); 691 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
695 692
696 head = &sctp_ep_hashtable[epb->hashent]; 693 head = &sctp_ep_hashtable[epb->hashent];
697 694
698 sctp_write_lock(&head->lock); 695 sctp_write_lock(&head->lock);
699 696 __hlist_del(&epb->node);
700 if (epb->pprev) {
701 if (epb->next)
702 epb->next->pprev = epb->pprev;
703 *epb->pprev = epb->next;
704 epb->pprev = NULL;
705 }
706
707 sctp_write_unlock(&head->lock); 697 sctp_write_unlock(&head->lock);
708} 698}
709 699
@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
721 struct sctp_hashbucket *head; 711 struct sctp_hashbucket *head;
722 struct sctp_ep_common *epb; 712 struct sctp_ep_common *epb;
723 struct sctp_endpoint *ep; 713 struct sctp_endpoint *ep;
714 struct hlist_node *node;
724 int hash; 715 int hash;
725 716
726 hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port)); 717 hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
727 head = &sctp_ep_hashtable[hash]; 718 head = &sctp_ep_hashtable[hash];
728 read_lock(&head->lock); 719 read_lock(&head->lock);
729 for (epb = head->chain; epb; epb = epb->next) { 720 sctp_for_each_hentry(epb, node, &head->chain) {
730 ep = sctp_ep(epb); 721 ep = sctp_ep(epb);
731 if (sctp_endpoint_is_match(ep, laddr)) 722 if (sctp_endpoint_is_match(ep, laddr))
732 goto hit; 723 goto hit;
@@ -744,7 +735,6 @@ hit:
744/* Insert association into the hash table. */ 735/* Insert association into the hash table. */
745static void __sctp_hash_established(struct sctp_association *asoc) 736static void __sctp_hash_established(struct sctp_association *asoc)
746{ 737{
747 struct sctp_ep_common **epp;
748 struct sctp_ep_common *epb; 738 struct sctp_ep_common *epb;
749 struct sctp_hashbucket *head; 739 struct sctp_hashbucket *head;
750 740
@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc)
756 head = &sctp_assoc_hashtable[epb->hashent]; 746 head = &sctp_assoc_hashtable[epb->hashent];
757 747
758 sctp_write_lock(&head->lock); 748 sctp_write_lock(&head->lock);
759 epp = &head->chain; 749 hlist_add_head(&epb->node, &head->chain);
760 epb->next = *epp;
761 if (epb->next)
762 (*epp)->pprev = &epb->next;
763 *epp = epb;
764 epb->pprev = epp;
765 sctp_write_unlock(&head->lock); 750 sctp_write_unlock(&head->lock);
766} 751}
767 752
@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
790 head = &sctp_assoc_hashtable[epb->hashent]; 775 head = &sctp_assoc_hashtable[epb->hashent];
791 776
792 sctp_write_lock(&head->lock); 777 sctp_write_lock(&head->lock);
793 778 __hlist_del(&epb->node);
794 if (epb->pprev) {
795 if (epb->next)
796 epb->next->pprev = epb->pprev;
797 *epb->pprev = epb->next;
798 epb->pprev = NULL;
799 }
800
801 sctp_write_unlock(&head->lock); 779 sctp_write_unlock(&head->lock);
802} 780}
803 781
@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association(
822 struct sctp_ep_common *epb; 800 struct sctp_ep_common *epb;
823 struct sctp_association *asoc; 801 struct sctp_association *asoc;
824 struct sctp_transport *transport; 802 struct sctp_transport *transport;
803 struct hlist_node *node;
825 int hash; 804 int hash;
826 805
827 /* Optimize here for direct hit, only listening connections can 806 /* Optimize here for direct hit, only listening connections can
@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association(
830 hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port)); 809 hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
831 head = &sctp_assoc_hashtable[hash]; 810 head = &sctp_assoc_hashtable[hash];
832 read_lock(&head->lock); 811 read_lock(&head->lock);
833 for (epb = head->chain; epb; epb = epb->next) { 812 sctp_for_each_hentry(epb, node, &head->chain) {
834 asoc = sctp_assoc(epb); 813 asoc = sctp_assoc(epb);
835 transport = sctp_assoc_is_match(asoc, local, peer); 814 transport = sctp_assoc_is_match(asoc, local, peer);
836 if (transport) 815 if (transport)
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f10fe7fbf24c..cf4b7eb023b3 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue)
90void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) 90void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
91{ 91{
92 /* Directly call the packet handling routine. */ 92 /* Directly call the packet handling routine. */
93 if (chunk->rcvr->dead) {
94 sctp_chunk_free(chunk);
95 return;
96 }
93 97
94 /* We are now calling this either from the soft interrupt 98 /* We are now calling this either from the soft interrupt
95 * or from the backlog processing. 99 * or from the backlog processing.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index eb4deaf58914..7f31ff638bc6 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -631,7 +631,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
631 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 631 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
632 struct sctp6_sock *newsctp6sk; 632 struct sctp6_sock *newsctp6sk;
633 633
634 newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot, 1); 634 newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot);
635 if (!newsk) 635 if (!newsk)
636 goto out; 636 goto out;
637 637
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 28f4fe77ceee..fa76f235169b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
382/* Mark all the eligible packets on a transport for retransmission. */ 382/* Mark all the eligible packets on a transport for retransmission. */
383void sctp_retransmit_mark(struct sctp_outq *q, 383void sctp_retransmit_mark(struct sctp_outq *q,
384 struct sctp_transport *transport, 384 struct sctp_transport *transport,
385 __u8 fast_retransmit) 385 __u8 reason)
386{ 386{
387 struct list_head *lchunk, *ltemp; 387 struct list_head *lchunk, *ltemp;
388 struct sctp_chunk *chunk; 388 struct sctp_chunk *chunk;
@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q,
412 continue; 412 continue;
413 } 413 }
414 414
415 /* If we are doing retransmission due to a fast retransmit, 415 /* If we are doing retransmission due to a timeout or pmtu
416 * only the chunk's that are marked for fast retransmit 416 * discovery, only the chunks that are not yet acked should
417 * should be added to the retransmit queue. If we are doing 417 * be added to the retransmit queue.
418 * retransmission due to a timeout or pmtu discovery, only the
419 * chunks that are not yet acked should be added to the
420 * retransmit queue.
421 */ 418 */
422 if ((fast_retransmit && (chunk->fast_retransmit > 0)) || 419 if ((reason == SCTP_RTXR_FAST_RTX &&
423 (!fast_retransmit && !chunk->tsn_gap_acked)) { 420 (chunk->fast_retransmit > 0)) ||
421 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
424 /* If this chunk was sent less then 1 rto ago, do not 422 /* If this chunk was sent less then 1 rto ago, do not
425 * retransmit this chunk, but give the peer time 423 * retransmit this chunk, but give the peer time
426 * to acknowlege it. 424 * to acknowlege it. Do this only when
425 * retransmitting due to T3 timeout.
427 */ 426 */
428 if ((jiffies - chunk->sent_at) < transport->rto) 427 if (reason == SCTP_RTXR_T3_RTX &&
428 (jiffies - chunk->sent_at) < transport->last_rto)
429 continue; 429 continue;
430 430
431 /* RFC 2960 6.2.1 Processing a Received SACK 431 /* RFC 2960 6.2.1 Processing a Received SACK
@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
467 } 467 }
468 } 468 }
469 469
470 SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, " 470 SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
471 "cwnd: %d, ssthresh: %d, flight_size: %d, " 471 "cwnd: %d, ssthresh: %d, flight_size: %d, "
472 "pba: %d\n", __FUNCTION__, 472 "pba: %d\n", __FUNCTION__,
473 transport, fast_retransmit, 473 transport, reason,
474 transport->cwnd, transport->ssthresh, 474 transport->cwnd, transport->ssthresh,
475 transport->flight_size, 475 transport->flight_size,
476 transport->partial_bytes_acked); 476 transport->partial_bytes_acked);
@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
484 sctp_retransmit_reason_t reason) 484 sctp_retransmit_reason_t reason)
485{ 485{
486 int error = 0; 486 int error = 0;
487 __u8 fast_retransmit = 0;
488 487
489 switch(reason) { 488 switch(reason) {
490 case SCTP_RTXR_T3_RTX: 489 case SCTP_RTXR_T3_RTX:
@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
499 case SCTP_RTXR_FAST_RTX: 498 case SCTP_RTXR_FAST_RTX:
500 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 499 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
501 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 500 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
502 fast_retransmit = 1;
503 break; 501 break;
504 case SCTP_RTXR_PMTUD: 502 case SCTP_RTXR_PMTUD:
505 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); 503 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
506 break; 504 break;
505 case SCTP_RTXR_T1_RTX:
506 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
507 break;
507 default: 508 default:
508 BUG(); 509 BUG();
509 } 510 }
510 511
511 sctp_retransmit_mark(q, transport, fast_retransmit); 512 sctp_retransmit_mark(q, transport, reason);
512 513
513 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, 514 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
514 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 515 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
641 642
642 /* If we are here due to a retransmit timeout or a fast 643 /* If we are here due to a retransmit timeout or a fast
643 * retransmit and if there are any chunks left in the retransmit 644 * retransmit and if there are any chunks left in the retransmit
644 * queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit. 645 * queue that could not fit in the PMTU sized packet, they need
646 * to be marked as ineligible for a subsequent fast retransmit.
645 */ 647 */
646 if (rtx_timeout && !lchunk) { 648 if (rtx_timeout && !lchunk) {
647 list_for_each(lchunk1, lqueue) { 649 list_for_each(lchunk1, lqueue) {
@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
660int sctp_outq_uncork(struct sctp_outq *q) 662int sctp_outq_uncork(struct sctp_outq *q)
661{ 663{
662 int error = 0; 664 int error = 0;
663 if (q->cork) { 665 if (q->cork)
664 q->cork = 0; 666 q->cork = 0;
665 error = sctp_outq_flush(q, 0); 667 error = sctp_outq_flush(q, 0);
666 }
667 return error; 668 return error;
668} 669}
669 670
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index e4cd841a22e4..249973204070 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -225,6 +225,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
225 struct sctp_ep_common *epb; 225 struct sctp_ep_common *epb;
226 struct sctp_endpoint *ep; 226 struct sctp_endpoint *ep;
227 struct sock *sk; 227 struct sock *sk;
228 struct hlist_node *node;
228 int hash = *(loff_t *)v; 229 int hash = *(loff_t *)v;
229 230
230 if (hash >= sctp_ep_hashsize) 231 if (hash >= sctp_ep_hashsize)
@@ -233,7 +234,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
233 head = &sctp_ep_hashtable[hash]; 234 head = &sctp_ep_hashtable[hash];
234 sctp_local_bh_disable(); 235 sctp_local_bh_disable();
235 read_lock(&head->lock); 236 read_lock(&head->lock);
236 for (epb = head->chain; epb; epb = epb->next) { 237 sctp_for_each_hentry(epb, node, &head->chain) {
237 ep = sctp_ep(epb); 238 ep = sctp_ep(epb);
238 sk = epb->sk; 239 sk = epb->sk;
239 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 240 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
@@ -328,6 +329,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
328 struct sctp_ep_common *epb; 329 struct sctp_ep_common *epb;
329 struct sctp_association *assoc; 330 struct sctp_association *assoc;
330 struct sock *sk; 331 struct sock *sk;
332 struct hlist_node *node;
331 int hash = *(loff_t *)v; 333 int hash = *(loff_t *)v;
332 334
333 if (hash >= sctp_assoc_hashsize) 335 if (hash >= sctp_assoc_hashsize)
@@ -336,7 +338,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
336 head = &sctp_assoc_hashtable[hash]; 338 head = &sctp_assoc_hashtable[hash];
337 sctp_local_bh_disable(); 339 sctp_local_bh_disable();
338 read_lock(&head->lock); 340 read_lock(&head->lock);
339 for (epb = head->chain; epb; epb = epb->next) { 341 sctp_for_each_hentry(epb, node, &head->chain) {
340 assoc = sctp_assoc(epb); 342 assoc = sctp_assoc(epb);
341 sk = epb->sk; 343 sk = epb->sk;
342 seq_printf(seq, 344 seq_printf(seq,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f5cd96f5fe74..d50f610d1b02 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -552,7 +552,8 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
552{ 552{
553 struct inet_sock *inet = inet_sk(sk); 553 struct inet_sock *inet = inet_sk(sk);
554 struct inet_sock *newinet; 554 struct inet_sock *newinet;
555 struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1); 555 struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL,
556 sk->sk_prot);
556 557
557 if (!newsk) 558 if (!newsk)
558 goto out; 559 goto out;
@@ -1136,7 +1137,7 @@ SCTP_STATIC __init int sctp_init(void)
1136 } 1137 }
1137 for (i = 0; i < sctp_assoc_hashsize; i++) { 1138 for (i = 0; i < sctp_assoc_hashsize; i++) {
1138 rwlock_init(&sctp_assoc_hashtable[i].lock); 1139 rwlock_init(&sctp_assoc_hashtable[i].lock);
1139 sctp_assoc_hashtable[i].chain = NULL; 1140 INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
1140 } 1141 }
1141 1142
1142 /* Allocate and initialize the endpoint hash table. */ 1143 /* Allocate and initialize the endpoint hash table. */
@@ -1150,7 +1151,7 @@ SCTP_STATIC __init int sctp_init(void)
1150 } 1151 }
1151 for (i = 0; i < sctp_ep_hashsize; i++) { 1152 for (i = 0; i < sctp_ep_hashsize; i++) {
1152 rwlock_init(&sctp_ep_hashtable[i].lock); 1153 rwlock_init(&sctp_ep_hashtable[i].lock);
1153 sctp_ep_hashtable[i].chain = NULL; 1154 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
1154 } 1155 }
1155 1156
1156 /* Allocate and initialize the SCTP port hash table. */ 1157 /* Allocate and initialize the SCTP port hash table. */
@@ -1169,7 +1170,7 @@ SCTP_STATIC __init int sctp_init(void)
1169 } 1170 }
1170 for (i = 0; i < sctp_port_hashsize; i++) { 1171 for (i = 0; i < sctp_port_hashsize; i++) {
1171 spin_lock_init(&sctp_port_hashtable[i].lock); 1172 spin_lock_init(&sctp_port_hashtable[i].lock);
1172 sctp_port_hashtable[i].chain = NULL; 1173 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
1173 } 1174 }
1174 1175
1175 printk(KERN_INFO "SCTP: Hash tables configured " 1176 printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1178,6 +1179,7 @@ SCTP_STATIC __init int sctp_init(void)
1178 1179
1179 /* Disable ADDIP by default. */ 1180 /* Disable ADDIP by default. */
1180 sctp_addip_enable = 0; 1181 sctp_addip_enable = 0;
1182 sctp_addip_noauth = 0;
1181 1183
1182 /* Enable PR-SCTP by default. */ 1184 /* Enable PR-SCTP by default. */
1183 sctp_prsctp_enable = 1; 1185 sctp_prsctp_enable = 1;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index c377e4e8f653..5a9783c38de1 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1788,9 +1788,14 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
1788 sizeof(sctp_paramhdr_t); 1788 sizeof(sctp_paramhdr_t);
1789 1789
1790 1790
1791 /* This is a fatal error. Any accumulated non-fatal errors are
1792 * not reported.
1793 */
1794 if (*errp)
1795 sctp_chunk_free(*errp);
1796
1791 /* Create an error chunk and fill it in with our payload. */ 1797 /* Create an error chunk and fill it in with our payload. */
1792 if (!*errp) 1798 *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
1793 *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
1794 1799
1795 if (*errp) { 1800 if (*errp) {
1796 sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, 1801 sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
@@ -1813,9 +1818,15 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
1813{ 1818{
1814 __u16 len = ntohs(param.p->length); 1819 __u16 len = ntohs(param.p->length);
1815 1820
1816 /* Make an ERROR chunk. */ 1821 /* Processing of the HOST_NAME parameter will generate an
1817 if (!*errp) 1822 * ABORT. If we've accumulated any non-fatal errors, they
1818 *errp = sctp_make_op_error_space(asoc, chunk, len); 1823 * would be unrecognized parameters and we should not include
1824 * them in the ABORT.
1825 */
1826 if (*errp)
1827 sctp_chunk_free(*errp);
1828
1829 *errp = sctp_make_op_error_space(asoc, chunk, len);
1819 1830
1820 if (*errp) { 1831 if (*errp) {
1821 sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len); 1832 sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
@@ -1847,7 +1858,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
1847 break; 1858 break;
1848 case SCTP_CID_ASCONF: 1859 case SCTP_CID_ASCONF:
1849 case SCTP_CID_ASCONF_ACK: 1860 case SCTP_CID_ASCONF_ACK:
1850 asoc->peer.addip_capable = 1; 1861 asoc->peer.asconf_capable = 1;
1851 break; 1862 break;
1852 default: 1863 default:
1853 break; 1864 break;
@@ -1862,56 +1873,40 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
1862 * taken if the processing endpoint does not recognize the 1873 * taken if the processing endpoint does not recognize the
1863 * Parameter Type. 1874 * Parameter Type.
1864 * 1875 *
1865 * 00 - Stop processing this SCTP chunk and discard it, 1876 * 00 - Stop processing this parameter; do not process any further
1866 * do not process any further chunks within it. 1877 * parameters within this chunk
1867 * 1878 *
1868 * 01 - Stop processing this SCTP chunk and discard it, 1879 * 01 - Stop processing this parameter, do not process any further
1869 * do not process any further chunks within it, and report 1880 * parameters within this chunk, and report the unrecognized
1870 * the unrecognized parameter in an 'Unrecognized 1881 * parameter in an 'Unrecognized Parameter' ERROR chunk.
1871 * Parameter Type' (in either an ERROR or in the INIT ACK).
1872 * 1882 *
1873 * 10 - Skip this parameter and continue processing. 1883 * 10 - Skip this parameter and continue processing.
1874 * 1884 *
1875 * 11 - Skip this parameter and continue processing but 1885 * 11 - Skip this parameter and continue processing but
1876 * report the unrecognized parameter in an 1886 * report the unrecognized parameter in an
1877 * 'Unrecognized Parameter Type' (in either an ERROR or in 1887 * 'Unrecognized Parameter' ERROR chunk.
1878 * the INIT ACK).
1879 * 1888 *
1880 * Return value: 1889 * Return value:
1881 * 0 - discard the chunk 1890 * SCTP_IERROR_NO_ERROR - continue with the chunk
1882 * 1 - continue with the chunk 1891 * SCTP_IERROR_ERROR - stop and report an error.
1892 * SCTP_IERROR_NOMEME - out of memory.
1883 */ 1893 */
1884static int sctp_process_unk_param(const struct sctp_association *asoc, 1894static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
1885 union sctp_params param, 1895 union sctp_params param,
1886 struct sctp_chunk *chunk, 1896 struct sctp_chunk *chunk,
1887 struct sctp_chunk **errp) 1897 struct sctp_chunk **errp)
1888{ 1898{
1889 int retval = 1; 1899 int retval = SCTP_IERROR_NO_ERROR;
1890 1900
1891 switch (param.p->type & SCTP_PARAM_ACTION_MASK) { 1901 switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
1892 case SCTP_PARAM_ACTION_DISCARD: 1902 case SCTP_PARAM_ACTION_DISCARD:
1893 retval = 0; 1903 retval = SCTP_IERROR_ERROR;
1894 break;
1895 case SCTP_PARAM_ACTION_DISCARD_ERR:
1896 retval = 0;
1897 /* Make an ERROR chunk, preparing enough room for
1898 * returning multiple unknown parameters.
1899 */
1900 if (NULL == *errp)
1901 *errp = sctp_make_op_error_space(asoc, chunk,
1902 ntohs(chunk->chunk_hdr->length));
1903
1904 if (*errp) {
1905 sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
1906 WORD_ROUND(ntohs(param.p->length)));
1907 sctp_addto_chunk(*errp,
1908 WORD_ROUND(ntohs(param.p->length)),
1909 param.v);
1910 }
1911
1912 break; 1904 break;
1913 case SCTP_PARAM_ACTION_SKIP: 1905 case SCTP_PARAM_ACTION_SKIP:
1914 break; 1906 break;
1907 case SCTP_PARAM_ACTION_DISCARD_ERR:
1908 retval = SCTP_IERROR_ERROR;
1909 /* Fall through */
1915 case SCTP_PARAM_ACTION_SKIP_ERR: 1910 case SCTP_PARAM_ACTION_SKIP_ERR:
1916 /* Make an ERROR chunk, preparing enough room for 1911 /* Make an ERROR chunk, preparing enough room for
1917 * returning multiple unknown parameters. 1912 * returning multiple unknown parameters.
@@ -1932,9 +1927,8 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
1932 * to the peer and the association won't be 1927 * to the peer and the association won't be
1933 * established. 1928 * established.
1934 */ 1929 */
1935 retval = 0; 1930 retval = SCTP_IERROR_NOMEM;
1936 } 1931 }
1937
1938 break; 1932 break;
1939 default: 1933 default:
1940 break; 1934 break;
@@ -1943,18 +1937,20 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
1943 return retval; 1937 return retval;
1944} 1938}
1945 1939
1946/* Find unrecognized parameters in the chunk. 1940/* Verify variable length parameters
1947 * Return values: 1941 * Return values:
1948 * 0 - discard the chunk 1942 * SCTP_IERROR_ABORT - trigger an ABORT
1949 * 1 - continue with the chunk 1943 * SCTP_IERROR_NOMEM - out of memory (abort)
1944 * SCTP_IERROR_ERROR - stop processing, trigger an ERROR
1945 * SCTP_IERROR_NO_ERROR - continue with the chunk
1950 */ 1946 */
1951static int sctp_verify_param(const struct sctp_association *asoc, 1947static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
1952 union sctp_params param, 1948 union sctp_params param,
1953 sctp_cid_t cid, 1949 sctp_cid_t cid,
1954 struct sctp_chunk *chunk, 1950 struct sctp_chunk *chunk,
1955 struct sctp_chunk **err_chunk) 1951 struct sctp_chunk **err_chunk)
1956{ 1952{
1957 int retval = 1; 1953 int retval = SCTP_IERROR_NO_ERROR;
1958 1954
1959 /* FIXME - This routine is not looking at each parameter per the 1955 /* FIXME - This routine is not looking at each parameter per the
1960 * chunk type, i.e., unrecognized parameters should be further 1956 * chunk type, i.e., unrecognized parameters should be further
@@ -1976,7 +1972,9 @@ static int sctp_verify_param(const struct sctp_association *asoc,
1976 1972
1977 case SCTP_PARAM_HOST_NAME_ADDRESS: 1973 case SCTP_PARAM_HOST_NAME_ADDRESS:
1978 /* Tell the peer, we won't support this param. */ 1974 /* Tell the peer, we won't support this param. */
1979 return sctp_process_hn_param(asoc, param, chunk, err_chunk); 1975 sctp_process_hn_param(asoc, param, chunk, err_chunk);
1976 retval = SCTP_IERROR_ABORT;
1977 break;
1980 1978
1981 case SCTP_PARAM_FWD_TSN_SUPPORT: 1979 case SCTP_PARAM_FWD_TSN_SUPPORT:
1982 if (sctp_prsctp_enable) 1980 if (sctp_prsctp_enable)
@@ -1993,9 +1991,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
1993 * cause 'Protocol Violation'. 1991 * cause 'Protocol Violation'.
1994 */ 1992 */
1995 if (SCTP_AUTH_RANDOM_LENGTH != 1993 if (SCTP_AUTH_RANDOM_LENGTH !=
1996 ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) 1994 ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
1997 return sctp_process_inv_paramlength(asoc, param.p, 1995 sctp_process_inv_paramlength(asoc, param.p,
1998 chunk, err_chunk); 1996 chunk, err_chunk);
1997 retval = SCTP_IERROR_ABORT;
1998 }
1999 break; 1999 break;
2000 2000
2001 case SCTP_PARAM_CHUNKS: 2001 case SCTP_PARAM_CHUNKS:
@@ -2007,9 +2007,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
2007 * INIT-ACK chunk if the sender wants to receive authenticated 2007 * INIT-ACK chunk if the sender wants to receive authenticated
2008 * chunks. Its maximum length is 260 bytes. 2008 * chunks. Its maximum length is 260 bytes.
2009 */ 2009 */
2010 if (260 < ntohs(param.p->length)) 2010 if (260 < ntohs(param.p->length)) {
2011 return sctp_process_inv_paramlength(asoc, param.p, 2011 sctp_process_inv_paramlength(asoc, param.p,
2012 chunk, err_chunk); 2012 chunk, err_chunk);
2013 retval = SCTP_IERROR_ABORT;
2014 }
2013 break; 2015 break;
2014 2016
2015 case SCTP_PARAM_HMAC_ALGO: 2017 case SCTP_PARAM_HMAC_ALGO:
@@ -2020,8 +2022,7 @@ fallthrough:
2020 default: 2022 default:
2021 SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n", 2023 SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
2022 ntohs(param.p->type), cid); 2024 ntohs(param.p->type), cid);
2023 return sctp_process_unk_param(asoc, param, chunk, err_chunk); 2025 retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
2024
2025 break; 2026 break;
2026 } 2027 }
2027 return retval; 2028 return retval;
@@ -2036,6 +2037,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
2036{ 2037{
2037 union sctp_params param; 2038 union sctp_params param;
2038 int has_cookie = 0; 2039 int has_cookie = 0;
2040 int result;
2039 2041
2040 /* Verify stream values are non-zero. */ 2042 /* Verify stream values are non-zero. */
2041 if ((0 == peer_init->init_hdr.num_outbound_streams) || 2043 if ((0 == peer_init->init_hdr.num_outbound_streams) ||
@@ -2043,8 +2045,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
2043 (0 == peer_init->init_hdr.init_tag) || 2045 (0 == peer_init->init_hdr.init_tag) ||
2044 (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) { 2046 (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
2045 2047
2046 sctp_process_inv_mandatory(asoc, chunk, errp); 2048 return sctp_process_inv_mandatory(asoc, chunk, errp);
2047 return 0;
2048 } 2049 }
2049 2050
2050 /* Check for missing mandatory parameters. */ 2051 /* Check for missing mandatory parameters. */
@@ -2062,29 +2063,29 @@ int sctp_verify_init(const struct sctp_association *asoc,
2062 * VIOLATION error. We build the ERROR chunk here and let the normal 2063 * VIOLATION error. We build the ERROR chunk here and let the normal
2063 * error handling code build and send the packet. 2064 * error handling code build and send the packet.
2064 */ 2065 */
2065 if (param.v != (void*)chunk->chunk_end) { 2066 if (param.v != (void*)chunk->chunk_end)
2066 sctp_process_inv_paramlength(asoc, param.p, chunk, errp); 2067 return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
2067 return 0;
2068 }
2069 2068
2070 /* The only missing mandatory param possible today is 2069 /* The only missing mandatory param possible today is
2071 * the state cookie for an INIT-ACK chunk. 2070 * the state cookie for an INIT-ACK chunk.
2072 */ 2071 */
2073 if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) { 2072 if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
2074 sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE, 2073 return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
2075 chunk, errp); 2074 chunk, errp);
2076 return 0;
2077 }
2078
2079 /* Find unrecognized parameters. */
2080 2075
2076 /* Verify all the variable length parameters */
2081 sctp_walk_params(param, peer_init, init_hdr.params) { 2077 sctp_walk_params(param, peer_init, init_hdr.params) {
2082 2078
2083 if (!sctp_verify_param(asoc, param, cid, chunk, errp)) { 2079 result = sctp_verify_param(asoc, param, cid, chunk, errp);
2084 if (SCTP_PARAM_HOST_NAME_ADDRESS == param.p->type) 2080 switch (result) {
2081 case SCTP_IERROR_ABORT:
2082 case SCTP_IERROR_NOMEM:
2085 return 0; 2083 return 0;
2086 else 2084 case SCTP_IERROR_ERROR:
2087 return 1; 2085 return 1;
2086 case SCTP_IERROR_NO_ERROR:
2087 default:
2088 break;
2088 } 2089 }
2089 2090
2090 } /* for (loop through all parameters) */ 2091 } /* for (loop through all parameters) */
@@ -2137,11 +2138,14 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
2137 2138
2138 /* If the peer claims support for ADD-IP without support 2139 /* If the peer claims support for ADD-IP without support
2139 * for AUTH, disable support for ADD-IP. 2140 * for AUTH, disable support for ADD-IP.
2141 * Do this only if backward compatible mode is turned off.
2140 */ 2142 */
2141 if (asoc->peer.addip_capable && !asoc->peer.auth_capable) { 2143 if (!sctp_addip_noauth &&
2144 (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
2142 asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | 2145 asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
2143 SCTP_PARAM_DEL_IP | 2146 SCTP_PARAM_DEL_IP |
2144 SCTP_PARAM_SET_PRIMARY); 2147 SCTP_PARAM_SET_PRIMARY);
2148 asoc->peer.asconf_capable = 0;
2145 } 2149 }
2146 2150
2147 /* Walk list of transports, removing transports in the UNKNOWN state. */ 2151 /* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -2848,10 +2852,11 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
2848 2852
2849 __be16 err_code; 2853 __be16 err_code;
2850 int length = 0; 2854 int length = 0;
2851 int chunk_len = asconf->skb->len; 2855 int chunk_len;
2852 __u32 serial; 2856 __u32 serial;
2853 int all_param_pass = 1; 2857 int all_param_pass = 1;
2854 2858
2859 chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
2855 hdr = (sctp_addiphdr_t *)asconf->skb->data; 2860 hdr = (sctp_addiphdr_t *)asconf->skb->data;
2856 serial = ntohl(hdr->serial); 2861 serial = ntohl(hdr->serial);
2857 2862
@@ -2952,13 +2957,17 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
2952 /* This is always done in BH context with a socket lock 2957 /* This is always done in BH context with a socket lock
2953 * held, so the list can not change. 2958 * held, so the list can not change.
2954 */ 2959 */
2960 local_bh_disable();
2955 list_for_each_entry(saddr, &bp->address_list, list) { 2961 list_for_each_entry(saddr, &bp->address_list, list) {
2956 if (sctp_cmp_addr_exact(&saddr->a, &addr)) 2962 if (sctp_cmp_addr_exact(&saddr->a, &addr))
2957 saddr->use_as_src = 1; 2963 saddr->use_as_src = 1;
2958 } 2964 }
2965 local_bh_enable();
2959 break; 2966 break;
2960 case SCTP_PARAM_DEL_IP: 2967 case SCTP_PARAM_DEL_IP:
2961 retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh); 2968 local_bh_disable();
2969 retval = sctp_del_bind_addr(bp, &addr);
2970 local_bh_enable();
2962 list_for_each(pos, &asoc->peer.transport_addr_list) { 2971 list_for_each(pos, &asoc->peer.transport_addr_list) {
2963 transport = list_entry(pos, struct sctp_transport, 2972 transport = list_entry(pos, struct sctp_transport,
2964 transports); 2973 transports);
@@ -2990,7 +2999,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
2990 sctp_addip_param_t *asconf_ack_param; 2999 sctp_addip_param_t *asconf_ack_param;
2991 sctp_errhdr_t *err_param; 3000 sctp_errhdr_t *err_param;
2992 int length; 3001 int length;
2993 int asconf_ack_len = asconf_ack->skb->len; 3002 int asconf_ack_len;
2994 __be16 err_code; 3003 __be16 err_code;
2995 3004
2996 if (no_err) 3005 if (no_err)
@@ -2998,6 +3007,9 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
2998 else 3007 else
2999 err_code = SCTP_ERROR_REQ_REFUSED; 3008 err_code = SCTP_ERROR_REQ_REFUSED;
3000 3009
3010 asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
3011 sizeof(sctp_chunkhdr_t);
3012
3001 /* Skip the addiphdr from the asconf_ack chunk and store a pointer to 3013 /* Skip the addiphdr from the asconf_ack chunk and store a pointer to
3002 * the first asconf_ack parameter. 3014 * the first asconf_ack parameter.
3003 */ 3015 */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bbdc938da86f..78d1a8a49bd0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -453,6 +453,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
453 * maximum value discussed in rule C7 above (RTO.max) may be 453 * maximum value discussed in rule C7 above (RTO.max) may be
454 * used to provide an upper bound to this doubling operation. 454 * used to provide an upper bound to this doubling operation.
455 */ 455 */
456 transport->last_rto = transport->rto;
456 transport->rto = min((transport->rto * 2), transport->asoc->rto_max); 457 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
457} 458}
458 459
@@ -1267,6 +1268,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1267 sctp_ootb_pkt_free(packet); 1268 sctp_ootb_pkt_free(packet);
1268 break; 1269 break;
1269 1270
1271 case SCTP_CMD_T1_RETRAN:
1272 /* Mark a transport for retransmission. */
1273 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1274 SCTP_RTXR_T1_RTX);
1275 break;
1276
1270 case SCTP_CMD_RETRAN: 1277 case SCTP_CMD_RETRAN:
1271 /* Mark a transport for retransmission. */ 1278 /* Mark a transport for retransmission. */
1272 sctp_retransmit(&asoc->outqueue, cmd->obj.transport, 1279 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
@@ -1393,7 +1400,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1393 list_for_each(pos, &asoc->peer.transport_addr_list) { 1400 list_for_each(pos, &asoc->peer.transport_addr_list) {
1394 t = list_entry(pos, struct sctp_transport, 1401 t = list_entry(pos, struct sctp_transport,
1395 transports); 1402 transports);
1396 sctp_retransmit_mark(&asoc->outqueue, t, 0); 1403 sctp_retransmit_mark(&asoc->outqueue, t,
1404 SCTP_RTXR_T1_RTX);
1397 } 1405 }
1398 1406
1399 sctp_add_cmd_sf(commands, 1407 sctp_add_cmd_sf(commands,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f01b408508ff..b8bbb960723c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1146,7 +1146,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1146 /* Check if the timestamp looks valid. */ 1146 /* Check if the timestamp looks valid. */
1147 if (time_after(hbinfo->sent_at, jiffies) || 1147 if (time_after(hbinfo->sent_at, jiffies) ||
1148 time_after(jiffies, hbinfo->sent_at + max_interval)) { 1148 time_after(jiffies, hbinfo->sent_at + max_interval)) {
1149 SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp" 1149 SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp "
1150 "received for transport: %p\n", 1150 "received for transport: %p\n",
1151 __FUNCTION__, link); 1151 __FUNCTION__, link);
1152 return SCTP_DISPOSITION_DISCARD; 1152 return SCTP_DISPOSITION_DISCARD;
@@ -2305,7 +2305,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
2305 /* If we've sent any data bundled with COOKIE-ECHO we will need to 2305 /* If we've sent any data bundled with COOKIE-ECHO we will need to
2306 * resend 2306 * resend
2307 */ 2307 */
2308 sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, 2308 sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
2309 SCTP_TRANSPORT(asoc->peer.primary_path)); 2309 SCTP_TRANSPORT(asoc->peer.primary_path));
2310 2310
2311 /* Cast away the const modifier, as we want to just 2311 /* Cast away the const modifier, as we want to just
@@ -4064,11 +4064,6 @@ static sctp_disposition_t sctp_sf_abort_violation(
4064 struct sctp_chunk *chunk = arg; 4064 struct sctp_chunk *chunk = arg;
4065 struct sctp_chunk *abort = NULL; 4065 struct sctp_chunk *abort = NULL;
4066 4066
4067 /* Make the abort chunk. */
4068 abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
4069 if (!abort)
4070 goto nomem;
4071
4072 /* SCTP-AUTH, Section 6.3: 4067 /* SCTP-AUTH, Section 6.3:
4073 * It should be noted that if the receiver wants to tear 4068 * It should be noted that if the receiver wants to tear
4074 * down an association in an authenticated way only, the 4069 * down an association in an authenticated way only, the
@@ -4083,6 +4078,11 @@ static sctp_disposition_t sctp_sf_abort_violation(
4083 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) 4078 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
4084 goto discard; 4079 goto discard;
4085 4080
4081 /* Make the abort chunk. */
4082 abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
4083 if (!abort)
4084 goto nomem;
4085
4086 if (asoc) { 4086 if (asoc) {
4087 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); 4087 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4088 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 4088 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bd6f42a15a4b..ff8bc95670ed 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -660,7 +660,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
660 * socket routing and failover schemes. Refer to comments in 660 * socket routing and failover schemes. Refer to comments in
661 * sctp_do_bind(). -daisy 661 * sctp_do_bind(). -daisy
662 */ 662 */
663 retval = sctp_del_bind_addr(bp, sa_addr, call_rcu); 663 retval = sctp_del_bind_addr(bp, sa_addr);
664 664
665 addr_buf += af->sockaddr_len; 665 addr_buf += af->sockaddr_len;
666err_bindx_rem: 666err_bindx_rem:
@@ -5307,6 +5307,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5307{ 5307{
5308 struct sctp_bind_hashbucket *head; /* hash list */ 5308 struct sctp_bind_hashbucket *head; /* hash list */
5309 struct sctp_bind_bucket *pp; /* hash list port iterator */ 5309 struct sctp_bind_bucket *pp; /* hash list port iterator */
5310 struct hlist_node *node;
5310 unsigned short snum; 5311 unsigned short snum;
5311 int ret; 5312 int ret;
5312 5313
@@ -5331,7 +5332,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5331 index = sctp_phashfn(rover); 5332 index = sctp_phashfn(rover);
5332 head = &sctp_port_hashtable[index]; 5333 head = &sctp_port_hashtable[index];
5333 sctp_spin_lock(&head->lock); 5334 sctp_spin_lock(&head->lock);
5334 for (pp = head->chain; pp; pp = pp->next) 5335 sctp_for_each_hentry(pp, node, &head->chain)
5335 if (pp->port == rover) 5336 if (pp->port == rover)
5336 goto next; 5337 goto next;
5337 break; 5338 break;
@@ -5358,7 +5359,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5358 */ 5359 */
5359 head = &sctp_port_hashtable[sctp_phashfn(snum)]; 5360 head = &sctp_port_hashtable[sctp_phashfn(snum)];
5360 sctp_spin_lock(&head->lock); 5361 sctp_spin_lock(&head->lock);
5361 for (pp = head->chain; pp; pp = pp->next) { 5362 sctp_for_each_hentry(pp, node, &head->chain) {
5362 if (pp->port == snum) 5363 if (pp->port == snum)
5363 goto pp_found; 5364 goto pp_found;
5364 } 5365 }
@@ -5702,10 +5703,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
5702 pp->port = snum; 5703 pp->port = snum;
5703 pp->fastreuse = 0; 5704 pp->fastreuse = 0;
5704 INIT_HLIST_HEAD(&pp->owner); 5705 INIT_HLIST_HEAD(&pp->owner);
5705 if ((pp->next = head->chain) != NULL) 5706 hlist_add_head(&pp->node, &head->chain);
5706 pp->next->pprev = &pp->next;
5707 head->chain = pp;
5708 pp->pprev = &head->chain;
5709 } 5707 }
5710 return pp; 5708 return pp;
5711} 5709}
@@ -5714,9 +5712,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
5714static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 5712static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
5715{ 5713{
5716 if (pp && hlist_empty(&pp->owner)) { 5714 if (pp && hlist_empty(&pp->owner)) {
5717 if (pp->next) 5715 __hlist_del(&pp->node);
5718 pp->next->pprev = pp->pprev;
5719 *(pp->pprev) = pp->next;
5720 kmem_cache_free(sctp_bucket_cachep, pp); 5716 kmem_cache_free(sctp_bucket_cachep, pp);
5721 SCTP_DBG_OBJCNT_DEC(bind_bucket); 5717 SCTP_DBG_OBJCNT_DEC(bind_bucket);
5722 } 5718 }
@@ -6455,6 +6451,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6455} 6451}
6456 6452
6457 6453
6454DEFINE_PROTO_INUSE(sctp)
6455
6458/* This proto struct describes the ULP interface for SCTP. */ 6456/* This proto struct describes the ULP interface for SCTP. */
6459struct proto sctp_prot = { 6457struct proto sctp_prot = {
6460 .name = "SCTP", 6458 .name = "SCTP",
@@ -6483,9 +6481,12 @@ struct proto sctp_prot = {
6483 .memory_pressure = &sctp_memory_pressure, 6481 .memory_pressure = &sctp_memory_pressure,
6484 .enter_memory_pressure = sctp_enter_memory_pressure, 6482 .enter_memory_pressure = sctp_enter_memory_pressure,
6485 .memory_allocated = &sctp_memory_allocated, 6483 .memory_allocated = &sctp_memory_allocated,
6484 REF_PROTO_INUSE(sctp)
6486}; 6485};
6487 6486
6488#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 6487#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
6488DEFINE_PROTO_INUSE(sctpv6)
6489
6489struct proto sctpv6_prot = { 6490struct proto sctpv6_prot = {
6490 .name = "SCTPv6", 6491 .name = "SCTPv6",
6491 .owner = THIS_MODULE, 6492 .owner = THIS_MODULE,
@@ -6513,5 +6514,6 @@ struct proto sctpv6_prot = {
6513 .memory_pressure = &sctp_memory_pressure, 6514 .memory_pressure = &sctp_memory_pressure,
6514 .enter_memory_pressure = sctp_enter_memory_pressure, 6515 .enter_memory_pressure = sctp_enter_memory_pressure,
6515 .memory_allocated = &sctp_memory_allocated, 6516 .memory_allocated = &sctp_memory_allocated,
6517 REF_PROTO_INUSE(sctpv6)
6516}; 6518};
6517#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 6519#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 0669778e4335..da4f15734fb1 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -263,6 +263,15 @@ static ctl_table sctp_table[] = {
263 .proc_handler = &proc_dointvec, 263 .proc_handler = &proc_dointvec,
264 .strategy = &sysctl_intvec 264 .strategy = &sysctl_intvec
265 }, 265 },
266 {
267 .ctl_name = CTL_UNNUMBERED,
268 .procname = "addip_noauth_enable",
269 .data = &sctp_addip_noauth,
270 .maxlen = sizeof(int),
271 .mode = 0644,
272 .proc_handler = &proc_dointvec,
273 .strategy = &sysctl_intvec
274 },
266 { .ctl_name = 0 } 275 { .ctl_name = 0 }
267}; 276};
268 277
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f467c914f80..d55ce83a020b 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,8 +74,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
74 * given destination transport address, set RTO to the protocol 74 * given destination transport address, set RTO to the protocol
75 * parameter 'RTO.Initial'. 75 * parameter 'RTO.Initial'.
76 */ 76 */
77 peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
77 peer->rtt = 0; 78 peer->rtt = 0;
78 peer->rto = msecs_to_jiffies(sctp_rto_initial);
79 peer->rttvar = 0; 79 peer->rttvar = 0;
80 peer->srtt = 0; 80 peer->srtt = 0;
81 peer->rto_pending = 0; 81 peer->rto_pending = 0;
@@ -385,6 +385,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
385 tp->rto = tp->asoc->rto_max; 385 tp->rto = tp->asoc->rto_max;
386 386
387 tp->rtt = rtt; 387 tp->rtt = rtt;
388 tp->last_rto = tp->rto;
388 389
389 /* Reset rto_pending so that a new RTT measurement is started when a 390 /* Reset rto_pending so that a new RTT measurement is started when a
390 * new data chunk is sent. 391 * new data chunk is sent.
@@ -578,7 +579,7 @@ void sctp_transport_reset(struct sctp_transport *t)
578 */ 579 */
579 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 580 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
580 t->ssthresh = asoc->peer.i.a_rwnd; 581 t->ssthresh = asoc->peer.i.a_rwnd;
581 t->rto = asoc->rto_initial; 582 t->last_rto = t->rto = asoc->rto_initial;
582 t->rtt = 0; 583 t->rtt = 0;
583 t->srtt = 0; 584 t->srtt = 0;
584 t->rttvar = 0; 585 t->rttvar = 0;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 4be92d0a2cab..4908041ffb31 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -862,7 +862,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
862 continue; 862 continue;
863 863
864 /* see if this ssn has been marked by skipping */ 864 /* see if this ssn has been marked by skipping */
865 if (!SSN_lt(cssn, sctp_ssn_peek(in, csid))) 865 if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
866 break; 866 break;
867 867
868 __skb_unlink(pos, &ulpq->lobby); 868 __skb_unlink(pos, &ulpq->lobby);
diff --git a/net/socket.c b/net/socket.c
index 540013ea8620..74784dfe8e5b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1250,11 +1250,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
1250 goto out_release_both; 1250 goto out_release_both;
1251 1251
1252 fd1 = sock_alloc_fd(&newfile1); 1252 fd1 = sock_alloc_fd(&newfile1);
1253 if (unlikely(fd1 < 0)) 1253 if (unlikely(fd1 < 0)) {
1254 err = fd1;
1254 goto out_release_both; 1255 goto out_release_both;
1256 }
1255 1257
1256 fd2 = sock_alloc_fd(&newfile2); 1258 fd2 = sock_alloc_fd(&newfile2);
1257 if (unlikely(fd2 < 0)) { 1259 if (unlikely(fd2 < 0)) {
1260 err = fd2;
1258 put_filp(newfile1); 1261 put_filp(newfile1);
1259 put_unused_fd(fd1); 1262 put_unused_fd(fd1);
1260 goto out_release_both; 1263 goto out_release_both;
@@ -2316,6 +2319,11 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
2316 return err; 2319 return err;
2317} 2320}
2318 2321
2322int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
2323{
2324 return sock->ops->shutdown(sock, how);
2325}
2326
2319/* ABI emulation layers need these two */ 2327/* ABI emulation layers need these two */
2320EXPORT_SYMBOL(move_addr_to_kernel); 2328EXPORT_SYMBOL(move_addr_to_kernel);
2321EXPORT_SYMBOL(move_addr_to_user); 2329EXPORT_SYMBOL(move_addr_to_user);
@@ -2342,3 +2350,4 @@ EXPORT_SYMBOL(kernel_getsockopt);
2342EXPORT_SYMBOL(kernel_setsockopt); 2350EXPORT_SYMBOL(kernel_setsockopt);
2343EXPORT_SYMBOL(kernel_sendpage); 2351EXPORT_SYMBOL(kernel_sendpage);
2344EXPORT_SYMBOL(kernel_sock_ioctl); 2352EXPORT_SYMBOL(kernel_sock_ioctl);
2353EXPORT_SYMBOL(kernel_sock_shutdown);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 53995af9ca4b..a6e57d1c2eb6 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -540,7 +540,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
540 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 540 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
541 if (IS_ERR(p)) { 541 if (IS_ERR(p)) {
542 err = PTR_ERR(p); 542 err = PTR_ERR(p);
543 gss_msg->msg.errno = (err == -EACCES) ? -EACCES : -EAGAIN; 543 gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
544 goto err_release_msg; 544 goto err_release_msg;
545 } 545 }
546 gss_msg->ctx = gss_get_ctx(ctx); 546 gss_msg->ctx = gss_get_ctx(ctx);
@@ -967,7 +967,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
967 if (maj_stat == GSS_S_CONTEXT_EXPIRED) 967 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
968 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 968 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
969 if (maj_stat) { 969 if (maj_stat) {
970 dprintk("RPC: %5u gss_validate: gss_verify_mic returned" 970 dprintk("RPC: %5u gss_validate: gss_verify_mic returned "
971 "error 0x%08x\n", task->tk_pid, maj_stat); 971 "error 0x%08x\n", task->tk_pid, maj_stat);
972 goto out_bad; 972 goto out_bad;
973 } 973 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 91cd8f0d1e10..0dd792338fa9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -211,8 +211,8 @@ encryptor(struct scatterlist *sg, void *data)
211 if (thislen == 0) 211 if (thislen == 0)
212 return 0; 212 return 0;
213 213
214 sg_mark_end(desc->infrags, desc->fragno); 214 sg_mark_end(&desc->infrags[desc->fragno - 1]);
215 sg_mark_end(desc->outfrags, desc->fragno); 215 sg_mark_end(&desc->outfrags[desc->fragno - 1]);
216 216
217 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, 217 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
218 desc->infrags, thislen); 218 desc->infrags, thislen);
@@ -293,7 +293,7 @@ decryptor(struct scatterlist *sg, void *data)
293 if (thislen == 0) 293 if (thislen == 0)
294 return 0; 294 return 0;
295 295
296 sg_mark_end(desc->frags, desc->fragno); 296 sg_mark_end(&desc->frags[desc->fragno - 1]);
297 297
298 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, 298 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
299 desc->frags, thislen); 299 desc->frags, thislen);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 9843eacef11d..60c3dba545d7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -147,13 +147,17 @@ gss_import_sec_context_kerberos(const void *p,
147 p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); 147 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
148 if (IS_ERR(p)) 148 if (IS_ERR(p))
149 goto out_err_free_ctx; 149 goto out_err_free_ctx;
150 if (tmp != SGN_ALG_DES_MAC_MD5) 150 if (tmp != SGN_ALG_DES_MAC_MD5) {
151 p = ERR_PTR(-ENOSYS);
151 goto out_err_free_ctx; 152 goto out_err_free_ctx;
153 }
152 p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); 154 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
153 if (IS_ERR(p)) 155 if (IS_ERR(p))
154 goto out_err_free_ctx; 156 goto out_err_free_ctx;
155 if (tmp != SEAL_ALG_DES) 157 if (tmp != SEAL_ALG_DES) {
158 p = ERR_PTR(-ENOSYS);
156 goto out_err_free_ctx; 159 goto out_err_free_ctx;
160 }
157 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); 161 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
158 if (IS_ERR(p)) 162 if (IS_ERR(p))
159 goto out_err_free_ctx; 163 goto out_err_free_ctx;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index a0d9faa59cb5..dedcbd6108f4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -63,7 +63,6 @@
63#include <linux/jiffies.h> 63#include <linux/jiffies.h>
64#include <linux/sunrpc/gss_krb5.h> 64#include <linux/sunrpc/gss_krb5.h>
65#include <linux/random.h> 65#include <linux/random.h>
66#include <asm/scatterlist.h>
67#include <linux/crypto.h> 66#include <linux/crypto.h>
68 67
69#ifdef RPC_DEBUG 68#ifdef RPC_DEBUG
@@ -84,6 +83,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
84 u32 seq_send; 83 u32 seq_send;
85 84
86 dprintk("RPC: gss_krb5_seal\n"); 85 dprintk("RPC: gss_krb5_seal\n");
86 BUG_ON(ctx == NULL);
87 87
88 now = get_seconds(); 88 now = get_seconds();
89 89
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 8bd074df27d3..3bdc527ee64a 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -4,7 +4,6 @@
4#include <linux/sunrpc/gss_krb5.h> 4#include <linux/sunrpc/gss_krb5.h>
5#include <linux/random.h> 5#include <linux/random.h>
6#include <linux/pagemap.h> 6#include <linux/pagemap.h>
7#include <asm/scatterlist.h>
8#include <linux/crypto.h> 7#include <linux/crypto.h>
9 8
10#ifdef RPC_DEBUG 9#ifdef RPC_DEBUG
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 18f0a8dcc095..c59f3ca2b41b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -280,7 +280,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
280 mask = POLLOUT | POLLWRNORM; 280 mask = POLLOUT | POLLWRNORM;
281 if (rpci->ops == NULL) 281 if (rpci->ops == NULL)
282 mask |= POLLERR | POLLHUP; 282 mask |= POLLERR | POLLHUP;
283 if (!list_empty(&rpci->pipe)) 283 if (filp->private_data || !list_empty(&rpci->pipe))
284 mask |= POLLIN | POLLRDNORM; 284 mask |= POLLIN | POLLRDNORM;
285 return mask; 285 return mask;
286} 286}
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 864b541bbf51..2be714e9b382 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -87,9 +87,8 @@ proc_dodebug(ctl_table *table, int write, struct file *file,
87 left--, s++; 87 left--, s++;
88 *(unsigned int *) table->data = value; 88 *(unsigned int *) table->data = value;
89 /* Display the RPC tasks on writing to rpc_debug */ 89 /* Display the RPC tasks on writing to rpc_debug */
90 if (table->ctl_name == CTL_RPCDEBUG) { 90 if (strcmp(table->procname, "rpc_debug") == 0)
91 rpc_show_tasks(); 91 rpc_show_tasks();
92 }
93 } else { 92 } else {
94 if (!access_ok(VERIFY_WRITE, buffer, left)) 93 if (!access_ok(VERIFY_WRITE, buffer, left))
95 return -EFAULT; 94 return -EFAULT;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 282a9a2ec90c..cd641c8634f0 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,7 +62,7 @@ static inline void do_xprt_reserve(struct rpc_task *);
62static void xprt_connect_status(struct rpc_task *task); 62static void xprt_connect_status(struct rpc_task *task);
63static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); 63static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
64 64
65static spinlock_t xprt_list_lock = SPIN_LOCK_UNLOCKED; 65static DEFINE_SPINLOCK(xprt_list_lock);
66static LIST_HEAD(xprt_list); 66static LIST_HEAD(xprt_list);
67 67
68/* 68/*
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 12db63580427..9e11ce715958 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
181 struct rpcrdma_read_chunk *cur_rchunk = NULL; 181 struct rpcrdma_read_chunk *cur_rchunk = NULL;
182 struct rpcrdma_write_array *warray = NULL; 182 struct rpcrdma_write_array *warray = NULL;
183 struct rpcrdma_write_chunk *cur_wchunk = NULL; 183 struct rpcrdma_write_chunk *cur_wchunk = NULL;
184 u32 *iptr = headerp->rm_body.rm_chunks; 184 __be32 *iptr = headerp->rm_body.rm_chunks;
185 185
186 if (type == rpcrdma_readch || type == rpcrdma_areadch) { 186 if (type == rpcrdma_readch || type == rpcrdma_areadch) {
187 /* a read chunk - server will RDMA Read our memory */ 187 /* a read chunk - server will RDMA Read our memory */
@@ -217,25 +217,25 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
217 cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); 217 cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
218 cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); 218 cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
219 xdr_encode_hyper( 219 xdr_encode_hyper(
220 (u32 *)&cur_rchunk->rc_target.rs_offset, 220 (__be32 *)&cur_rchunk->rc_target.rs_offset,
221 seg->mr_base); 221 seg->mr_base);
222 dprintk("RPC: %s: read chunk " 222 dprintk("RPC: %s: read chunk "
223 "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, 223 "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
224 seg->mr_len, seg->mr_base, seg->mr_rkey, pos, 224 seg->mr_len, (unsigned long long)seg->mr_base,
225 n < nsegs ? "more" : "last"); 225 seg->mr_rkey, pos, n < nsegs ? "more" : "last");
226 cur_rchunk++; 226 cur_rchunk++;
227 r_xprt->rx_stats.read_chunk_count++; 227 r_xprt->rx_stats.read_chunk_count++;
228 } else { /* write/reply */ 228 } else { /* write/reply */
229 cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); 229 cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
230 cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); 230 cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
231 xdr_encode_hyper( 231 xdr_encode_hyper(
232 (u32 *)&cur_wchunk->wc_target.rs_offset, 232 (__be32 *)&cur_wchunk->wc_target.rs_offset,
233 seg->mr_base); 233 seg->mr_base);
234 dprintk("RPC: %s: %s chunk " 234 dprintk("RPC: %s: %s chunk "
235 "elem %d@0x%llx:0x%x (%s)\n", __func__, 235 "elem %d@0x%llx:0x%x (%s)\n", __func__,
236 (type == rpcrdma_replych) ? "reply" : "write", 236 (type == rpcrdma_replych) ? "reply" : "write",
237 seg->mr_len, seg->mr_base, seg->mr_rkey, 237 seg->mr_len, (unsigned long long)seg->mr_base,
238 n < nsegs ? "more" : "last"); 238 seg->mr_rkey, n < nsegs ? "more" : "last");
239 cur_wchunk++; 239 cur_wchunk++;
240 if (type == rpcrdma_replych) 240 if (type == rpcrdma_replych)
241 r_xprt->rx_stats.reply_chunk_count++; 241 r_xprt->rx_stats.reply_chunk_count++;
@@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
257 * finish off header. If write, marshal discrim and nchunks. 257 * finish off header. If write, marshal discrim and nchunks.
258 */ 258 */
259 if (cur_rchunk) { 259 if (cur_rchunk) {
260 iptr = (u32 *) cur_rchunk; 260 iptr = (__be32 *) cur_rchunk;
261 *iptr++ = xdr_zero; /* finish the read chunk list */ 261 *iptr++ = xdr_zero; /* finish the read chunk list */
262 *iptr++ = xdr_zero; /* encode a NULL write chunk list */ 262 *iptr++ = xdr_zero; /* encode a NULL write chunk list */
263 *iptr++ = xdr_zero; /* encode a NULL reply chunk */ 263 *iptr++ = xdr_zero; /* encode a NULL reply chunk */
264 } else { 264 } else {
265 warray->wc_discrim = xdr_one; 265 warray->wc_discrim = xdr_one;
266 warray->wc_nchunks = htonl(nchunks); 266 warray->wc_nchunks = htonl(nchunks);
267 iptr = (u32 *) cur_wchunk; 267 iptr = (__be32 *) cur_wchunk;
268 if (type == rpcrdma_writech) { 268 if (type == rpcrdma_writech) {
269 *iptr++ = xdr_zero; /* finish the write chunk list */ 269 *iptr++ = xdr_zero; /* finish the write chunk list */
270 *iptr++ = xdr_zero; /* encode a NULL reply chunk */ 270 *iptr++ = xdr_zero; /* encode a NULL reply chunk */
@@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
559 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) 559 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
560 */ 560 */
561static int 561static int
562rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) 562rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp)
563{ 563{
564 unsigned int i, total_len; 564 unsigned int i, total_len;
565 struct rpcrdma_write_chunk *cur_wchunk; 565 struct rpcrdma_write_chunk *cur_wchunk;
@@ -573,11 +573,11 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
573 struct rpcrdma_segment *seg = &cur_wchunk->wc_target; 573 struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
574 ifdebug(FACILITY) { 574 ifdebug(FACILITY) {
575 u64 off; 575 u64 off;
576 xdr_decode_hyper((u32 *)&seg->rs_offset, &off); 576 xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
577 dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", 577 dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
578 __func__, 578 __func__,
579 ntohl(seg->rs_length), 579 ntohl(seg->rs_length),
580 off, 580 (unsigned long long)off,
581 ntohl(seg->rs_handle)); 581 ntohl(seg->rs_handle));
582 } 582 }
583 total_len += ntohl(seg->rs_length); 583 total_len += ntohl(seg->rs_length);
@@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
585 } 585 }
586 /* check and adjust for properly terminated write chunk */ 586 /* check and adjust for properly terminated write chunk */
587 if (wrchunk) { 587 if (wrchunk) {
588 u32 *w = (u32 *) cur_wchunk; 588 __be32 *w = (__be32 *) cur_wchunk;
589 if (*w++ != xdr_zero) 589 if (*w++ != xdr_zero)
590 return -1; 590 return -1;
591 cur_wchunk = (struct rpcrdma_write_chunk *) w; 591 cur_wchunk = (struct rpcrdma_write_chunk *) w;
@@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
593 if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) 593 if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
594 return -1; 594 return -1;
595 595
596 *iptrp = (u32 *) cur_wchunk; 596 *iptrp = (__be32 *) cur_wchunk;
597 return total_len; 597 return total_len;
598} 598}
599 599
@@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
721 struct rpc_rqst *rqst; 721 struct rpc_rqst *rqst;
722 struct rpc_xprt *xprt = rep->rr_xprt; 722 struct rpc_xprt *xprt = rep->rr_xprt;
723 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 723 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
724 u32 *iptr; 724 __be32 *iptr;
725 int i, rdmalen, status; 725 int i, rdmalen, status;
726 726
727 /* Check status. If bad, signal disconnect and return rep to pool */ 727 /* Check status. If bad, signal disconnect and return rep to pool */
@@ -801,7 +801,7 @@ repost:
801 r_xprt->rx_stats.total_rdma_reply += rdmalen; 801 r_xprt->rx_stats.total_rdma_reply += rdmalen;
802 } else { 802 } else {
803 /* else ordinary inline */ 803 /* else ordinary inline */
804 iptr = (u32 *)((unsigned char *)headerp + 28); 804 iptr = (__be32 *)((unsigned char *)headerp + 28);
805 rep->rr_len -= 28; /*sizeof *headerp;*/ 805 rep->rr_len -= 28; /*sizeof *headerp;*/
806 status = rep->rr_len; 806 status = rep->rr_len;
807 } 807 }
@@ -816,7 +816,7 @@ repost:
816 headerp->rm_body.rm_chunks[2] != xdr_one || 816 headerp->rm_body.rm_chunks[2] != xdr_one ||
817 req->rl_nchunks == 0) 817 req->rl_nchunks == 0)
818 goto badheader; 818 goto badheader;
819 iptr = (u32 *)((unsigned char *)headerp + 28); 819 iptr = (__be32 *)((unsigned char *)headerp + 28);
820 rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); 820 rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
821 if (rdmalen < 0) 821 if (rdmalen < 0)
822 goto badheader; 822 goto badheader;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index dc55cc974c90..6f2112dd9f78 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -89,7 +89,7 @@ static struct ctl_table_header *sunrpc_table_header;
89 89
90static ctl_table xr_tunables_table[] = { 90static ctl_table xr_tunables_table[] = {
91 { 91 {
92 .ctl_name = CTL_SLOTTABLE_RDMA, 92 .ctl_name = CTL_UNNUMBERED,
93 .procname = "rdma_slot_table_entries", 93 .procname = "rdma_slot_table_entries",
94 .data = &xprt_rdma_slot_table_entries, 94 .data = &xprt_rdma_slot_table_entries,
95 .maxlen = sizeof(unsigned int), 95 .maxlen = sizeof(unsigned int),
@@ -100,7 +100,7 @@ static ctl_table xr_tunables_table[] = {
100 .extra2 = &max_slot_table_size 100 .extra2 = &max_slot_table_size
101 }, 101 },
102 { 102 {
103 .ctl_name = CTL_RDMA_MAXINLINEREAD, 103 .ctl_name = CTL_UNNUMBERED,
104 .procname = "rdma_max_inline_read", 104 .procname = "rdma_max_inline_read",
105 .data = &xprt_rdma_max_inline_read, 105 .data = &xprt_rdma_max_inline_read,
106 .maxlen = sizeof(unsigned int), 106 .maxlen = sizeof(unsigned int),
@@ -109,7 +109,7 @@ static ctl_table xr_tunables_table[] = {
109 .strategy = &sysctl_intvec, 109 .strategy = &sysctl_intvec,
110 }, 110 },
111 { 111 {
112 .ctl_name = CTL_RDMA_MAXINLINEWRITE, 112 .ctl_name = CTL_UNNUMBERED,
113 .procname = "rdma_max_inline_write", 113 .procname = "rdma_max_inline_write",
114 .data = &xprt_rdma_max_inline_write, 114 .data = &xprt_rdma_max_inline_write,
115 .maxlen = sizeof(unsigned int), 115 .maxlen = sizeof(unsigned int),
@@ -118,7 +118,7 @@ static ctl_table xr_tunables_table[] = {
118 .strategy = &sysctl_intvec, 118 .strategy = &sysctl_intvec,
119 }, 119 },
120 { 120 {
121 .ctl_name = CTL_RDMA_WRITEPADDING, 121 .ctl_name = CTL_UNNUMBERED,
122 .procname = "rdma_inline_write_padding", 122 .procname = "rdma_inline_write_padding",
123 .data = &xprt_rdma_inline_write_padding, 123 .data = &xprt_rdma_inline_write_padding,
124 .maxlen = sizeof(unsigned int), 124 .maxlen = sizeof(unsigned int),
@@ -129,7 +129,7 @@ static ctl_table xr_tunables_table[] = {
129 .extra2 = &max_padding, 129 .extra2 = &max_padding,
130 }, 130 },
131 { 131 {
132 .ctl_name = CTL_RDMA_MEMREG, 132 .ctl_name = CTL_UNNUMBERED,
133 .procname = "rdma_memreg_strategy", 133 .procname = "rdma_memreg_strategy",
134 .data = &xprt_rdma_memreg_strategy, 134 .data = &xprt_rdma_memreg_strategy,
135 .maxlen = sizeof(unsigned int), 135 .maxlen = sizeof(unsigned int),
@@ -320,9 +320,9 @@ xprt_setup_rdma(struct xprt_create *args)
320 xprt->slot = kcalloc(xprt->max_reqs, 320 xprt->slot = kcalloc(xprt->max_reqs,
321 sizeof(struct rpc_rqst), GFP_KERNEL); 321 sizeof(struct rpc_rqst), GFP_KERNEL);
322 if (xprt->slot == NULL) { 322 if (xprt->slot == NULL) {
323 kfree(xprt);
324 dprintk("RPC: %s: couldn't allocate %d slots\n", 323 dprintk("RPC: %s: couldn't allocate %d slots\n",
325 __func__, xprt->max_reqs); 324 __func__, xprt->max_reqs);
325 kfree(xprt);
326 return ERR_PTR(-ENOMEM); 326 return ERR_PTR(-ENOMEM);
327 } 327 }
328 328
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 02298f529dad..2f630a512ab7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1828,7 +1828,7 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
1828 * @args: rpc transport creation arguments 1828 * @args: rpc transport creation arguments
1829 * 1829 *
1830 */ 1830 */
1831struct rpc_xprt *xs_setup_udp(struct xprt_create *args) 1831static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
1832{ 1832{
1833 struct sockaddr *addr = args->dstaddr; 1833 struct sockaddr *addr = args->dstaddr;
1834 struct rpc_xprt *xprt; 1834 struct rpc_xprt *xprt;
@@ -1894,7 +1894,7 @@ struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
1894 * @args: rpc transport creation arguments 1894 * @args: rpc transport creation arguments
1895 * 1895 *
1896 */ 1896 */
1897struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) 1897static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
1898{ 1898{
1899 struct sockaddr *addr = args->dstaddr; 1899 struct sockaddr *addr = args->dstaddr;
1900 struct rpc_xprt *xprt; 1900 struct rpc_xprt *xprt;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e36b4b5a5222..6b792265dc06 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -201,7 +201,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
201 return -EPROTOTYPE; 201 return -EPROTOTYPE;
202 } 202 }
203 203
204 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, 1); 204 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
205 if (!sk) { 205 if (!sk) {
206 tipc_deleteport(ref); 206 tipc_deleteport(ref);
207 return -ENOMEM; 207 return -ENOMEM;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9163ec526c2a..e835da8fc091 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -457,7 +457,7 @@ static int unix_release_sock (struct sock *sk, int embrion)
457 * What the above comment does talk about? --ANK(980817) 457 * What the above comment does talk about? --ANK(980817)
458 */ 458 */
459 459
460 if (atomic_read(&unix_tot_inflight)) 460 if (unix_tot_inflight)
461 unix_gc(); /* Garbage collect fds */ 461 unix_gc(); /* Garbage collect fds */
462 462
463 return 0; 463 return 0;
@@ -599,15 +599,14 @@ static struct sock * unix_create1(struct net *net, struct socket *sock)
599 struct sock *sk = NULL; 599 struct sock *sk = NULL;
600 struct unix_sock *u; 600 struct unix_sock *u;
601 601
602 if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) 602 atomic_inc(&unix_nr_socks);
603 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
603 goto out; 604 goto out;
604 605
605 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, 1); 606 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
606 if (!sk) 607 if (!sk)
607 goto out; 608 goto out;
608 609
609 atomic_inc(&unix_nr_socks);
610
611 sock_init_data(sock,sk); 610 sock_init_data(sock,sk);
612 lockdep_set_class(&sk->sk_receive_queue.lock, 611 lockdep_set_class(&sk->sk_receive_queue.lock,
613 &af_unix_sk_receive_queue_lock_key); 612 &af_unix_sk_receive_queue_lock_key);
@@ -625,6 +624,8 @@ static struct sock * unix_create1(struct net *net, struct socket *sock)
625 init_waitqueue_head(&u->peer_wait); 624 init_waitqueue_head(&u->peer_wait);
626 unix_insert_socket(unix_sockets_unbound, sk); 625 unix_insert_socket(unix_sockets_unbound, sk);
627out: 626out:
627 if (sk == NULL)
628 atomic_dec(&unix_nr_socks);
628 return sk; 629 return sk;
629} 630}
630 631
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 406b6433e467..ebdff3d877a1 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -92,7 +92,7 @@ static LIST_HEAD(gc_inflight_list);
92static LIST_HEAD(gc_candidates); 92static LIST_HEAD(gc_candidates);
93static DEFINE_SPINLOCK(unix_gc_lock); 93static DEFINE_SPINLOCK(unix_gc_lock);
94 94
95atomic_t unix_tot_inflight = ATOMIC_INIT(0); 95unsigned int unix_tot_inflight;
96 96
97 97
98static struct sock *unix_get_socket(struct file *filp) 98static struct sock *unix_get_socket(struct file *filp)
@@ -133,7 +133,7 @@ void unix_inflight(struct file *fp)
133 } else { 133 } else {
134 BUG_ON(list_empty(&u->link)); 134 BUG_ON(list_empty(&u->link));
135 } 135 }
136 atomic_inc(&unix_tot_inflight); 136 unix_tot_inflight++;
137 spin_unlock(&unix_gc_lock); 137 spin_unlock(&unix_gc_lock);
138 } 138 }
139} 139}
@@ -147,7 +147,7 @@ void unix_notinflight(struct file *fp)
147 BUG_ON(list_empty(&u->link)); 147 BUG_ON(list_empty(&u->link));
148 if (atomic_dec_and_test(&u->inflight)) 148 if (atomic_dec_and_test(&u->inflight))
149 list_del_init(&u->link); 149 list_del_init(&u->link);
150 atomic_dec(&unix_tot_inflight); 150 unix_tot_inflight--;
151 spin_unlock(&unix_gc_lock); 151 spin_unlock(&unix_gc_lock);
152 } 152 }
153} 153}
@@ -161,7 +161,7 @@ static inline struct sk_buff *sock_queue_head(struct sock *sk)
161 for (skb = sock_queue_head(sk)->next, next = skb->next; \ 161 for (skb = sock_queue_head(sk)->next, next = skb->next; \
162 skb != sock_queue_head(sk); skb = next, next = skb->next) 162 skb != sock_queue_head(sk); skb = next, next = skb->next)
163 163
164static void scan_inflight(struct sock *x, void (*func)(struct sock *), 164static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
165 struct sk_buff_head *hitlist) 165 struct sk_buff_head *hitlist)
166{ 166{
167 struct sk_buff *skb; 167 struct sk_buff *skb;
@@ -185,9 +185,9 @@ static void scan_inflight(struct sock *x, void (*func)(struct sock *),
185 * if it indeed does so 185 * if it indeed does so
186 */ 186 */
187 struct sock *sk = unix_get_socket(*fp++); 187 struct sock *sk = unix_get_socket(*fp++);
188 if(sk) { 188 if (sk) {
189 hit = true; 189 hit = true;
190 func(sk); 190 func(unix_sk(sk));
191 } 191 }
192 } 192 }
193 if (hit && hitlist != NULL) { 193 if (hit && hitlist != NULL) {
@@ -199,7 +199,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct sock *),
199 spin_unlock(&x->sk_receive_queue.lock); 199 spin_unlock(&x->sk_receive_queue.lock);
200} 200}
201 201
202static void scan_children(struct sock *x, void (*func)(struct sock *), 202static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
203 struct sk_buff_head *hitlist) 203 struct sk_buff_head *hitlist)
204{ 204{
205 if (x->sk_state != TCP_LISTEN) 205 if (x->sk_state != TCP_LISTEN)
@@ -235,20 +235,18 @@ static void scan_children(struct sock *x, void (*func)(struct sock *),
235 } 235 }
236} 236}
237 237
238static void dec_inflight(struct sock *sk) 238static void dec_inflight(struct unix_sock *usk)
239{ 239{
240 atomic_dec(&unix_sk(sk)->inflight); 240 atomic_dec(&usk->inflight);
241} 241}
242 242
243static void inc_inflight(struct sock *sk) 243static void inc_inflight(struct unix_sock *usk)
244{ 244{
245 atomic_inc(&unix_sk(sk)->inflight); 245 atomic_inc(&usk->inflight);
246} 246}
247 247
248static void inc_inflight_move_tail(struct sock *sk) 248static void inc_inflight_move_tail(struct unix_sock *u)
249{ 249{
250 struct unix_sock *u = unix_sk(sk);
251
252 atomic_inc(&u->inflight); 250 atomic_inc(&u->inflight);
253 /* 251 /*
254 * If this is still a candidate, move it to the end of the 252 * If this is still a candidate, move it to the end of the
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 85e5f9dd0d8e..47e80cc2077c 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1094,7 +1094,7 @@ int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
1094 rtnl_lock(); 1094 rtnl_lock();
1095 ret = wireless_process_ioctl(net, ifr, cmd); 1095 ret = wireless_process_ioctl(net, ifr, cmd);
1096 rtnl_unlock(); 1096 rtnl_unlock();
1097 if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct ifreq))) 1097 if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct iwreq)))
1098 return -EFAULT; 1098 return -EFAULT;
1099 return ret; 1099 return ret;
1100} 1100}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fc416f9606a9..92cfe8e3e0b8 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -472,7 +472,7 @@ static struct proto x25_proto = {
472static struct sock *x25_alloc_socket(struct net *net) 472static struct sock *x25_alloc_socket(struct net *net)
473{ 473{
474 struct x25_sock *x25; 474 struct x25_sock *x25;
475 struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, 1); 475 struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
476 476
477 if (!sk) 477 if (!sk)
478 goto out; 478 goto out;
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 0426388d351d..1686f64c4352 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -21,7 +21,6 @@
21#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) 21#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
22#include <net/esp.h> 22#include <net/esp.h>
23#endif 23#endif
24#include <asm/scatterlist.h>
25 24
26/* 25/*
27 * Algorithms supported by IPsec. These entries contain properties which 26 * Algorithms supported by IPsec. These entries contain properties which
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 224b44e31a07..cf43c49eab37 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -552,7 +552,7 @@ int __xfrm_state_delete(struct xfrm_state *x)
552 * The xfrm_state_alloc call gives a reference, and that 552 * The xfrm_state_alloc call gives a reference, and that
553 * is what we are dropping here. 553 * is what we are dropping here.
554 */ 554 */
555 __xfrm_state_put(x); 555 xfrm_state_put(x);
556 err = 0; 556 err = 0;
557 } 557 }
558 558
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d41588d101d0..e75dbdcb08a4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -507,7 +507,6 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
507 struct xfrm_usersa_info *p, 507 struct xfrm_usersa_info *p,
508 struct sk_buff *skb) 508 struct sk_buff *skb)
509{ 509{
510 spin_lock_bh(&x->lock);
511 copy_to_user_state(x, p); 510 copy_to_user_state(x, p);
512 511
513 if (x->coaddr) 512 if (x->coaddr)
@@ -515,7 +514,6 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
515 514
516 if (x->lastused) 515 if (x->lastused)
517 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); 516 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
518 spin_unlock_bh(&x->lock);
519 517
520 if (x->aalg) 518 if (x->aalg)
521 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg); 519 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);