Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 2
-rw-r--r--  net/9p/trans_fd.c | 112
-rw-r--r--  net/appletalk/aarp.c | 2
-rw-r--r--  net/ax25/ax25_out.c | 6
-rw-r--r--  net/bluetooth/bnep/core.c | 4
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/l2cap.c | 5
-rw-r--r--  net/bridge/netfilter/ebtables.c | 6
-rw-r--r--  net/compat.c | 11
-rw-r--r--  net/core/dev.c | 21
-rw-r--r--  net/core/pktgen.c | 9
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/dccp/ccid.c | 18
-rw-r--r--  net/dccp/ccid.h | 2
-rw-r--r--  net/dccp/probe.c | 23
-rw-r--r--  net/ipv4/Kconfig | 6
-rw-r--r--  net/ipv4/devinet.c | 1
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 21
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/syncookies.c | 27
-rw-r--r--  net/ipv4/tcp_input.c | 24
-rw-r--r--  net/ipv4/tcp_ipv4.c | 21
-rw-r--r--  net/ipv4/tcp_minisocks.c | 10
-rw-r--r--  net/ipv4/tcp_output.c | 18
-rw-r--r--  net/ipv4/tcp_probe.c | 19
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv4/xfrm4_policy.c | 14
-rw-r--r--  net/ipv6/exthdrs.c | 7
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 19
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 8
-rw-r--r--  net/ipv6/route.c | 1
-rw-r--r--  net/ipv6/syncookies.c | 28
-rw-r--r--  net/ipv6/tcp_ipv6.c | 3
-rw-r--r--  net/ipv6/xfrm6_policy.c | 25
-rw-r--r--  net/irda/irlap.c | 14
-rw-r--r--  net/irda/irlap_event.c | 2
-rw-r--r--  net/irda/irlmp.c | 4
-rw-r--r--  net/irda/irnet/irnet.h | 1
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 8
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/iucv/iucv.c | 2
-rw-r--r--  net/key/af_key.c | 1
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 4
-rw-r--r--  net/netfilter/ipvs/Kconfig | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 18
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c | 15
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 18
-rw-r--r--  net/netfilter/xt_recent.c | 3
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 2
-rw-r--r--  net/netrom/nr_route.c | 11
-rw-r--r--  net/packet/af_packet.c | 90
-rw-r--r--  net/rds/ib.c | 4
-rw-r--r--  net/rds/iw.c | 4
-rw-r--r--  net/rose/rose_link.c | 8
-rw-r--r--  net/rose/rose_loopback.c | 2
-rw-r--r--  net/rose/rose_route.c | 5
-rw-r--r--  net/sched/act_api.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  net/socket.c | 125
-rw-r--r--  net/sunrpc/addr.c | 10
-rw-r--r--  net/sunrpc/auth.c | 39
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 23
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 54
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 104
-rw-r--r--  net/sunrpc/sched.c | 15
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 3
-rw-r--r--  net/sunrpc/svc_xprt.c | 34
-rw-r--r--  net/sunrpc/svcauth_unix.c | 53
-rw-r--r--  net/sunrpc/xprt.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2
-rw-r--r--  net/sunrpc/xprtsock.c | 2
-rw-r--r--  net/wimax/op-reset.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 77
-rw-r--r--  net/xfrm/xfrm_state.c | 6
-rw-r--r--  net/xfrm/xfrm_user.c | 14
89 files changed, 748 insertions, 573 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b7889782047e..c1b92cab46c7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -163,7 +163,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		goto err_unlock;
 	}
 
-	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
			       smp_processor_id());
 	rx_stats->rx_packets++;
 	rx_stats->rx_bytes += skb->len;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4dd873e3a1bb..be1cb909d8c0 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -42,6 +42,8 @@
 #include <net/9p/client.h>
 #include <net/9p/transport.h>
 
+#include <linux/syscalls.h> /* killme */
+
 #define P9_PORT 564
 #define MAX_SOCK_BUF (64*1024)
 #define MAXPOLLWADDR	2
@@ -788,24 +790,41 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 
 static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 {
-	int fd, ret;
+	struct p9_trans_fd *p;
+	int ret, fd;
+
+	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
 
 	csocket->sk->sk_allocation = GFP_NOIO;
 	fd = sock_map_fd(csocket, 0);
 	if (fd < 0) {
 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
+		sock_release(csocket);
+		kfree(p);
 		return fd;
 	}
 
-	ret = p9_fd_open(client, fd, fd);
-	if (ret < 0) {
-		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
+	get_file(csocket->file);
+	get_file(csocket->file);
+	p->wr = p->rd = csocket->file;
+	client->trans = p;
+	client->status = Connected;
+
+	sys_close(fd);	/* still racy */
+
+	p->rd->f_flags |= O_NONBLOCK;
+
+	p->conn = p9_conn_create(client);
+	if (IS_ERR(p->conn)) {
+		ret = PTR_ERR(p->conn);
+		p->conn = NULL;
+		kfree(p);
+		sockfd_put(csocket);
 		sockfd_put(csocket);
 		return ret;
 	}
-
-	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
-
 	return 0;
 }
 
@@ -883,7 +902,6 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	struct socket *csocket;
 	struct sockaddr_in sin_server;
 	struct p9_fd_opts opts;
-	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	err = parse_opts(args, &opts);
 	if (err < 0)
@@ -897,12 +915,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	sin_server.sin_family = AF_INET;
 	sin_server.sin_addr.s_addr = in_aton(addr);
 	sin_server.sin_port = htons(opts.port);
-	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
+	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
 
-	if (!csocket) {
+	if (err) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
-		err = -EIO;
-		goto error;
+		return err;
 	}
 
 	err = csocket->ops->connect(csocket,
@@ -912,30 +929,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 		P9_EPRINTK(KERN_ERR,
			   "p9_trans_tcp: problem connecting socket to %s\n",
			   addr);
-		goto error;
-	}
-
-	err = p9_socket_open(client, csocket);
-	if (err < 0)
-		goto error;
-
-	p = (struct p9_trans_fd *) client->trans;
-	p->conn = p9_conn_create(client);
-	if (IS_ERR(p->conn)) {
-		err = PTR_ERR(p->conn);
-		p->conn = NULL;
-		goto error;
-	}
-
-	return 0;
-
-error:
-	if (csocket)
 		sock_release(csocket);
+		return err;
+	}
 
-	kfree(p);
-
-	return err;
+	return p9_socket_open(client, csocket);
 }
 
 static int
@@ -944,49 +942,33 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 	int err;
 	struct socket *csocket;
 	struct sockaddr_un sun_server;
-	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
 
 	csocket = NULL;
 
 	if (strlen(addr) > UNIX_PATH_MAX) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
			addr);
-		err = -ENAMETOOLONG;
-		goto error;
+		return -ENAMETOOLONG;
 	}
 
 	sun_server.sun_family = PF_UNIX;
 	strcpy(sun_server.sun_path, addr);
-	sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+	if (err < 0) {
+		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
+		return err;
+	}
 	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
 	if (err < 0) {
 		P9_EPRINTK(KERN_ERR,
			"p9_trans_unix: problem connecting socket: %s: %d\n",
			addr, err);
-		goto error;
-	}
-
-	err = p9_socket_open(client, csocket);
-	if (err < 0)
-		goto error;
-
-	p = (struct p9_trans_fd *) client->trans;
-	p->conn = p9_conn_create(client);
-	if (IS_ERR(p->conn)) {
-		err = PTR_ERR(p->conn);
-		p->conn = NULL;
-		goto error;
-	}
-
-	return 0;
-
-error:
-	if (csocket)
 		sock_release(csocket);
+		return err;
+	}
 
-	kfree(p);
-	return err;
+	return p9_socket_open(client, csocket);
 }
 
 static int
@@ -994,7 +976,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
 {
 	int err;
 	struct p9_fd_opts opts;
-	struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */
+	struct p9_trans_fd *p;
 
 	parse_opts(args, &opts);
 
@@ -1005,21 +987,19 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
 
 	err = p9_fd_open(client, opts.rfd, opts.wfd);
 	if (err < 0)
-		goto error;
+		return err;
 
 	p = (struct p9_trans_fd *) client->trans;
 	p->conn = p9_conn_create(client);
 	if (IS_ERR(p->conn)) {
 		err = PTR_ERR(p->conn);
 		p->conn = NULL;
-		goto error;
+		fput(p->rd);
+		fput(p->wr);
+		return err;
 	}
 
 	return 0;
-
-error:
-	kfree(p);
-	return err;
 }
 
 static struct p9_trans_module p9_tcp_trans = {
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 9d4adfd22757..f2b3b56aa779 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -819,7 +819,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
 		ma = &ifa->address;
 	else { /* We need to make a copy of the entry. */
 		da.s_node = sa.s_node;
-		da.s_net = da.s_net;
+		da.s_net = sa.s_net;
 		ma = &da;
 	}
 
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index bf706f83a5c9..14912600ec57 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
 #endif
 	}
 
+	/*
+	 * There is one ref for the state machine; a caller needs
+	 * one more to put it back, just like with the existing one.
+	 */
+	ax25_cb_hold(ax25);
+
 	ax25_cb_add(ax25);
 
 	ax25->state = AX25_STATE_1;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 29b1b220d6cf..ef09c7b3a858 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -78,7 +78,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
78static void __bnep_link_session(struct bnep_session *s) 78static void __bnep_link_session(struct bnep_session *s)
79{ 79{
80 /* It's safe to call __module_get() here because sessions are added 80 /* It's safe to call __module_get() here because sessions are added
81 by the socket layer which has to hold the refference to this module. 81 by the socket layer which has to hold the reference to this module.
82 */ 82 */
83 __module_get(THIS_MODULE); 83 __module_get(THIS_MODULE);
84 list_add(&s->list, &bnep_session_list); 84 list_add(&s->list, &bnep_session_list);
@@ -632,7 +632,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
632 s = __bnep_get_session(req->dst); 632 s = __bnep_get_session(req->dst);
633 if (s) { 633 if (s) {
634 /* Wakeup user-space which is polling for socket errors. 634 /* Wakeup user-space which is polling for socket errors.
635 * This is temporary hack untill we have shutdown in L2CAP */ 635 * This is temporary hack until we have shutdown in L2CAP */
636 s->sock->sk->sk_err = EUNATCH; 636 s->sock->sk->sk_err = EUNATCH;
637 637
638 /* Kill session thread */ 638 /* Kill session thread */
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 569750010fd3..18e7f5a43dc4 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -770,7 +770,7 @@ static int hidp_setup_hid(struct hidp_session *session,
770 770
771 hid = hid_allocate_device(); 771 hid = hid_allocate_device();
772 if (IS_ERR(hid)) 772 if (IS_ERR(hid))
773 return PTR_ERR(session->hid); 773 return PTR_ERR(hid);
774 774
775 session->hid = hid; 775 session->hid = hid;
776 session->req = req; 776 session->req = req;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 5129b88c8e5b..1120cf14a548 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1212,6 +1212,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
 	bh_lock_sock(sk);
 	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
 		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
+		bh_unlock_sock(sk);
 		return;
 	}
 
@@ -3435,8 +3436,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
				(pi->unacked_frames > 0))
			__mod_retrans_timer();
 
-		l2cap_ertm_send(sk);
 		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+		l2cap_ertm_send(sk);
 	}
 	break;
 
@@ -3471,9 +3472,9 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
 	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
 
 	if (rx_control & L2CAP_CTRL_POLL) {
-		l2cap_retransmit_frame(sk, tx_seq);
 		pi->expected_ack_seq = tx_seq;
 		l2cap_drop_acked_frames(sk);
+		l2cap_retransmit_frame(sk, tx_seq);
 		l2cap_ertm_send(sk);
 		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
 			pi->srej_save_reqseq = tx_seq;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index bd1c65425d4f..0b7f262cd148 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1406,6 +1406,9 @@ static int do_ebt_set_ctl(struct sock *sk,
1406{ 1406{
1407 int ret; 1407 int ret;
1408 1408
1409 if (!capable(CAP_NET_ADMIN))
1410 return -EPERM;
1411
1409 switch(cmd) { 1412 switch(cmd) {
1410 case EBT_SO_SET_ENTRIES: 1413 case EBT_SO_SET_ENTRIES:
1411 ret = do_replace(sock_net(sk), user, len); 1414 ret = do_replace(sock_net(sk), user, len);
@@ -1425,6 +1428,9 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1425 struct ebt_replace tmp; 1428 struct ebt_replace tmp;
1426 struct ebt_table *t; 1429 struct ebt_table *t;
1427 1430
1431 if (!capable(CAP_NET_ADMIN))
1432 return -EPERM;
1433
1428 if (copy_from_user(&tmp, user, sizeof(tmp))) 1434 if (copy_from_user(&tmp, user, sizeof(tmp)))
1429 return -EFAULT; 1435 return -EFAULT;
1430 1436
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803b..a1fb1b079a82 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
 
 asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
				    unsigned vlen, unsigned int flags,
-				    struct timespec __user *timeout)
+				    struct compat_timespec __user *timeout)
 {
 	int datagrams;
 	struct timespec ktspec;
-	struct compat_timespec __user *utspec;
 
 	if (timeout == NULL)
 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
				      flags | MSG_CMSG_COMPAT, NULL);
 
-	utspec = (struct compat_timespec __user *)timeout;
-	if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
-	    get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+	if (get_compat_timespec(&ktspec, timeout))
 		return -EFAULT;
 
 	datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
				   flags | MSG_CMSG_COMPAT, &ktspec);
-	if (datagrams > 0 &&
-	    (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
-	     put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+	if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
 		datagrams = -EFAULT;
 
 	return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf3..be9924f60ec3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)
 
 static void rollback_registered_many(struct list_head *head)
 {
-	struct net_device *dev;
+	struct net_device *dev, *tmp;
 
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
 		/* Some devices call without registering
-		 * for initialization unwind.
+		 * for initialization unwind. Remove those
+		 * devices and proceed with the remaining.
		 */
 		if (dev->reg_state == NETREG_UNINITIALIZED) {
 			pr_debug("unregister_netdevice: device %s/%p never "
				 "was registered\n", dev->name, dev);
 
 			WARN_ON(1);
-			return;
+			list_del(&dev->unreg_list);
+			continue;
 		}
 
 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
 		rollback_registered(dev);
 		dev->reg_state = NETREG_UNREGISTERED;
 	}
+	/*
+	 *	Prevent userspace races by waiting until the network
+	 *	device is fully setup before sending notifications.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
 out:
 	return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	/* Notify protocols, that a new device appeared. */
 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
 
+	/*
+	 *	Prevent userspace races by waiting until the network
+	 *	device is fully setup before sending notifications.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
 	synchronize_net();
 	err = 0;
 out:
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a23b45f08ec9..de0c2c726420 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -250,8 +250,7 @@ struct pktgen_dev {
 	__u64 count;		/* Default No packets to send */
 	__u64 sofar;		/* How many pkts we've sent so far */
 	__u64 tx_bytes;		/* How many bytes we've transmitted */
-	__u64 errors;		/* Errors when trying to transmit,
-				   pkts will be re-sent */
+	__u64 errors;		/* Errors when trying to transmit, */
 
 	/* runtime counters relating to clone_skb */
 
@@ -3465,6 +3464,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->seq_num++;
 		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
 		break;
+	case NET_XMIT_DROP:
+	case NET_XMIT_CN:
+	case NET_XMIT_POLICED:
+		/* skb has been consumed */
+		pkt_dev->errors++;
+		break;
 	default: /* Drivers are not supposed to return other values! */
 		if (net_ratelimit())
 			pr_info("pktgen: %s xmit error: %d\n",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a568199..794bcb897ff0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 	case NETDEV_UNREGISTER:
 		rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 		break;
-	case NETDEV_REGISTER:
-		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
-		break;
 	case NETDEV_UP:
 	case NETDEV_DOWN:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
 		break;
+	case NETDEV_POST_INIT:
+	case NETDEV_REGISTER:
 	case NETDEV_CHANGE:
 	case NETDEV_GOING_DOWN:
+	case NETDEV_UNREGISTER_BATCH:
 		break;
 	default:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bfa3e7865a8c..93c4e060c91e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -93,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
93 93
94 94
95/* Pipe buffer operations for a socket. */ 95/* Pipe buffer operations for a socket. */
96static struct pipe_buf_operations sock_pipe_buf_ops = { 96static const struct pipe_buf_operations sock_pipe_buf_ops = {
97 .can_merge = 0, 97 .can_merge = 0,
98 .map = generic_pipe_buf_map, 98 .map = generic_pipe_buf_map,
99 .unmap = generic_pipe_buf_unmap, 99 .unmap = generic_pipe_buf_unmap,
diff --git a/net/core/sock.c b/net/core/sock.c
index 76ff58d43e26..e1f6f225f012 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1205,6 +1205,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 		if (newsk->sk_prot->sockets_allocated)
 			percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+
+		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
+		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+			net_enable_timestamp();
 	}
 out:
 	return newsk;
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index f3e9ba1cfd01..57dfb9c8c4f2 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -77,34 +77,24 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
 	return err;
 }
 
-static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
 {
 	struct kmem_cache *slab;
-	char slab_name_fmt[32], *slab_name;
 	va_list args;
 
 	va_start(args, fmt);
 	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
 	va_end(args);
 
-	slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
-	if (slab_name == NULL)
-		return NULL;
-	slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
+	slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
				 SLAB_HWCACHE_ALIGN, NULL);
-	if (slab == NULL)
-		kfree(slab_name);
 	return slab;
 }
 
 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
-	if (slab != NULL) {
-		const char *name = kmem_cache_name(slab);
-
+	if (slab != NULL)
 		kmem_cache_destroy(slab);
-		kfree(name);
-	}
 }
 
 static int ccid_activate(struct ccid_operations *ccid_ops)
@@ -113,6 +103,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
 
 	ccid_ops->ccid_hc_rx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
+					       ccid_ops->ccid_hc_rx_slab_name,
					       "ccid%u_hc_rx_sock",
					       ccid_ops->ccid_id);
 	if (ccid_ops->ccid_hc_rx_slab == NULL)
@@ -120,6 +111,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
 
 	ccid_ops->ccid_hc_tx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
+					       ccid_ops->ccid_hc_tx_slab_name,
					       "ccid%u_hc_tx_sock",
					       ccid_ops->ccid_id);
 	if (ccid_ops->ccid_hc_tx_slab == NULL)
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index facedd20b531..269958bf7fe9 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -49,6 +49,8 @@ struct ccid_operations {
 	const char		*ccid_name;
 	struct kmem_cache	*ccid_hc_rx_slab,
				*ccid_hc_tx_slab;
+	char			ccid_hc_rx_slab_name[32];
+	char			ccid_hc_tx_slab_name[32];
 	__u32			ccid_hc_rx_obj_size,
				ccid_hc_tx_obj_size;
 	/* Interface Routines */
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index dc328425fa20..bace1d8cbcfd 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -43,7 +43,7 @@ static int bufsize = 64 * 1024;
 static const char procname[] = "dccpprobe";
 
 static struct {
-	struct kfifo	*fifo;
+	struct kfifo	fifo;
 	spinlock_t	lock;
 	wait_queue_head_t wait;
 	struct timespec	tstart;
@@ -67,7 +67,7 @@ static void printl(const char *fmt, ...)
 	len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
 	va_end(args);
 
-	kfifo_put(dccpw.fifo, tbuf, len);
+	kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
 	wake_up(&dccpw.wait);
 }
 
@@ -109,7 +109,7 @@ static struct jprobe dccp_send_probe = {
 
 static int dccpprobe_open(struct inode *inode, struct file *file)
 {
-	kfifo_reset(dccpw.fifo);
+	kfifo_reset(&dccpw.fifo);
 	getnstimeofday(&dccpw.tstart);
 	return 0;
 }
@@ -131,11 +131,11 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
 		return -ENOMEM;
 
 	error = wait_event_interruptible(dccpw.wait,
-					 __kfifo_len(dccpw.fifo) != 0);
+					 kfifo_len(&dccpw.fifo) != 0);
 	if (error)
 		goto out_free;
 
-	cnt = kfifo_get(dccpw.fifo, tbuf, len);
+	cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
 	error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
 
 out_free:
@@ -156,14 +156,13 @@ static __init int dccpprobe_init(void)
 
 	init_waitqueue_head(&dccpw.wait);
 	spin_lock_init(&dccpw.lock);
-	dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock);
-	if (IS_ERR(dccpw.fifo))
-		return PTR_ERR(dccpw.fifo);
-
+	if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
+		return ret;
 	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
 		goto err0;
 
-	ret = register_jprobe(&dccp_send_probe);
+	ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
+				      "dccp");
 	if (ret)
 		goto err1;
 
@@ -172,14 +171,14 @@ static __init int dccpprobe_init(void)
 err1:
 	proc_net_remove(&init_net, procname);
 err0:
-	kfifo_free(dccpw.fifo);
+	kfifo_free(&dccpw.fifo);
 	return ret;
 }
 module_init(dccpprobe_init);
 
 static __exit void dccpprobe_exit(void)
 {
-	kfifo_free(dccpw.fifo);
+	kfifo_free(&dccpw.fifo);
 	proc_net_remove(&init_net, procname);
 	unregister_jprobe(&dccp_send_probe);
 
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035eb..0c94a1ac2946 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,7 +166,7 @@ config IP_PNP_DHCP
 
	  If unsure, say Y. Note that if you want to use DHCP, a DHCP server
	  must be operating on your network. Read
-	  <file:Documentation/filesystems/nfsroot.txt> for details.
+	  <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
 config IP_PNP_BOOTP
	bool "IP: BOOTP support"
@@ -181,7 +181,7 @@ config IP_PNP_BOOTP
	  does BOOTP itself, providing all necessary information on the kernel
	  command line, you can say N here. If unsure, say Y. Note that if you
	  want to use BOOTP, a BOOTP server must be operating on your network.
-	  Read <file:Documentation/filesystems/nfsroot.txt> for details.
+	  Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
 config IP_PNP_RARP
	bool "IP: RARP support"
@@ -194,7 +194,7 @@ config IP_PNP_RARP
	  older protocol which is being obsoleted by BOOTP and DHCP), say Y
	  here. Note that if you want to use RARP, a RARP server must be
	  operating on your network. Read
-	  <file:Documentation/filesystems/nfsroot.txt> for details.
+	  <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
 
 # not yet ready..
 #	bool '    IP: ARP support' CONFIG_IP_PNP_ARP
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5cdbc102a418..040c4f05b653 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table {
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
					"accept_source_route"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
+		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3323168ee52d..82dbf711d6d0 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
		no_addr = in_dev->ifa_list == NULL;
		rpf = IN_DEV_RPFILTER(in_dev);
		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
+		if (mark && !IN_DEV_SRC_VMARK(in_dev))
+			fl.mark = 0;
	}
	rcu_read_unlock();
 
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index bdb78dd180ce..1aaa8110d84b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len,
368 yes = entry->sport >= op[1].no; 368 yes = entry->sport >= op[1].no;
369 break; 369 break;
370 case INET_DIAG_BC_S_LE: 370 case INET_DIAG_BC_S_LE:
371 yes = entry->dport <= op[1].no; 371 yes = entry->sport <= op[1].no;
372 break; 372 break;
373 case INET_DIAG_BC_D_GE: 373 case INET_DIAG_BC_D_GE:
374 yes = entry->dport >= op[1].no; 374 yes = entry->dport >= op[1].no;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e34013a78ef4..3451799e3dbf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -254,7 +254,7 @@ int ip_mc_output(struct sk_buff *skb)
254 */ 254 */
255 255
256 if (rt->rt_flags&RTCF_MULTICAST) { 256 if (rt->rt_flags&RTCF_MULTICAST) {
257 if ((!sk || inet_sk(sk)->mc_loop) 257 if (sk_mc_loop(sk)
258#ifdef CONFIG_IP_MROUTE 258#ifdef CONFIG_IP_MROUTE
259 /* Small optimization: do not loopback not local frames, 259 /* Small optimization: do not loopback not local frames,
260 which returned after forwarding; they will be dropped 260 which returned after forwarding; they will be dropped
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4e08b7f2331c..10a6a604bf32 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1446,7 +1446,7 @@ late_initcall(ip_auto_config);
1446 1446
1447/* 1447/*
1448 * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel 1448 * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel
1449 * command line parameter. See Documentation/filesystems/nfsroot.txt. 1449 * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt.
1450 */ 1450 */
1451static int __init ic_proto_name(char *name) 1451static int __init ic_proto_name(char *name)
1452{ 1452{
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 549e206cdd42..ea5cea2415c1 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -50,7 +50,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
50 struct tcphdr _tcph, *tcph; 50 struct tcphdr _tcph, *tcph;
51 __be16 oldval; 51 __be16 oldval;
52 52
53 /* Not enought header? */ 53 /* Not enough header? */
54 tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 54 tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph);
55 if (!tcph) 55 if (!tcph)
56 return false; 56 return false;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e5..331ead3ebd1b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
 #include <net/route.h>
 #include <net/ip.h>
 
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
	return err;
 }
 
+static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
+					      struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+	if (skb->nf_bridge &&
+	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+		return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+	if (hooknum == NF_INET_PRE_ROUTING)
+		return IP_DEFRAG_CONNTRACK_IN;
+	else
+		return IP_DEFRAG_CONNTRACK_OUT;
+}
+
 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
					  struct sk_buff *skb,
					  const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #endif
	/* Gather fragments. */
	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
-		if (nf_ct_ipv4_gather_frags(skb,
-					    hooknum == NF_INET_PRE_ROUTING ?
-					    IP_DEFRAG_CONNTRACK_IN :
-					    IP_DEFRAG_CONNTRACK_OUT))
+		enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+		if (nf_ct_ipv4_gather_frags(skb, user))
			return NF_STOLEN;
	}
	return NF_ACCEPT;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e446496f564f..d62b05d33384 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -586,7 +586,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
586{ 586{
587 remove_proc_entry("rt_cache", net->proc_net_stat); 587 remove_proc_entry("rt_cache", net->proc_net_stat);
588 remove_proc_entry("rt_cache", net->proc_net); 588 remove_proc_entry("rt_cache", net->proc_net);
589#ifdef CONFIG_NET_CLS_ROUTE
589 remove_proc_entry("rt_acct", net->proc_net); 590 remove_proc_entry("rt_acct", net->proc_net);
591#endif
590} 592}
591 593
592static struct pernet_operations ip_rt_proc_ops __net_initdata = { 594static struct pernet_operations ip_rt_proc_ops __net_initdata = {
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a289..66fd80ef2473 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
+	/* check for timestamp cookie support */
+	memset(&tcp_opt, 0, sizeof(tcp_opt));
+	tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+	if (tcp_opt.saw_tstamp)
+		cookie_check_timestamp(&tcp_opt);
+
	ret = NULL;
	req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
	if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
	ireq->loc_addr		= ip_hdr(skb)->daddr;
	ireq->rmt_addr		= ip_hdr(skb)->saddr;
	ireq->ecn_ok		= 0;
+	ireq->snd_wscale	= tcp_opt.snd_wscale;
+	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
+	ireq->sack_ok		= tcp_opt.sack_ok;
+	ireq->wscale_ok		= tcp_opt.wscale_ok;
+	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
+	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
 
	/* We throwed the options of the initial SYN away, so we hope
	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
		}
	}
 
-	/* check for timestamp cookie support */
-	memset(&tcp_opt, 0, sizeof(tcp_opt));
-	tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
-
-	if (tcp_opt.saw_tstamp)
-		cookie_check_timestamp(&tcp_opt);
-
-	ireq->snd_wscale	= tcp_opt.snd_wscale;
-	ireq->rcv_wscale	= tcp_opt.rcv_wscale;
-	ireq->sack_ok		= tcp_opt.sack_ok;
-	ireq->wscale_ok		= tcp_opt.wscale_ok;
-	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
-	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
	/* Try to redo what tcp_v4_send_synack did. */
	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12cab7d74dba..28e029632493 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3727,7 +3727,7 @@ old_ack:
  * the fast version below fails.
  */
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-		       u8 **hvpp, int estab, struct dst_entry *dst)
+		       u8 **hvpp, int estab)
 {
	unsigned char *ptr;
	struct tcphdr *th = tcp_hdr(skb);
@@ -3766,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
				break;
			case TCPOPT_WINDOW:
				if (opsize == TCPOLEN_WINDOW && th->syn &&
-				    !estab && sysctl_tcp_window_scaling &&
-				    !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
+				    !estab && sysctl_tcp_window_scaling) {
					__u8 snd_wscale = *(__u8 *)ptr;
					opt_rx->wscale_ok = 1;
					if (snd_wscale > 14) {
@@ -3783,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
			case TCPOPT_TIMESTAMP:
				if ((opsize == TCPOLEN_TIMESTAMP) &&
				    ((estab && opt_rx->tstamp_ok) ||
-				     (!estab && sysctl_tcp_timestamps &&
-				      !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
+				     (!estab && sysctl_tcp_timestamps))) {
					opt_rx->saw_tstamp = 1;
					opt_rx->rcv_tsval = get_unaligned_be32(ptr);
					opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3792,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
				break;
			case TCPOPT_SACK_PERM:
				if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-				    !estab && sysctl_tcp_sack &&
-				    !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
+				    !estab && sysctl_tcp_sack) {
					opt_rx->sack_ok = 1;
					tcp_sack_reset(opt_rx);
				}
@@ -3878,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
		if (tcp_parse_aligned_timestamp(tp, th))
			return 1;
	}
-	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
	return 1;
 }
 
@@ -4133,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
 
-	if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
-	    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
		int mib_idx;
 
		if (before(seq, tp->rcv_nxt))
@@ -4165,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 {
	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
 
	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
		tcp_enter_quickack_mode(sk);
 
-		if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
-		    !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5428,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
	u8 *hash_location;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_cookie_values *cvp = tp->cookie_values;
	int saved_clamp = tp->rx_opt.mss_clamp;
 
-	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
+	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
 
	if (th->ack) {
		/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 15e96030ce47..65b8ebfd078a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
 #endif
 
-	ireq = inet_rsk(req);
-	ireq->loc_addr = daddr;
-	ireq->rmt_addr = saddr;
-	ireq->no_srccheck = inet_sk(sk)->transparent;
-	ireq->opt = tcp_v4_save_options(sk, skb);
-
-	dst = inet_csk_route_req(sk, req);
-	if(!dst)
-		goto drop_and_free;
-
	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);
 
+	ireq = inet_rsk(req);
+	ireq->loc_addr = daddr;
+	ireq->rmt_addr = saddr;
+	ireq->no_srccheck = inet_sk(sk)->transparent;
+	ireq->opt = tcp_v4_save_options(sk, skb);
+
	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_release;
+		goto drop_and_free;
 
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	 */
	if (tmp_opt.saw_tstamp &&
	    tcp_death_row.sysctl_tw_recycle &&
+	    (dst = inet_csk_route_req(sk, req)) != NULL &&
	    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
	    peer->v4daddr == saddr) {
		if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d097..f206ee5dda80 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	int paws_reject = 0;
 
+	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-		tmp_opt.tstamp_ok = 1;
-		tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
 
-	if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
-		tmp_opt.tstamp_ok = 1;
-		tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+	tmp_opt.saw_tstamp = 0;
+	if (th->doff > (sizeof(struct tcphdr)>>2)) {
+		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d820..383ce237640f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_md5sig_key **md5) {
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_cookie_values *cvp = tp->cookie_values;
-	struct dst_entry *dst = __sk_dst_get(sk);
	unsigned remaining = MAX_TCP_OPTION_SPACE;
	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
			 tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;
 
-	if (likely(sysctl_tcp_timestamps &&
-		   !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
-		   *md5 == NULL)) {
+	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
-	if (likely(sysctl_tcp_window_scaling &&
-		   !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
+	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
-	if (likely(sysctl_tcp_sack &&
-		   !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
+	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr) +
-		(sysctl_tcp_timestamps &&
-		(!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
-		  TCPOLEN_TSTAMP_ALIGNED : 0));
+		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
				  &tp->rcv_wnd,
				  &tp->window_clamp,
-				  (sysctl_tcp_window_scaling &&
-				   !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
+				  sysctl_tcp_window_scaling,
				  &rcv_wscale);
 
	tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index bb110c5ce1d2..9bc805df95d2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -39,9 +39,9 @@ static int port __read_mostly = 0;
39MODULE_PARM_DESC(port, "Port to match (0=all)"); 39MODULE_PARM_DESC(port, "Port to match (0=all)");
40module_param(port, int, 0); 40module_param(port, int, 0);
41 41
42static int bufsize __read_mostly = 4096; 42static unsigned int bufsize __read_mostly = 4096;
43MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); 43MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
44module_param(bufsize, int, 0); 44module_param(bufsize, uint, 0);
45 45
46static int full __read_mostly; 46static int full __read_mostly;
47MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); 47MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
@@ -75,12 +75,12 @@ static struct {
75 75
76static inline int tcp_probe_used(void) 76static inline int tcp_probe_used(void)
77{ 77{
78 return (tcp_probe.head - tcp_probe.tail) % bufsize; 78 return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
79} 79}
80 80
81static inline int tcp_probe_avail(void) 81static inline int tcp_probe_avail(void)
82{ 82{
83 return bufsize - tcp_probe_used(); 83 return bufsize - tcp_probe_used() - 1;
84} 84}
85 85
86/* 86/*
@@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
116 p->ssthresh = tcp_current_ssthresh(sk); 116 p->ssthresh = tcp_current_ssthresh(sk);
117 p->srtt = tp->srtt >> 3; 117 p->srtt = tp->srtt >> 3;
118 118
119 tcp_probe.head = (tcp_probe.head + 1) % bufsize; 119 tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
120 } 120 }
121 tcp_probe.lastcwnd = tp->snd_cwnd; 121 tcp_probe.lastcwnd = tp->snd_cwnd;
122 spin_unlock(&tcp_probe.lock); 122 spin_unlock(&tcp_probe.lock);
@@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file)
149static int tcpprobe_sprint(char *tbuf, int n) 149static int tcpprobe_sprint(char *tbuf, int n)
150{ 150{
151 const struct tcp_log *p 151 const struct tcp_log *p
152 = tcp_probe.log + tcp_probe.tail % bufsize; 152 = tcp_probe.log + tcp_probe.tail;
153 struct timespec tv 153 struct timespec tv
154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
155 155
@@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
192 width = tcpprobe_sprint(tbuf, sizeof(tbuf)); 192 width = tcpprobe_sprint(tbuf, sizeof(tbuf));
193 193
194 if (cnt + width < len) 194 if (cnt + width < len)
195 tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; 195 tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
196 196
197 spin_unlock_bh(&tcp_probe.lock); 197 spin_unlock_bh(&tcp_probe.lock);
198 198
@@ -222,9 +222,10 @@ static __init int tcpprobe_init(void)
222 init_waitqueue_head(&tcp_probe.wait); 222 init_waitqueue_head(&tcp_probe.wait);
223 spin_lock_init(&tcp_probe.lock); 223 spin_lock_init(&tcp_probe.lock);
224 224
225 if (bufsize < 0) 225 if (bufsize == 0)
226 return -EINVAL; 226 return -EINVAL;
227 227
228 bufsize = roundup_pow_of_two(bufsize);
228 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); 229 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
229 if (!tcp_probe.log) 230 if (!tcp_probe.log)
230 goto err0; 231 goto err0;
@@ -236,7 +237,7 @@ static __init int tcpprobe_init(void)
236 if (ret) 237 if (ret)
237 goto err1; 238 goto err1;
238 239
239 pr_info("TCP probe registered (port=%d)\n", port); 240 pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
240 return 0; 241 return 0;
241 err1: 242 err1:
242 proc_net_remove(&init_net, procname); 243 proc_net_remove(&init_net, procname);
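
The tcp_probe.c changes round bufsize up to a power of two and switch the head/tail arithmetic from '%' to masking, keeping one slot free so a full ring is distinguishable from an empty one. A minimal userspace sketch of that indexing scheme (names and the fixed RING_SIZE are illustrative, not the module's API):

    #include <stdio.h>

    #define RING_SIZE 8          /* must be a power of two */

    static unsigned int head, tail;

    static unsigned int ring_used(void)
    {
        return (head - tail) & (RING_SIZE - 1);
    }

    static unsigned int ring_avail(void)
    {
        /* one slot stays empty so used == RING_SIZE - 1 means "full" */
        return RING_SIZE - ring_used() - 1;
    }

    static void ring_push(void) { head = (head + 1) & (RING_SIZE - 1); }
    static void ring_pop(void)  { tail = (tail + 1) & (RING_SIZE - 1); }

    int main(void)
    {
        while (ring_avail())
            ring_push();
        printf("used=%u avail=%u\n", ring_used(), ring_avail());  /* 7 0 */
        ring_pop();
        printf("used=%u avail=%u\n", ring_used(), ring_avail());  /* 6 1 */
        return 0;
    }
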
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca9..f0126fdd7e04 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
216 * force rand to be an odd multiple of UDP_HTABLE_SIZE 216 * force rand to be an odd multiple of UDP_HTABLE_SIZE
217 */ 217 */
218 rand = (rand | 1) * (udptable->mask + 1); 218 rand = (rand | 1) * (udptable->mask + 1);
219 for (last = first + udptable->mask + 1; 219 last = first + udptable->mask + 1;
220 first != last; 220 do {
221 first++) {
222 hslot = udp_hashslot(udptable, net, first); 221 hslot = udp_hashslot(udptable, net, first);
223 bitmap_zero(bitmap, PORTS_PER_CHAIN); 222 bitmap_zero(bitmap, PORTS_PER_CHAIN);
224 spin_lock_bh(&hslot->lock); 223 spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
238 snum += rand; 237 snum += rand;
239 } while (snum != first); 238 } while (snum != first);
240 spin_unlock_bh(&hslot->lock); 239 spin_unlock_bh(&hslot->lock);
241 } 240 } while (++first != last);
242 goto fail; 241 goto fail;
243 } else { 242 } else {
244 hslot = udp_hashslot(udptable, net, snum); 243 hslot = udp_hashslot(udptable, net, snum);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 8c08a28d8f83..67107d63c1cd 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,7 +15,6 @@
15#include <net/xfrm.h> 15#include <net/xfrm.h>
16#include <net/ip.h> 16#include <net/ip.h>
17 17
18static struct dst_ops xfrm4_dst_ops;
19static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 18static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
20 19
21static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, 20static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
@@ -190,8 +189,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
190 189
191static inline int xfrm4_garbage_collect(struct dst_ops *ops) 190static inline int xfrm4_garbage_collect(struct dst_ops *ops)
192{ 191{
193 xfrm4_policy_afinfo.garbage_collect(&init_net); 192 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
194 return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); 193
194 xfrm4_policy_afinfo.garbage_collect(net);
195 return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
195} 196}
196 197
197static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) 198static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -268,7 +269,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
268static struct ctl_table xfrm4_policy_table[] = { 269static struct ctl_table xfrm4_policy_table[] = {
269 { 270 {
270 .procname = "xfrm4_gc_thresh", 271 .procname = "xfrm4_gc_thresh",
271 .data = &xfrm4_dst_ops.gc_thresh, 272 .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
272 .maxlen = sizeof(int), 273 .maxlen = sizeof(int),
273 .mode = 0644, 274 .mode = 0644,
274 .proc_handler = proc_dointvec, 275 .proc_handler = proc_dointvec,
@@ -295,8 +296,6 @@ static void __exit xfrm4_policy_fini(void)
295 296
296void __init xfrm4_init(int rt_max_size) 297void __init xfrm4_init(int rt_max_size)
297{ 298{
298 xfrm4_state_init();
299 xfrm4_policy_init();
300 /* 299 /*
301 * Select a default value for the gc_thresh based on the main route 300 * Select a default value for the gc_thresh based on the main route
302 * table hash size. It seems to me the worst case scenario is when 301 * table hash size. It seems to me the worst case scenario is when
@@ -308,6 +307,9 @@ void __init xfrm4_init(int rt_max_size)
308 * and start cleaning when were 1/2 full 307 * and start cleaning when were 1/2 full
309 */ 308 */
310 xfrm4_dst_ops.gc_thresh = rt_max_size/2; 309 xfrm4_dst_ops.gc_thresh = rt_max_size/2;
310
311 xfrm4_state_init();
312 xfrm4_policy_init();
311#ifdef CONFIG_SYSCTL 313#ifdef CONFIG_SYSCTL
312 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 314 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
313 xfrm4_policy_table); 315 xfrm4_policy_table);
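
xfrm4_garbage_collect() now recovers the owning struct net from the dst_ops pointer with container_of() instead of touching a file-static xfrm4_dst_ops, which is what lets the gc accounting become per-namespace. A standalone sketch of that pattern follows; the struct layout is invented for the example and only hints at the real struct net.

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dst_ops { int gc_thresh; };

    struct net {                  /* toy stand-in for the real, much larger struct */
        int id;
        struct dst_ops xfrm4_dst_ops;
    };

    static int garbage_collect(struct dst_ops *ops)
    {
        struct net *net = container_of(ops, struct net, xfrm4_dst_ops);
        return net->id;
    }

    int main(void)
    {
        struct net n = { .id = 42, .xfrm4_dst_ops = { .gc_thresh = 1024 } };
        printf("recovered net id: %d\n", garbage_collect(&n.xfrm4_dst_ops));
        return 0;
    }
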
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index df159fffe4bc..4bac362b1335 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -559,6 +559,11 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
559 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); 559 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
560} 560}
561 561
562static inline struct net *ipv6_skb_net(struct sk_buff *skb)
563{
564 return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
565}
566
562/* Router Alert as of RFC 2711 */ 567/* Router Alert as of RFC 2711 */
563 568
564static int ipv6_hop_ra(struct sk_buff *skb, int optoff) 569static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
@@ -580,8 +585,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
580static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) 585static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
581{ 586{
582 const unsigned char *nh = skb_network_header(skb); 587 const unsigned char *nh = skb_network_header(skb);
588 struct net *net = ipv6_skb_net(skb);
583 u32 pkt_len; 589 u32 pkt_len;
584 struct net *net = dev_net(skb_dst(skb)->dev);
585 590
586 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { 591 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
587 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", 592 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd48801a8d6f..eb6d09728633 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -121,10 +121,9 @@ static int ip6_output2(struct sk_buff *skb)
121 skb->dev = dev; 121 skb->dev = dev;
122 122
123 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { 123 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
124 struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
125 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); 124 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
126 125
127 if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && 126 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
128 ((mroute6_socket(dev_net(dev)) && 127 ((mroute6_socket(dev_net(dev)) &&
129 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || 128 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
130 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, 129 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
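
The ip6_output2() hunk replaces the open-coded "(!np || np->mc_loop)" test with sk_mc_loop(), which treats a missing socket as "loop multicast back". A minimal sketch of such a NULL-safe helper, with a simplified stand-in socket:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock { bool mc_loop; };        /* simplified stand-in */

    static bool sk_mc_loop(const struct sock *sk)
    {
        return sk == NULL || sk->mc_loop; /* no socket: default to looping */
    }

    int main(void)
    {
        struct sock s = { .mc_loop = false };
        printf("%d %d\n", sk_mc_loop(NULL), sk_mc_loop(&s));  /* 1 0 */
        return 0;
    }
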
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c3..0956ebabbff2 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,6 +20,7 @@
20#include <net/ipv6.h> 20#include <net/ipv6.h>
21#include <net/inet_frag.h> 21#include <net/inet_frag.h>
22 22
23#include <linux/netfilter_bridge.h>
23#include <linux/netfilter_ipv6.h> 24#include <linux/netfilter_ipv6.h>
24#include <net/netfilter/nf_conntrack.h> 25#include <net/netfilter/nf_conntrack.h>
25#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
@@ -187,6 +188,21 @@ out:
187 return nf_conntrack_confirm(skb); 188 return nf_conntrack_confirm(skb);
188} 189}
189 190
191static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
192 struct sk_buff *skb)
193{
194#ifdef CONFIG_BRIDGE_NETFILTER
195 if (skb->nf_bridge &&
196 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
197 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
198#endif
199 if (hooknum == NF_INET_PRE_ROUTING)
200 return IP6_DEFRAG_CONNTRACK_IN;
201 else
202 return IP6_DEFRAG_CONNTRACK_OUT;
203
204}
205
190static unsigned int ipv6_defrag(unsigned int hooknum, 206static unsigned int ipv6_defrag(unsigned int hooknum,
191 struct sk_buff *skb, 207 struct sk_buff *skb,
192 const struct net_device *in, 208 const struct net_device *in,
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
199 if (skb->nfct) 215 if (skb->nfct)
200 return NF_ACCEPT; 216 return NF_ACCEPT;
201 217
202 reasm = nf_ct_frag6_gather(skb); 218 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
203
204 /* queued */ 219 /* queued */
205 if (reasm == NULL) 220 if (reasm == NULL)
206 return NF_STOLEN; 221 return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b2..624a54832a7c 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,6 +63,7 @@ struct nf_ct_frag6_queue
63 struct inet_frag_queue q; 63 struct inet_frag_queue q;
64 64
65 __be32 id; /* fragment id */ 65 __be32 id; /* fragment id */
66 u32 user;
66 struct in6_addr saddr; 67 struct in6_addr saddr;
67 struct in6_addr daddr; 68 struct in6_addr daddr;
68 69
@@ -168,13 +169,14 @@ out:
168/* Creation primitives. */ 169/* Creation primitives. */
169 170
170static __inline__ struct nf_ct_frag6_queue * 171static __inline__ struct nf_ct_frag6_queue *
171fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) 172fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
172{ 173{
173 struct inet_frag_queue *q; 174 struct inet_frag_queue *q;
174 struct ip6_create_arg arg; 175 struct ip6_create_arg arg;
175 unsigned int hash; 176 unsigned int hash;
176 177
177 arg.id = id; 178 arg.id = id;
179 arg.user = user;
178 arg.src = src; 180 arg.src = src;
179 arg.dst = dst; 181 arg.dst = dst;
180 182
@@ -559,7 +561,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
559 return 0; 561 return 0;
560} 562}
561 563
562struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) 564struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
563{ 565{
564 struct sk_buff *clone; 566 struct sk_buff *clone;
565 struct net_device *dev = skb->dev; 567 struct net_device *dev = skb->dev;
@@ -605,7 +607,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
605 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) 607 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
606 nf_ct_frag6_evictor(); 608 nf_ct_frag6_evictor();
607 609
608 fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); 610 fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
609 if (fq == NULL) { 611 if (fq == NULL) {
610 pr_debug("Can't find and can't create new queue\n"); 612 pr_debug("Can't find and can't create new queue\n");
611 goto ret_orig; 613 goto ret_orig;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a6868..2cddea3bd6be 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
72 struct inet_frag_queue q; 72 struct inet_frag_queue q;
73 73
74 __be32 id; /* fragment id */ 74 __be32 id; /* fragment id */
75 u32 user;
75 struct in6_addr saddr; 76 struct in6_addr saddr;
76 struct in6_addr daddr; 77 struct in6_addr daddr;
77 78
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
141 struct ip6_create_arg *arg = a; 142 struct ip6_create_arg *arg = a;
142 143
143 fq = container_of(q, struct frag_queue, q); 144 fq = container_of(q, struct frag_queue, q);
144 return (fq->id == arg->id && 145 return (fq->id == arg->id && fq->user == arg->user &&
145 ipv6_addr_equal(&fq->saddr, arg->src) && 146 ipv6_addr_equal(&fq->saddr, arg->src) &&
146 ipv6_addr_equal(&fq->daddr, arg->dst)); 147 ipv6_addr_equal(&fq->daddr, arg->dst));
147} 148}
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
163 struct ip6_create_arg *arg = a; 164 struct ip6_create_arg *arg = a;
164 165
165 fq->id = arg->id; 166 fq->id = arg->id;
167 fq->user = arg->user;
166 ipv6_addr_copy(&fq->saddr, arg->src); 168 ipv6_addr_copy(&fq->saddr, arg->src);
167 ipv6_addr_copy(&fq->daddr, arg->dst); 169 ipv6_addr_copy(&fq->daddr, arg->dst);
168} 170}
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
243 unsigned int hash; 245 unsigned int hash;
244 246
245 arg.id = id; 247 arg.id = id;
248 arg.user = IP6_DEFRAG_LOCAL_DELIVER;
246 arg.src = src; 249 arg.src = src;
247 arg.dst = dst; 250 arg.dst = dst;
248 251
@@ -705,7 +708,8 @@ static void ip6_frags_ns_sysctl_unregister(struct net *net)
705 708
706 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg; 709 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
707 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); 710 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
708 kfree(table); 711 if (!net_eq(net, &init_net))
712 kfree(table);
709} 713}
710 714
711static struct ctl_table_header *ip6_ctl_header; 715static struct ctl_table_header *ip6_ctl_header;
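
Both reassembly paths (nf_conntrack_reasm.c above and reassembly.c here) add a "user" field to the fragment queue and its creation argument, so conntrack defragmentation and local-delivery reassembly stop sharing queues for the same fragment id. A hedged sketch of the widened lookup key and match, with simplified types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct frag_key {
        uint32_t id;              /* fragment id                          */
        uint32_t user;            /* e.g. local-deliver vs conntrack-in   */
        uint8_t  saddr[16];
        uint8_t  daddr[16];
    };

    static bool frag_match(const struct frag_key *q, const struct frag_key *arg)
    {
        return q->id == arg->id && q->user == arg->user &&
               memcmp(q->saddr, arg->saddr, 16) == 0 &&
               memcmp(q->daddr, arg->daddr, 16) == 0;
    }

    int main(void)
    {
        struct frag_key a = { .id = 7, .user = 0 }, b = a;
        b.user = 1;               /* same fragment id, different reassembly context */
        printf("%d\n", frag_match(&a, &b));   /* 0: queues are kept apart */
        return 0;
    }
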
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index db3b27303890..c2bd74c5f8d9 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2630,6 +2630,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2630 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; 2630 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2631 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; 2631 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2632 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; 2632 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2633 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2633 } 2634 }
2634 2635
2635 return table; 2636 return table;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f2..7208a06576c6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
185 185
186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
187 187
188 /* check for timestamp cookie support */
189 memset(&tcp_opt, 0, sizeof(tcp_opt));
190 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
191
192 if (tcp_opt.saw_tstamp)
193 cookie_check_timestamp(&tcp_opt);
194
188 ret = NULL; 195 ret = NULL;
189 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 196 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
190 if (!req) 197 if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
218 req->expires = 0UL; 225 req->expires = 0UL;
219 req->retrans = 0; 226 req->retrans = 0;
220 ireq->ecn_ok = 0; 227 ireq->ecn_ok = 0;
228 ireq->snd_wscale = tcp_opt.snd_wscale;
229 ireq->rcv_wscale = tcp_opt.rcv_wscale;
230 ireq->sack_ok = tcp_opt.sack_ok;
231 ireq->wscale_ok = tcp_opt.wscale_ok;
232 ireq->tstamp_ok = tcp_opt.saw_tstamp;
233 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
221 treq->rcv_isn = ntohl(th->seq) - 1; 234 treq->rcv_isn = ntohl(th->seq) - 1;
222 treq->snt_isn = cookie; 235 treq->snt_isn = cookie;
223 236
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
253 goto out_free; 266 goto out_free;
254 } 267 }
255 268
256 /* check for timestamp cookie support */
257 memset(&tcp_opt, 0, sizeof(tcp_opt));
258 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
259
260 if (tcp_opt.saw_tstamp)
261 cookie_check_timestamp(&tcp_opt);
262
263 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
264
265 ireq->snd_wscale = tcp_opt.snd_wscale;
266 ireq->rcv_wscale = tcp_opt.rcv_wscale;
267 ireq->sack_ok = tcp_opt.sack_ok;
268 ireq->wscale_ok = tcp_opt.wscale_ok;
269 ireq->tstamp_ok = tcp_opt.saw_tstamp;
270
271 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
272 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
273 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ee9cf62458d4..febfd595a40d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1169 struct inet6_request_sock *treq; 1169 struct inet6_request_sock *treq;
1170 struct ipv6_pinfo *np = inet6_sk(sk); 1170 struct ipv6_pinfo *np = inet6_sk(sk);
1171 struct tcp_sock *tp = tcp_sk(sk); 1171 struct tcp_sock *tp = tcp_sk(sk);
1172 struct dst_entry *dst = __sk_dst_get(sk);
1173 __u32 isn = TCP_SKB_CB(skb)->when; 1172 __u32 isn = TCP_SKB_CB(skb)->when;
1174#ifdef CONFIG_SYN_COOKIES 1173#ifdef CONFIG_SYN_COOKIES
1175 int want_cookie = 0; 1174 int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1208 tcp_clear_options(&tmp_opt); 1207 tcp_clear_options(&tmp_opt);
1209 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1208 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1210 tmp_opt.user_mss = tp->rx_opt.user_mss; 1209 tmp_opt.user_mss = tp->rx_opt.user_mss;
1211 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1210 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1212 1211
1213 if (tmp_opt.cookie_plus > 0 && 1212 if (tmp_opt.cookie_plus > 0 &&
1214 tmp_opt.saw_tstamp && 1213 tmp_opt.saw_tstamp &&
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7254e3f899a7..dbdc696f5fc5 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -24,7 +24,6 @@
24#include <net/mip6.h> 24#include <net/mip6.h>
25#endif 25#endif
26 26
27static struct dst_ops xfrm6_dst_ops;
28static struct xfrm_policy_afinfo xfrm6_policy_afinfo; 27static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
29 28
30static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, 29static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
@@ -224,8 +223,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
224 223
225static inline int xfrm6_garbage_collect(struct dst_ops *ops) 224static inline int xfrm6_garbage_collect(struct dst_ops *ops)
226{ 225{
227 xfrm6_policy_afinfo.garbage_collect(&init_net); 226 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
228 return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); 227
228 xfrm6_policy_afinfo.garbage_collect(net);
229 return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
229} 230}
230 231
231static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) 232static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -310,7 +311,7 @@ static void xfrm6_policy_fini(void)
310static struct ctl_table xfrm6_policy_table[] = { 311static struct ctl_table xfrm6_policy_table[] = {
311 { 312 {
312 .procname = "xfrm6_gc_thresh", 313 .procname = "xfrm6_gc_thresh",
313 .data = &xfrm6_dst_ops.gc_thresh, 314 .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
314 .maxlen = sizeof(int), 315 .maxlen = sizeof(int),
315 .mode = 0644, 316 .mode = 0644,
316 .proc_handler = proc_dointvec, 317 .proc_handler = proc_dointvec,
@@ -326,13 +327,6 @@ int __init xfrm6_init(void)
326 int ret; 327 int ret;
327 unsigned int gc_thresh; 328 unsigned int gc_thresh;
328 329
329 ret = xfrm6_policy_init();
330 if (ret)
331 goto out;
332
333 ret = xfrm6_state_init();
334 if (ret)
335 goto out_policy;
336 /* 330 /*
337 * We need a good default value for the xfrm6 gc threshold. 331 * We need a good default value for the xfrm6 gc threshold.
338 * In ipv4 we set it to the route hash table size * 8, which 332 * In ipv4 we set it to the route hash table size * 8, which
@@ -346,6 +340,15 @@ int __init xfrm6_init(void)
346 */ 340 */
347 gc_thresh = FIB6_TABLE_HASHSZ * 8; 341 gc_thresh = FIB6_TABLE_HASHSZ * 8;
348 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; 342 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
343
344 ret = xfrm6_policy_init();
345 if (ret)
346 goto out;
347
348 ret = xfrm6_state_init();
349 if (ret)
350 goto out_policy;
351
349#ifdef CONFIG_SYSCTL 352#ifdef CONFIG_SYSCTL
350 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 353 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
351 xfrm6_policy_table); 354 xfrm6_policy_table);
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 356e65b1dc42..783c5f367d29 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -450,10 +450,10 @@ void irlap_disconnect_request(struct irlap_cb *self)
450 450
451 /* Check if we are in the right state for disconnecting */ 451 /* Check if we are in the right state for disconnecting */
452 switch (self->state) { 452 switch (self->state) {
453 case LAP_XMIT_P: /* FALLTROUGH */ 453 case LAP_XMIT_P: /* FALLTHROUGH */
454 case LAP_XMIT_S: /* FALLTROUGH */ 454 case LAP_XMIT_S: /* FALLTHROUGH */
455 case LAP_CONN: /* FALLTROUGH */ 455 case LAP_CONN: /* FALLTHROUGH */
456 case LAP_RESET_WAIT: /* FALLTROUGH */ 456 case LAP_RESET_WAIT: /* FALLTHROUGH */
457 case LAP_RESET_CHECK: 457 case LAP_RESET_CHECK:
458 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); 458 irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
459 break; 459 break;
@@ -485,9 +485,9 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason)
485 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); 485 IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__);
486 irlap_do_event(self, RESET_REQUEST, NULL, NULL); 486 irlap_do_event(self, RESET_REQUEST, NULL, NULL);
487 break; 487 break;
488 case LAP_NO_RESPONSE: /* FALLTROUGH */ 488 case LAP_NO_RESPONSE: /* FALLTHROUGH */
489 case LAP_DISC_INDICATION: /* FALLTROUGH */ 489 case LAP_DISC_INDICATION: /* FALLTHROUGH */
490 case LAP_FOUND_NONE: /* FALLTROUGH */ 490 case LAP_FOUND_NONE: /* FALLTHROUGH */
491 case LAP_MEDIA_BUSY: 491 case LAP_MEDIA_BUSY:
492 irlmp_link_disconnect_indication(self->notify.instance, self, 492 irlmp_link_disconnect_indication(self->notify.instance, self,
493 reason, NULL); 493 reason, NULL);
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index c5c51959e3ce..94a9884d7146 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -1741,7 +1741,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event,
1741 * Function irlap_state_xmit_s (event, skb, info) 1741 * Function irlap_state_xmit_s (event, skb, info)
1742 * 1742 *
1743 * XMIT_S, The secondary station has been given the right to transmit, 1743 * XMIT_S, The secondary station has been given the right to transmit,
1744 * and we therefor do not expect to receive any transmissions from other 1744 * and we therefore do not expect to receive any transmissions from other
1745 * stations. 1745 * stations.
1746 */ 1746 */
1747static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, 1747static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 7bf5b913828b..0e7d8bde145d 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -105,7 +105,7 @@ int __init irlmp_init(void)
105 105
106 init_timer(&irlmp->discovery_timer); 106 init_timer(&irlmp->discovery_timer);
107 107
108 /* Do discovery every 3 seconds, conditionaly */ 108 /* Do discovery every 3 seconds, conditionally */
109 if (sysctl_discovery) 109 if (sysctl_discovery)
110 irlmp_start_discovery_timer(irlmp, 110 irlmp_start_discovery_timer(irlmp,
111 sysctl_discovery_timeout*HZ); 111 sysctl_discovery_timeout*HZ);
@@ -1842,7 +1842,7 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
1842 reason = LM_CONNECT_FAILURE; 1842 reason = LM_CONNECT_FAILURE;
1843 break; 1843 break;
1844 default: 1844 default:
1845 IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n", 1845 IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n",
1846 __func__, lap_reason); 1846 __func__, lap_reason);
1847 reason = LM_LAP_DISCONNECT; 1847 reason = LM_LAP_DISCONNECT;
1848 break; 1848 break;
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index b001c361ad30..4300df35d37d 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -249,6 +249,7 @@
249#include <linux/poll.h> 249#include <linux/poll.h>
250#include <linux/capability.h> 250#include <linux/capability.h>
251#include <linux/ctype.h> /* isspace() */ 251#include <linux/ctype.h> /* isspace() */
252#include <linux/string.h> /* skip_spaces() */
252#include <asm/uaccess.h> 253#include <asm/uaccess.h>
253#include <linux/init.h> 254#include <linux/init.h>
254 255
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7dea882dbb75..156020d138b5 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap,
76 /* Look at the next command */ 76 /* Look at the next command */
77 start = next; 77 start = next;
78 78
79 /* Scrap whitespaces before the command */ 79 /* Scrap whitespaces before the command */
80 while(isspace(*start)) 80 start = skip_spaces(start);
81 start++;
82 81
83 /* ',' is our command separator */ 82 /* ',' is our command separator */
84 next = strchr(start, ','); 83 next = strchr(start, ',');
@@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap,
133 char * endp; 132 char * endp;
134 133
135 /* Scrap whitespaces before the command */ 134 /* Scrap whitespaces before the command */
136 while(isspace(*begp)) 135 begp = skip_spaces(begp);
137 begp++;
138 136
139 /* Convert argument to a number (last arg is the base) */ 137 /* Convert argument to a number (last arg is the base) */
140 addr = simple_strtoul(begp, &endp, 16); 138 addr = simple_strtoul(begp, &endp, 16);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e428863574f..c18286a2167b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev)
221 return 0; 221 return 0;
222} 222}
223 223
224static struct dev_pm_ops afiucv_pm_ops = { 224static const struct dev_pm_ops afiucv_pm_ops = {
225 .prepare = afiucv_pm_prepare, 225 .prepare = afiucv_pm_prepare,
226 .complete = afiucv_pm_complete, 226 .complete = afiucv_pm_complete,
227 .freeze = afiucv_pm_freeze, 227 .freeze = afiucv_pm_freeze,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3b1f5f5f8de7..fd8b28361a64 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *);
93static int iucv_pm_thaw(struct device *); 93static int iucv_pm_thaw(struct device *);
94static int iucv_pm_restore(struct device *); 94static int iucv_pm_restore(struct device *);
95 95
96static struct dev_pm_ops iucv_pm_ops = { 96static const struct dev_pm_ops iucv_pm_ops = {
97 .prepare = iucv_pm_prepare, 97 .prepare = iucv_pm_prepare,
98 .complete = iucv_pm_complete, 98 .complete = iucv_pm_complete,
99 .freeze = iucv_pm_freeze, 99 .freeze = iucv_pm_freeze,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 84209fbbeb17..76fa6fef6473 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1193 x->aalg->alg_key_len = key->sadb_key_bits; 1193 x->aalg->alg_key_len = key->sadb_key_bits;
1194 memcpy(x->aalg->alg_key, key+1, keysize); 1194 memcpy(x->aalg->alg_key, key+1, keysize);
1195 } 1195 }
1196 x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
1196 x->props.aalgo = sa->sadb_sa_auth; 1197 x->props.aalgo = sa->sadb_sa_auth;
1197 /* x->algo.flags = sa->sadb_sa_flags; */ 1198 /* x->algo.flags = sa->sadb_sa_flags; */
1198 } 1199 }
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a8da23905c70..0192cfdacae4 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -244,7 +244,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
244 * @addr: destination address of the path (ETH_ALEN length) 244 * @addr: destination address of the path (ETH_ALEN length)
245 * @sdata: local subif 245 * @sdata: local subif
246 * 246 *
247 * Returns: 0 on sucess 247 * Returns: 0 on success
248 * 248 *
249 * State: the initial state of the new path is set to 0 249 * State: the initial state of the new path is set to 0
250 */ 250 */
@@ -532,7 +532,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
532 * @addr: dst address (ETH_ALEN length) 532 * @addr: dst address (ETH_ALEN length)
533 * @sdata: local subif 533 * @sdata: local subif
534 * 534 *
535 * Returns: 0 if succesful 535 * Returns: 0 if successful
536 */ 536 */
537int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 537int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
538{ 538{
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 79a698052218..f2d76238b9b5 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -112,7 +112,8 @@ config IP_VS_RR
112 module, choose M here. If unsure, say N. 112 module, choose M here. If unsure, say N.
113 113
114config IP_VS_WRR 114config IP_VS_WRR
115 tristate "weighted round-robin scheduling" 115 tristate "weighted round-robin scheduling"
116 select GCD
116 ---help--- 117 ---help---
117 The weighted robin-robin scheduling algorithm directs network 118 The weighted robin-robin scheduling algorithm directs network
118 connections to different real servers based on server weights 119 connections to different real servers based on server weights
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f00545..847ffca40184 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1366 == sysctl_ip_vs_sync_threshold[0])) || 1366 == sysctl_ip_vs_sync_threshold[0])) ||
1367 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && 1367 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1368 ((cp->state == IP_VS_TCP_S_FIN_WAIT) || 1368 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1369 (cp->state == IP_VS_TCP_S_CLOSE) ||
1369 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || 1370 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1370 (cp->state == IP_VS_TCP_S_TIME_WAIT))))) 1371 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1371 ip_vs_sync_conn(cp); 1372 ip_vs_sync_conn(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26f..c37ac2d7bec4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2077,6 +2077,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2077 if (!capable(CAP_NET_ADMIN)) 2077 if (!capable(CAP_NET_ADMIN))
2078 return -EPERM; 2078 return -EPERM;
2079 2079
2080 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
2081 return -EINVAL;
2082 if (len < 0 || len > MAX_ARG_LEN)
2083 return -EINVAL;
2080 if (len != set_arglen[SET_CMDID(cmd)]) { 2084 if (len != set_arglen[SET_CMDID(cmd)]) {
2081 pr_err("set_ctl: len %u != %u\n", 2085 pr_err("set_ctl: len %u != %u\n",
2082 len, set_arglen[SET_CMDID(cmd)]); 2086 len, set_arglen[SET_CMDID(cmd)]);
@@ -2352,17 +2356,25 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2352{ 2356{
2353 unsigned char arg[128]; 2357 unsigned char arg[128];
2354 int ret = 0; 2358 int ret = 0;
2359 unsigned int copylen;
2355 2360
2356 if (!capable(CAP_NET_ADMIN)) 2361 if (!capable(CAP_NET_ADMIN))
2357 return -EPERM; 2362 return -EPERM;
2358 2363
2364 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
2365 return -EINVAL;
2366
2359 if (*len < get_arglen[GET_CMDID(cmd)]) { 2367 if (*len < get_arglen[GET_CMDID(cmd)]) {
2360 pr_err("get_ctl: len %u < %u\n", 2368 pr_err("get_ctl: len %u < %u\n",
2361 *len, get_arglen[GET_CMDID(cmd)]); 2369 *len, get_arglen[GET_CMDID(cmd)]);
2362 return -EINVAL; 2370 return -EINVAL;
2363 } 2371 }
2364 2372
2365 if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) 2373 copylen = get_arglen[GET_CMDID(cmd)];
2374 if (copylen > 128)
2375 return -EINVAL;
2376
2377 if (copy_from_user(arg, user, copylen) != 0)
2366 return -EFAULT; 2378 return -EFAULT;
2367 2379
2368 if (mutex_lock_interruptible(&__ip_vs_mutex)) 2380 if (mutex_lock_interruptible(&__ip_vs_mutex))
@@ -2714,6 +2726,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
2714 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) 2726 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
2715 return -EINVAL; 2727 return -EINVAL;
2716 2728
2729 memset(usvc, 0, sizeof(*usvc));
2730
2717 usvc->af = nla_get_u16(nla_af); 2731 usvc->af = nla_get_u16(nla_af);
2718#ifdef CONFIG_IP_VS_IPV6 2732#ifdef CONFIG_IP_VS_IPV6
2719 if (usvc->af != AF_INET && usvc->af != AF_INET6) 2733 if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2915,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
2901 if (!(nla_addr && nla_port)) 2915 if (!(nla_addr && nla_port))
2902 return -EINVAL; 2916 return -EINVAL;
2903 2917
2918 memset(udest, 0, sizeof(*udest));
2919
2904 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); 2920 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
2905 udest->port = nla_get_u16(nla_port); 2921 udest->port = nla_get_u16(nla_port);
2906 2922
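
do_ip_vs_set_ctl()/do_ip_vs_get_ctl() now reject out-of-range commands and lengths before indexing the arglen tables or copying into the 128-byte stack buffer, and the netlink parsers zero their output structs first. A generic userspace sketch of the validate-before-copy part (command range and sizes are invented):

    #include <stdio.h>
    #include <string.h>

    #define CMD_BASE  64              /* stands in for IP_VS_BASE_CTL */
    #define CMD_MAX   67
    #define BUF_MAX  128

    static const unsigned int arglen[CMD_MAX - CMD_BASE + 1] = { 8, 16, 64, 128 };

    static int handle_ctl(unsigned int cmd, const void *user, unsigned int len)
    {
        unsigned char arg[BUF_MAX];

        if (cmd < CMD_BASE || cmd > CMD_MAX)      /* validate the index first  */
            return -1;
        if (len != arglen[cmd - CMD_BASE] || len > sizeof(arg))
            return -1;                            /* never overrun the buffer  */
        memcpy(arg, user, len);                   /* stand-in for copy_from_user() */
        return 0;
    }

    int main(void)
    {
        char junk[256] = { 0 };
        printf("%d %d\n", handle_ctl(66, junk, 64), handle_ctl(99, junk, 300));
        return 0;
    }
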
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 6182e8ea0be7..3c115fc19784 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/net.h> 26#include <linux/net.h>
27#include <linux/gcd.h>
27 28
28#include <net/ip_vs.h> 29#include <net/ip_vs.h>
29 30
@@ -38,20 +39,6 @@ struct ip_vs_wrr_mark {
38}; 39};
39 40
40 41
41/*
42 * Get the gcd of server weights
43 */
44static int gcd(int a, int b)
45{
46 int c;
47
48 while ((c = a % b)) {
49 a = b;
50 b = c;
51 }
52 return b;
53}
54
55static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc) 42static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc)
56{ 43{
57 struct ip_vs_dest *dest; 44 struct ip_vs_dest *dest;
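
ip_vs_wrr.c drops its private gcd() in favour of the shared linux/gcd.h helper, which is why the IPVS Kconfig hunk above adds "select GCD". For reference, the Euclidean algorithm it relies on, as a standalone function:

    #include <stdio.h>

    /* Euclid's algorithm; matches the removed helper for b != 0 */
    static unsigned long gcd(unsigned long a, unsigned long b)
    {
        while (b) {
            unsigned long r = a % b;
            a = b;
            b = r;
        }
        return a;
    }

    int main(void)
    {
        printf("%lu\n", gcd(12, 18));   /* 6 */
        return 0;
    }
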
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 38ea7ef3ccd2..f0732aa18e4f 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
323 struct nf_ct_ftp_master *info, int dir, 323 struct nf_ct_ftp_master *info, int dir,
324 struct sk_buff *skb) 324 struct sk_buff *skb)
325{ 325{
326 unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; 326 unsigned int i, oldest;
327 327
328 /* Look for oldest: if we find exact match, we're done. */ 328 /* Look for oldest: if we find exact match, we're done. */
329 for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { 329 for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
330 if (info->seq_aft_nl[dir][i] == nl_seq) 330 if (info->seq_aft_nl[dir][i] == nl_seq)
331 return; 331 return;
332
333 if (oldest == info->seq_aft_nl_num[dir] ||
334 before(info->seq_aft_nl[dir][i],
335 info->seq_aft_nl[dir][oldest]))
336 oldest = i;
337 } 332 }
338 333
339 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { 334 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
340 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; 335 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
341 } else if (oldest != NUM_SEQ_TO_REMEMBER && 336 } else {
342 after(nl_seq, info->seq_aft_nl[dir][oldest])) { 337 if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
343 info->seq_aft_nl[dir][oldest] = nl_seq; 338 oldest = 0;
339 else
340 oldest = 1;
341
342 if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
343 info->seq_aft_nl[dir][oldest] = nl_seq;
344 } 344 }
345} 345}
346 346
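
The update_nl_seq() rewrite removes the use of a previously uninitialized "oldest" index: with only two remembered sequence numbers per direction, it simply overwrites whichever one is older, using wraparound-safe comparisons. A simplified sketch (the exact-match early return from the loop above is omitted for brevity; before()/after() mirror the kernel's serial-number helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* wraparound-safe sequence comparisons, TCP-sequence style */
    static int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static int after(uint32_t a, uint32_t b)  { return before(b, a); }

    static void remember_seq(uint32_t seq, uint32_t slot[2], unsigned int *count)
    {
        unsigned int oldest;

        if (*count < 2) {
            slot[(*count)++] = seq;
            return;
        }
        oldest = before(slot[0], slot[1]) ? 0 : 1;
        if (after(seq, slot[oldest]))        /* only ever move forward */
            slot[oldest] = seq;
    }

    int main(void)
    {
        uint32_t slot[2] = { 0, 0 };
        unsigned int count = 0;

        remember_seq(100, slot, &count);
        remember_seq(200, slot, &count);
        remember_seq(300, slot, &count);     /* replaces 100 */
        printf("%u %u\n", slot[0], slot[1]); /* 300 200 */
        return 0;
    }
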
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index eb0ceb846527..fc70a49c0afd 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file,
482 if (copy_from_user(buf, input, size)) 482 if (copy_from_user(buf, input, size))
483 return -EFAULT; 483 return -EFAULT;
484 484
485 while (isspace(*c)) 485 c = skip_spaces(c);
486 c++;
487 486
488 if (size - (c - buf) < 5) 487 if (size - (c - buf) < 5)
489 return c - buf; 488 return c - buf;
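
This hunk, like the irnet_ppp.c ones earlier, swaps an open-coded isspace() loop for the new skip_spaces() helper declared in linux/string.h. A userspace equivalent of what that helper does:

    #include <ctype.h>
    #include <stdio.h>

    /* userspace equivalent of the kernel's skip_spaces() */
    static char *skip_spaces(const char *str)
    {
        while (isspace((unsigned char)*str))
            ++str;
        return (char *)str;
    }

    int main(void)
    {
        printf("[%s]\n", skip_spaces("   +192.168.0.1"));  /* [+192.168.0.1] */
        return 0;
    }
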
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7a10bbe02c13..c5d9f97ef217 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -682,7 +682,7 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
682 * buckets and @skip_chain entries. For each entry in the table call 682 * buckets and @skip_chain entries. For each entry in the table call
683 * @callback, if @callback returns a negative value stop 'walking' through the 683 * @callback, if @callback returns a negative value stop 'walking' through the
684 * table and return. Updates the values in @skip_bkt and @skip_chain on 684 * table and return. Updates the values in @skip_bkt and @skip_chain on
685 * return. Returns zero on succcess, negative values on failure. 685 * return. Returns zero on success, negative values on failure.
686 * 686 *
687 */ 687 */
688int netlbl_domhsh_walk(u32 *skip_bkt, 688int netlbl_domhsh_walk(u32 *skip_bkt,
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index aacba76070fc..e2e2d33cafdf 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -843,12 +843,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
843 dptr = skb_push(skb, 1); 843 dptr = skb_push(skb, 1);
844 *dptr = AX25_P_NETROM; 844 *dptr = AX25_P_NETROM;
845 845
846 ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); 846 ax25s = nr_neigh->ax25;
847 if (nr_neigh->ax25 && ax25s) { 847 nr_neigh->ax25 = ax25_send_frame(skb, 256,
848 /* We were already holding this ax25_cb */ 848 (ax25_address *)dev->dev_addr,
849 &nr_neigh->callsign,
850 nr_neigh->digipeat, nr_neigh->dev);
851 if (ax25s)
849 ax25_cb_put(ax25s); 852 ax25_cb_put(ax25s);
850 }
851 nr_neigh->ax25 = ax25s;
852 853
853 dev_put(dev); 854 dev_put(dev);
854 ret = (nr_neigh->ax25 != NULL); 855 ret = (nr_neigh->ax25 != NULL);
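
nr_route_frame() now saves the ax25_cb it currently holds before ax25_send_frame() installs a new one, then drops that old reference; the rose_link.c and rose_route.c hunks below apply the same fix. A generic sketch of the swap-then-put pattern with a toy reference count (the allocation and refcount helpers are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct cb { int refcount; };

    static struct cb *cb_get_new(void)       /* returns a referenced object */
    {
        struct cb *c = malloc(sizeof(*c));
        if (!c)
            abort();
        c->refcount = 1;
        return c;
    }

    static void cb_put(struct cb *c)
    {
        if (c && --c->refcount == 0)
            free(c);
    }

    static void neigh_refresh(struct cb **slot)
    {
        struct cb *old = *slot;      /* remember the reference currently held */
        *slot = cb_get_new();        /* install the new one                   */
        cb_put(old);                 /* then drop the old reference, if any   */
    }

    int main(void)
    {
        struct cb *slot = NULL;
        neigh_refresh(&slot);
        neigh_refresh(&slot);        /* first cb is released here, not leaked */
        cb_put(slot);
        return 0;
    }
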
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b56..f126d18dbdc4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
415{ 415{
416 struct sock *sk = sock->sk; 416 struct sock *sk = sock->sk;
417 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; 417 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
418 struct sk_buff *skb; 418 struct sk_buff *skb = NULL;
419 struct net_device *dev; 419 struct net_device *dev;
420 __be16 proto = 0; 420 __be16 proto = 0;
421 int err; 421 int err;
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
437 */ 437 */
438 438
439 saddr->spkt_device[13] = 0; 439 saddr->spkt_device[13] = 0;
440retry:
440 rcu_read_lock(); 441 rcu_read_lock();
441 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 442 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
442 err = -ENODEV; 443 err = -ENODEV;
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
456 if (len > dev->mtu + dev->hard_header_len) 457 if (len > dev->mtu + dev->hard_header_len)
457 goto out_unlock; 458 goto out_unlock;
458 459
459 err = -ENOBUFS; 460 if (!skb) {
460 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); 461 size_t reserved = LL_RESERVED_SPACE(dev);
461 462 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
462 /* 463
463 * If the write buffer is full, then tough. At this level the user 464 rcu_read_unlock();
464 * gets to deal with the problem - do your own algorithmic backoffs. 465 skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
465 * That's far more flexible. 466 if (skb == NULL)
466 */ 467 return -ENOBUFS;
467 468 /* FIXME: Save some space for broken drivers that write a hard
468 if (skb == NULL) 469 * header at transmission time by themselves. PPP is the notable
469 goto out_unlock; 470 * one here. This should really be fixed at the driver level.
470 471 */
471 /* 472 skb_reserve(skb, reserved);
472 * Fill it in 473 skb_reset_network_header(skb);
473 */ 474
474 475 /* Try to align data part correctly */
475 /* FIXME: Save some space for broken drivers that write a 476 if (hhlen) {
476 * hard header at transmission time by themselves. PPP is the 477 skb->data -= hhlen;
477 * notable one here. This should really be fixed at the driver level. 478 skb->tail -= hhlen;
478 */ 479 if (len < hhlen)
479 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 480 skb_reset_network_header(skb);
480 skb_reset_network_header(skb); 481 }
481 482 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
482 /* Try to align data part correctly */ 483 if (err)
483 if (dev->header_ops) { 484 goto out_free;
484 skb->data -= dev->hard_header_len; 485 goto retry;
485 skb->tail -= dev->hard_header_len;
486 if (len < dev->hard_header_len)
487 skb_reset_network_header(skb);
488 } 486 }
489 487
490 /* Returns -EFAULT on error */ 488
491 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
492 skb->protocol = proto; 489 skb->protocol = proto;
493 skb->dev = dev; 490 skb->dev = dev;
494 skb->priority = sk->sk_priority; 491 skb->priority = sk->sk_priority;
495 skb->mark = sk->sk_mark; 492 skb->mark = sk->sk_mark;
496 if (err)
497 goto out_free;
498
499 /*
500 * Now send it
501 */
502 493
503 dev_queue_xmit(skb); 494 dev_queue_xmit(skb);
504 rcu_read_unlock(); 495 rcu_read_unlock();
505 return len; 496 return len;
506 497
507out_free:
508 kfree_skb(skb);
509out_unlock: 498out_unlock:
510 rcu_read_unlock(); 499 rcu_read_unlock();
500out_free:
501 kfree_skb(skb);
511 return err; 502 return err;
512} 503}
513 504
@@ -1030,8 +1021,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1030 1021
1031 status = TP_STATUS_SEND_REQUEST; 1022 status = TP_STATUS_SEND_REQUEST;
1032 err = dev_queue_xmit(skb); 1023 err = dev_queue_xmit(skb);
1033 if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0)) 1024 if (unlikely(err > 0)) {
1034 goto out_xmit; 1025 err = net_xmit_errno(err);
1026 if (err && __packet_get_status(po, ph) ==
1027 TP_STATUS_AVAILABLE) {
1028 /* skb was destructed already */
1029 skb = NULL;
1030 goto out_status;
1031 }
1032 /*
1033 * skb was dropped but not destructed yet;
1034 * let's treat it like congestion or err < 0
1035 */
1036 err = 0;
1037 }
1035 packet_increment_head(&po->tx_ring); 1038 packet_increment_head(&po->tx_ring);
1036 len_sum += tp_len; 1039 len_sum += tp_len;
1037 } while (likely((ph != NULL) || 1040 } while (likely((ph != NULL) ||
@@ -1042,9 +1045,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1042 err = len_sum; 1045 err = len_sum;
1043 goto out_put; 1046 goto out_put;
1044 1047
1045out_xmit:
1046 skb->destructor = sock_wfree;
1047 atomic_dec(&po->tx_ring.pending);
1048out_status: 1048out_status:
1049 __packet_set_status(po, ph, status); 1049 __packet_set_status(po, ph, status);
1050 kfree_skb(skb); 1050 kfree_skb(skb);
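
packet_sendmsg_spkt() now drops rcu_read_lock() before calling the potentially sleeping sock_wmalloc(GFP_KERNEL) and jumps back to the new "retry:" label to redo the device lookup. A minimal pthread sketch of that drop-lock/allocate/retry shape (the mutex stands in loosely for the RCU read section; names and return codes are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int device_present = 1;        /* stands in for the device lookup */

    static int send_packet(size_t len)
    {
        char *buf = NULL;

    retry:
        pthread_mutex_lock(&lock);
        if (!device_present) {
            pthread_mutex_unlock(&lock);
            free(buf);
            return -1;                    /* -ENODEV in the real code */
        }
        if (!buf) {
            /* never allocate while holding the lock: drop it, allocate, retry */
            pthread_mutex_unlock(&lock);
            buf = malloc(len);
            if (!buf)
                return -2;                /* -ENOBUFS */
            goto retry;
        }
        /* ... build and transmit the frame under the lock ... */
        pthread_mutex_unlock(&lock);
        free(buf);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", send_packet(64));
        return 0;
    }
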
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 536ebe5d3f6b..3b8992361042 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
182 ic = conn->c_transport_data; 182 ic = conn->c_transport_data;
183 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 183 dev_addr = &ic->i_cm_id->route.addr.dev_addr;
184 184
185 ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 185 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
186 ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 186 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
187 187
188 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 188 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
189 iinfo->max_send_wr = ic->i_send_ring.w_nr; 189 iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index db224f7c2937..b28fa8525b24 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn,
184 ic = conn->c_transport_data; 184 ic = conn->c_transport_data;
185 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 185 dev_addr = &ic->i_cm_id->route.addr.dev_addr;
186 186
187 ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 187 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
188 ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 188 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
189 189
190 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); 190 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
191 iinfo->max_send_wr = ic->i_send_ring.w_nr; 191 iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index bd86a63960ce..5ef5f6988a2e 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param)
101static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) 101static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
102{ 102{
103 ax25_address *rose_call; 103 ax25_address *rose_call;
104 ax25_cb *ax25s;
104 105
105 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 106 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
106 rose_call = (ax25_address *)neigh->dev->dev_addr; 107 rose_call = (ax25_address *)neigh->dev->dev_addr;
107 else 108 else
108 rose_call = &rose_callsign; 109 rose_call = &rose_callsign;
109 110
111 ax25s = neigh->ax25;
110 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 112 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
113 if (ax25s)
114 ax25_cb_put(ax25s);
111 115
112 return (neigh->ax25 != NULL); 116 return (neigh->ax25 != NULL);
113} 117}
@@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
120static int rose_link_up(struct rose_neigh *neigh) 124static int rose_link_up(struct rose_neigh *neigh)
121{ 125{
122 ax25_address *rose_call; 126 ax25_address *rose_call;
127 ax25_cb *ax25s;
123 128
124 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 129 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
125 rose_call = (ax25_address *)neigh->dev->dev_addr; 130 rose_call = (ax25_address *)neigh->dev->dev_addr;
126 else 131 else
127 rose_call = &rose_callsign; 132 rose_call = &rose_callsign;
128 133
134 ax25s = neigh->ax25;
129 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 135 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
136 if (ax25s)
137 ax25_cb_put(ax25s);
130 138
131 return (neigh->ax25 != NULL); 139 return (neigh->ax25 != NULL);
132} 140}
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 114df6eec8c3..968e8bac1b5d 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -75,7 +75,7 @@ static void rose_loopback_timer(unsigned long param)
75 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 75 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
76 frametype = skb->data[2]; 76 frametype = skb->data[2];
77 dest = (rose_address *)(skb->data + 4); 77 dest = (rose_address *)(skb->data + 4);
78 lci_o = 0xFFF - lci_i; 78 lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
79 79
80 skb_reset_transport_header(skb); 80 skb_reset_transport_header(skb);
81 81
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 795c4b025e31..70a0b3b4b4d2 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -235,6 +235,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
235 235
236 if ((s = rose_neigh_list) == rose_neigh) { 236 if ((s = rose_neigh_list) == rose_neigh) {
237 rose_neigh_list = rose_neigh->next; 237 rose_neigh_list = rose_neigh->next;
238 if (rose_neigh->ax25)
239 ax25_cb_put(rose_neigh->ax25);
238 kfree(rose_neigh->digipeat); 240 kfree(rose_neigh->digipeat);
239 kfree(rose_neigh); 241 kfree(rose_neigh);
240 return; 242 return;
@@ -243,6 +245,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
243 while (s != NULL && s->next != NULL) { 245 while (s != NULL && s->next != NULL) {
244 if (s->next == rose_neigh) { 246 if (s->next == rose_neigh) {
245 s->next = rose_neigh->next; 247 s->next = rose_neigh->next;
248 if (rose_neigh->ax25)
249 ax25_cb_put(rose_neigh->ax25);
246 kfree(rose_neigh->digipeat); 250 kfree(rose_neigh->digipeat);
247 kfree(rose_neigh); 251 kfree(rose_neigh);
248 return; 252 return;
@@ -812,6 +816,7 @@ void rose_link_failed(ax25_cb *ax25, int reason)
812 816
813 if (rose_neigh != NULL) { 817 if (rose_neigh != NULL) {
814 rose_neigh->ax25 = NULL; 818 rose_neigh->ax25 = NULL;
819 ax25_cb_put(ax25);
815 820
816 rose_del_route_by_neigh(rose_neigh); 821 rose_del_route_by_neigh(rose_neigh);
817 rose_kill_by_neigh(rose_neigh); 822 rose_kill_by_neigh(rose_neigh);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2a740035aa6b..64f5e328cee9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -598,7 +598,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
598 goto errout; 598 goto errout;
599 599
600 /* compat_mode being true specifies a call that is supposed 600 /* compat_mode being true specifies a call that is supposed
601 * to add additional backward compatiblity statistic TLVs. 601 * to add additional backward compatibility statistic TLVs.
602 */ 602 */
603 if (compat_mode) { 603 if (compat_mode) {
604 if (a->type == TCA_OLD_COMPAT) 604 if (a->type == TCA_OLD_COMPAT)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d771cc1b777a..4e4ca65cd320 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -717,7 +717,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
717 717
718 if (sctp_style(sk, TCP)) { 718 if (sctp_style(sk, TCP)) {
719 /* Change the sk->sk_state of a TCP-style socket that has 719 /* Change the sk->sk_state of a TCP-style socket that has
720 * sucessfully completed a connect() call. 720 * successfully completed a connect() call.
721 */ 721 */
722 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) 722 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
723 sk->sk_state = SCTP_SS_ESTABLISHED; 723 sk->sk_state = SCTP_SS_ESTABLISHED;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1ef9de9bbae9..47bc20d3a85b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3577,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
3577 * To do this properly, we'll set the destination address of the chunk 3577 * To do this properly, we'll set the destination address of the chunk
3578 * and at the transmit time, will try look up the transport to use. 3578 * and at the transmit time, will try look up the transport to use.
3579 * Since ASCONFs may be bundled, the correct transport may not be 3579 * Since ASCONFs may be bundled, the correct transport may not be
3580 * created untill we process the entire packet, thus this workaround. 3580 * created until we process the entire packet, thus this workaround.
3581 */ 3581 */
3582 asconf_ack->dest = chunk->source; 3582 asconf_ack->dest = chunk->source;
3583 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); 3583 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 89ab66e54740..67fdac9d2d33 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2087,8 +2087,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2087 if (copy_from_user(&sp->autoclose, optval, optlen)) 2087 if (copy_from_user(&sp->autoclose, optval, optlen))
2088 return -EFAULT; 2088 return -EFAULT;
2089 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ 2089 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
2090 if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) ) 2090 sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
2091 sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ;
2092 2091
2093 return 0; 2092 return 0;
2094} 2093}
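
sctp_setsockopt_autoclose() replaces the if/assign pair with a min_t() clamp; both forms cap autoclose at MAX_SCHEDULE_TIMEOUT / HZ. A trivial illustration of the clamp macro (the limit value here is a stand-in):

    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned int autoclose = 5000;
        const long max_autoclose = 3600;  /* stand-in for MAX_SCHEDULE_TIMEOUT / HZ */

        autoclose = min_t(long, autoclose, max_autoclose);
        printf("%u\n", autoclose);        /* 3600 */
        return 0;
    }
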
diff --git a/net/socket.c b/net/socket.c
index b94c3dd71015..769c386bd428 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -312,18 +312,6 @@ static struct file_system_type sock_fs_type = {
312 .kill_sb = kill_anon_super, 312 .kill_sb = kill_anon_super,
313}; 313};
314 314
315static int sockfs_delete_dentry(struct dentry *dentry)
316{
317 /*
318 * At creation time, we pretended this dentry was hashed
319 * (by clearing DCACHE_UNHASHED bit in d_flags)
320 * At delete time, we restore the truth : not hashed.
321 * (so that dput() can proceed correctly)
322 */
323 dentry->d_flags |= DCACHE_UNHASHED;
324 return 0;
325}
326
327/* 315/*
328 * sockfs_dname() is called from d_path(). 316 * sockfs_dname() is called from d_path().
329 */ 317 */
@@ -334,7 +322,6 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
334} 322}
335 323
336static const struct dentry_operations sockfs_dentry_operations = { 324static const struct dentry_operations sockfs_dentry_operations = {
337 .d_delete = sockfs_delete_dentry,
338 .d_dname = sockfs_dname, 325 .d_dname = sockfs_dname,
339}; 326};
340 327
@@ -355,68 +342,55 @@ static const struct dentry_operations sockfs_dentry_operations = {
355 * but we take care of internal coherence yet. 342 * but we take care of internal coherence yet.
356 */ 343 */
357 344
358static int sock_alloc_fd(struct file **filep, int flags) 345static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
359{ 346{
347 struct qstr name = { .name = "" };
348 struct path path;
349 struct file *file;
360 int fd; 350 int fd;
361 351
362 fd = get_unused_fd_flags(flags); 352 fd = get_unused_fd_flags(flags);
363 if (likely(fd >= 0)) { 353 if (unlikely(fd < 0))
364 struct file *file = get_empty_filp(); 354 return fd;
365
366 *filep = file;
367 if (unlikely(!file)) {
368 put_unused_fd(fd);
369 return -ENFILE;
370 }
371 } else
372 *filep = NULL;
373 return fd;
374}
375
376static int sock_attach_fd(struct socket *sock, struct file *file, int flags)
377{
378 struct dentry *dentry;
379 struct qstr name = { .name = "" };
380 355
381 dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name); 356 path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
382 if (unlikely(!dentry)) 357 if (unlikely(!path.dentry)) {
358 put_unused_fd(fd);
383 return -ENOMEM; 359 return -ENOMEM;
360 }
361 path.mnt = mntget(sock_mnt);
384 362
385 dentry->d_op = &sockfs_dentry_operations; 363 path.dentry->d_op = &sockfs_dentry_operations;
386 /* 364 d_instantiate(path.dentry, SOCK_INODE(sock));
387 * We dont want to push this dentry into global dentry hash table. 365 SOCK_INODE(sock)->i_fop = &socket_file_ops;
388 * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
389 * This permits a working /proc/$pid/fd/XXX on sockets
390 */
391 dentry->d_flags &= ~DCACHE_UNHASHED;
392 d_instantiate(dentry, SOCK_INODE(sock));
393 366
394 sock->file = file; 367 file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
395 init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
396 &socket_file_ops); 368 &socket_file_ops);
397 SOCK_INODE(sock)->i_fop = &socket_file_ops; 369 if (unlikely(!file)) {
370 /* drop dentry, keep inode */
371 atomic_inc(&path.dentry->d_inode->i_count);
372 path_put(&path);
373 put_unused_fd(fd);
374 return -ENFILE;
375 }
376
377 sock->file = file;
398 file->f_flags = O_RDWR | (flags & O_NONBLOCK); 378 file->f_flags = O_RDWR | (flags & O_NONBLOCK);
399 file->f_pos = 0; 379 file->f_pos = 0;
400 file->private_data = sock; 380 file->private_data = sock;
401 381
402 return 0; 382 *f = file;
383 return fd;
403} 384}
404 385
405int sock_map_fd(struct socket *sock, int flags) 386int sock_map_fd(struct socket *sock, int flags)
406{ 387{
407 struct file *newfile; 388 struct file *newfile;
408 int fd = sock_alloc_fd(&newfile, flags); 389 int fd = sock_alloc_file(sock, &newfile, flags);
409 390
410 if (likely(fd >= 0)) { 391 if (likely(fd >= 0))
411 int err = sock_attach_fd(sock, newfile, flags);
412
413 if (unlikely(err < 0)) {
414 put_filp(newfile);
415 put_unused_fd(fd);
416 return err;
417 }
418 fd_install(fd, newfile); 392 fd_install(fd, newfile);
419 } 393
420 return fd; 394 return fd;
421} 395}
422 396
@@ -1390,29 +1364,19 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
1390 if (err < 0) 1364 if (err < 0)
1391 goto out_release_both; 1365 goto out_release_both;
1392 1366
1393 fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC); 1367 fd1 = sock_alloc_file(sock1, &newfile1, flags);
1394 if (unlikely(fd1 < 0)) { 1368 if (unlikely(fd1 < 0)) {
1395 err = fd1; 1369 err = fd1;
1396 goto out_release_both; 1370 goto out_release_both;
1397 } 1371 }
1398 1372
1399 fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC); 1373 fd2 = sock_alloc_file(sock2, &newfile2, flags);
1400 if (unlikely(fd2 < 0)) { 1374 if (unlikely(fd2 < 0)) {
1401 err = fd2; 1375 err = fd2;
1402 put_filp(newfile1);
1403 put_unused_fd(fd1);
1404 goto out_release_both;
1405 }
1406
1407 err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
1408 if (unlikely(err < 0)) {
1409 goto out_fd2;
1410 }
1411
1412 err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
1413 if (unlikely(err < 0)) {
1414 fput(newfile1); 1376 fput(newfile1);
1415 goto out_fd1; 1377 put_unused_fd(fd1);
1378 sock_release(sock2);
1379 goto out;
1416 } 1380 }
1417 1381
1418 audit_fd_pair(fd1, fd2); 1382 audit_fd_pair(fd1, fd2);
@@ -1438,16 +1402,6 @@ out_release_1:
1438 sock_release(sock1); 1402 sock_release(sock1);
1439out: 1403out:
1440 return err; 1404 return err;
1441
1442out_fd2:
1443 put_filp(newfile1);
1444 sock_release(sock1);
1445out_fd1:
1446 put_filp(newfile2);
1447 sock_release(sock2);
1448 put_unused_fd(fd1);
1449 put_unused_fd(fd2);
1450 goto out;
1451} 1405}
1452 1406
1453/* 1407/*
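The socketpair() hunks above collapse the old out_fd1/out_fd2 labels into a single failure branch that releases exactly what the earlier allocations produced (the first file, its fd reservation, the second socket) before bailing out. The underlying idiom is acquire-in-order, release-in-reverse on partial failure; a generic sketch with an invented resource type, not the socket API:

#include <errno.h>
#include <stdlib.h>

struct res { int id; };                          /* invented resource type */

static struct res *res_get(int id)
{
    struct res *r = malloc(sizeof(*r));
    if (r)
        r->id = id;
    return r;
}

static void res_put(struct res *r)
{
    free(r);
}

/* Acquire two resources; if the second acquisition fails, release only
 * the first, then report the error to the caller. */
static int make_pair(struct res **a, struct res **b)
{
    *a = res_get(1);
    if (!*a)
        return -ENOMEM;

    *b = res_get(2);
    if (!*b) {
        res_put(*a);
        *a = NULL;
        return -ENOMEM;
    }
    return 0;
}

int main(void)
{
    struct res *a, *b;

    if (make_pair(&a, &b) == 0) {
        res_put(b);
        res_put(a);
    }
    return 0;
}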
@@ -1551,17 +1505,13 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1551 */ 1505 */
1552 __module_get(newsock->ops->owner); 1506 __module_get(newsock->ops->owner);
1553 1507
1554 newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC); 1508 newfd = sock_alloc_file(newsock, &newfile, flags);
1555 if (unlikely(newfd < 0)) { 1509 if (unlikely(newfd < 0)) {
1556 err = newfd; 1510 err = newfd;
1557 sock_release(newsock); 1511 sock_release(newsock);
1558 goto out_put; 1512 goto out_put;
1559 } 1513 }
1560 1514
1561 err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
1562 if (err < 0)
1563 goto out_fd_simple;
1564
1565 err = security_socket_accept(sock, newsock); 1515 err = security_socket_accept(sock, newsock);
1566 if (err) 1516 if (err)
1567 goto out_fd; 1517 goto out_fd;
@@ -1591,11 +1541,6 @@ out_put:
1591 fput_light(sock->file, fput_needed); 1541 fput_light(sock->file, fput_needed);
1592out: 1542out:
1593 return err; 1543 return err;
1594out_fd_simple:
1595 sock_release(newsock);
1596 put_filp(newfile);
1597 put_unused_fd(newfd);
1598 goto out_put;
1599out_fd: 1544out_fd:
1600 fput(newfile); 1545 fput(newfile);
1601 put_unused_fd(newfd); 1546 put_unused_fd(newfd);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index c7450c8f0a7c..6dcdd2517819 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
55 55
56 /* 56 /*
57 * RFC 4291, Section 2.2.1 57 * RFC 4291, Section 2.2.1
58 *
59 * To keep the result as short as possible, especially
60 * since we don't shorthand, we don't want leading zeros
61 * in each halfword, so avoid %pI6.
62 */ 58 */
63 return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x", 59 return snprintf(buf, buflen, "%pI6c", addr);
64 ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
65 ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
66 ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
67 ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
68} 60}
69 61
70static size_t rpc_ntop6(const struct sockaddr *sap, 62static size_t rpc_ntop6(const struct sockaddr *sap,
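The rpc_ntop6_noscopeid() hunk above drops the hand-rolled halfword formatting in favour of the kernel's %pI6c printf extension, which emits the RFC 5952 compressed form of an IPv6 address. The nearest userspace equivalent is inet_ntop(); a small sketch, assuming glibc's usual zero-run compression:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
    struct in6_addr addr;
    char buf[INET6_ADDRSTRLEN];

    /* inet_ntop() compresses the longest run of zero halfwords, which is
     * roughly what the kernel's %pI6c specifier produces. */
    inet_pton(AF_INET6, "2001:db8:0:0:0:0:0:1", &addr);
    inet_ntop(AF_INET6, &addr, buf, sizeof(buf));
    printf("%s\n", buf);    /* prints "2001:db8::1" */
    return 0;
}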
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 7535a7bed2fa..f394fc190a49 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred)
123 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 123 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
124} 124}
125 125
126static void 126static int
127rpcauth_unhash_cred(struct rpc_cred *cred) 127rpcauth_unhash_cred(struct rpc_cred *cred)
128{ 128{
129 spinlock_t *cache_lock; 129 spinlock_t *cache_lock;
130 int ret;
130 131
131 cache_lock = &cred->cr_auth->au_credcache->lock; 132 cache_lock = &cred->cr_auth->au_credcache->lock;
132 spin_lock(cache_lock); 133 spin_lock(cache_lock);
133 if (atomic_read(&cred->cr_count) == 0) 134 ret = atomic_read(&cred->cr_count) == 0;
135 if (ret)
134 rpcauth_unhash_cred_locked(cred); 136 rpcauth_unhash_cred_locked(cred);
135 spin_unlock(cache_lock); 137 spin_unlock(cache_lock);
138 return ret;
136} 139}
137 140
138/* 141/*
@@ -446,31 +449,35 @@ void
446put_rpccred(struct rpc_cred *cred) 449put_rpccred(struct rpc_cred *cred)
447{ 450{
448 /* Fast path for unhashed credentials */ 451 /* Fast path for unhashed credentials */
449 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) 452 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
450 goto need_lock; 453 if (atomic_dec_and_test(&cred->cr_count))
451 454 cred->cr_ops->crdestroy(cred);
452 if (!atomic_dec_and_test(&cred->cr_count))
453 return; 455 return;
454 goto out_destroy; 456 }
455need_lock: 457
456 if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) 458 if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
457 return; 459 return;
458 if (!list_empty(&cred->cr_lru)) { 460 if (!list_empty(&cred->cr_lru)) {
459 number_cred_unused--; 461 number_cred_unused--;
460 list_del_init(&cred->cr_lru); 462 list_del_init(&cred->cr_lru);
461 } 463 }
462 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
463 rpcauth_unhash_cred(cred);
464 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { 464 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
465 cred->cr_expire = jiffies; 465 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
466 list_add_tail(&cred->cr_lru, &cred_unused); 466 cred->cr_expire = jiffies;
467 number_cred_unused++; 467 list_add_tail(&cred->cr_lru, &cred_unused);
468 spin_unlock(&rpc_credcache_lock); 468 number_cred_unused++;
469 return; 469 goto out_nodestroy;
470 }
471 if (!rpcauth_unhash_cred(cred)) {
472 /* We were hashed and someone looked us up... */
473 goto out_nodestroy;
474 }
470 } 475 }
471 spin_unlock(&rpc_credcache_lock); 476 spin_unlock(&rpc_credcache_lock);
472out_destroy:
473 cred->cr_ops->crdestroy(cred); 477 cred->cr_ops->crdestroy(cred);
478 return;
479out_nodestroy:
480 spin_unlock(&rpc_credcache_lock);
474} 481}
475EXPORT_SYMBOL_GPL(put_rpccred); 482EXPORT_SYMBOL_GPL(put_rpccred);
476 483
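The put_rpccred() rework above leans on atomic_dec_and_lock(): the final reference is only dropped with rpc_credcache_lock held, and rpcauth_unhash_cred() re-checks the count under the per-auth cache lock, so a concurrent lookup cannot revive a credential that is being destroyed. A rough userspace analogue using C11 atomics and a pthread mutex; dec_and_lock(), put_cred() and struct cred below are invented for the sketch and are not the patch's API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct cred {
    atomic_int count;
    pthread_mutex_t *cache_lock;    /* lock of the cache the cred sits on */
};

/* Drop one reference; return true with *lock held only when that was the
 * last reference, so the caller can unhash and free the object safely. */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
    int old = atomic_load(count);

    /* Fast path: the count stays above zero, no lock is taken. */
    while (old > 1) {
        if (atomic_compare_exchange_weak(count, &old, old - 1))
            return false;
    }

    /* Slow path: serialize against lookups before dropping to zero. */
    pthread_mutex_lock(lock);
    if (atomic_fetch_sub(count, 1) == 1)
        return true;                /* caller now owns the lock */
    pthread_mutex_unlock(lock);
    return false;
}

static void put_cred(struct cred *c)
{
    if (!dec_and_lock(&c->count, c->cache_lock))
        return;
    /* Last reference: this is where the cache entry would be unhashed. */
    pthread_mutex_unlock(c->cache_lock);
    free(c);
}

int main(void)
{
    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    struct cred *c = malloc(sizeof(*c));

    if (!c)
        return 1;
    atomic_init(&c->count, 2);
    c->cache_lock = &cache_lock;

    put_cred(c);    /* 2 -> 1, fast path */
    put_cred(c);    /* 1 -> 0, slow path, credential freed */
    return 0;
}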
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fc6a43ccd950..f7a7f8380e38 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
304 * to that upcall instead of adding the new upcall. 304 * to that upcall instead of adding the new upcall.
305 */ 305 */
306static inline struct gss_upcall_msg * 306static inline struct gss_upcall_msg *
307gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) 307gss_add_msg(struct gss_upcall_msg *gss_msg)
308{ 308{
309 struct rpc_inode *rpci = gss_msg->inode; 309 struct rpc_inode *rpci = gss_msg->inode;
310 struct inode *inode = &rpci->vfs_inode; 310 struct inode *inode = &rpci->vfs_inode;
@@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
445 gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred); 445 gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
446 if (IS_ERR(gss_new)) 446 if (IS_ERR(gss_new))
447 return gss_new; 447 return gss_new;
448 gss_msg = gss_add_msg(gss_auth, gss_new); 448 gss_msg = gss_add_msg(gss_new);
449 if (gss_msg == gss_new) { 449 if (gss_msg == gss_new) {
450 struct inode *inode = &gss_new->inode->vfs_inode; 450 struct inode *inode = &gss_new->inode->vfs_inode;
451 int res = rpc_queue_upcall(inode, &gss_new->msg); 451 int res = rpc_queue_upcall(inode, &gss_new->msg);
@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task)
485 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, 485 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
486 cred->cr_uid); 486 cred->cr_uid);
487 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); 487 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
488 if (IS_ERR(gss_msg) == -EAGAIN) { 488 if (PTR_ERR(gss_msg) == -EAGAIN) {
489 /* XXX: warning on the first, under the assumption we 489 /* XXX: warning on the first, under the assumption we
490 * shouldn't normally hit this case on a refresh. */ 490 * shouldn't normally hit this case on a refresh. */
491 warn_gssd(); 491 warn_gssd();
@@ -644,7 +644,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
644 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 644 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
645 if (IS_ERR(p)) { 645 if (IS_ERR(p)) {
646 err = PTR_ERR(p); 646 err = PTR_ERR(p);
647 gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES; 647 switch (err) {
648 case -EACCES:
649 gss_msg->msg.errno = err;
650 err = mlen;
651 break;
652 case -EFAULT:
653 case -ENOMEM:
654 case -EINVAL:
655 case -ENOSYS:
656 gss_msg->msg.errno = -EAGAIN;
657 break;
658 default:
659 printk(KERN_CRIT "%s: bad return from "
660 "gss_fill_context: %zd\n", __func__, err);
661 BUG();
662 }
648 goto err_release_msg; 663 goto err_release_msg;
649 } 664 }
650 gss_msg->ctx = gss_get_ctx(ctx); 665 gss_msg->ctx = gss_get_ctx(ctx);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ef45eba22485..2deb0ed72ff4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p,
131 struct krb5_ctx *ctx; 131 struct krb5_ctx *ctx;
132 int tmp; 132 int tmp;
133 133
134 if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) 134 if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
135 p = ERR_PTR(-ENOMEM);
135 goto out_err; 136 goto out_err;
137 }
136 138
137 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 139 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
138 if (IS_ERR(p)) 140 if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 6efbb0cd3c7c..76e4c6f4ac3c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
252 struct gss_ctx **ctx_id) 252 struct gss_ctx **ctx_id)
253{ 253{
254 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) 254 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
255 return GSS_S_FAILURE; 255 return -ENOMEM;
256 (*ctx_id)->mech_type = gss_mech_get(mech); 256 (*ctx_id)->mech_type = gss_mech_get(mech);
257 257
258 return mech->gm_ops 258 return mech->gm_ops
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 38829e20500b..154034b675bd 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task);
79 79
80static __be32 *rpc_encode_header(struct rpc_task *task); 80static __be32 *rpc_encode_header(struct rpc_task *task);
81static __be32 *rpc_verify_header(struct rpc_task *task); 81static __be32 *rpc_verify_header(struct rpc_task *task);
82static int rpc_ping(struct rpc_clnt *clnt, int flags); 82static int rpc_ping(struct rpc_clnt *clnt);
83 83
84static void rpc_register_client(struct rpc_clnt *clnt) 84static void rpc_register_client(struct rpc_clnt *clnt)
85{ 85{
@@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
340 return clnt; 340 return clnt;
341 341
342 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { 342 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
343 int err = rpc_ping(clnt, RPC_TASK_SOFT); 343 int err = rpc_ping(clnt);
344 if (err != 0) { 344 if (err != 0) {
345 rpc_shutdown_client(clnt); 345 rpc_shutdown_client(clnt);
346 return ERR_PTR(err); 346 return ERR_PTR(err);
@@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
528 clnt->cl_prog = program->number; 528 clnt->cl_prog = program->number;
529 clnt->cl_vers = version->number; 529 clnt->cl_vers = version->number;
530 clnt->cl_stats = program->stats; 530 clnt->cl_stats = program->stats;
531 err = rpc_ping(clnt, RPC_TASK_SOFT); 531 err = rpc_ping(clnt);
532 if (err != 0) { 532 if (err != 0) {
533 rpc_shutdown_client(clnt); 533 rpc_shutdown_client(clnt);
534 clnt = ERR_PTR(err); 534 clnt = ERR_PTR(err);
@@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task)
1060 goto retry_timeout; 1060 goto retry_timeout;
1061 case -EPFNOSUPPORT: 1061 case -EPFNOSUPPORT:
1062 /* server doesn't support any rpcbind version we know of */ 1062 /* server doesn't support any rpcbind version we know of */
1063 dprintk("RPC: %5u remote rpcbind service unavailable\n", 1063 dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1064 task->tk_pid); 1064 task->tk_pid);
1065 break; 1065 break;
1066 case -EPROTONOSUPPORT: 1066 case -EPROTONOSUPPORT:
@@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task)
1069 task->tk_status = 0; 1069 task->tk_status = 0;
1070 task->tk_action = call_bind; 1070 task->tk_action = call_bind;
1071 return; 1071 return;
1072 case -ECONNREFUSED: /* connection problems */
1073 case -ECONNRESET:
1074 case -ENOTCONN:
1075 case -EHOSTDOWN:
1076 case -EHOSTUNREACH:
1077 case -ENETUNREACH:
1078 case -EPIPE:
1079 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1080 task->tk_pid, task->tk_status);
1081 if (!RPC_IS_SOFTCONN(task)) {
1082 rpc_delay(task, 5*HZ);
1083 goto retry_timeout;
1084 }
1085 status = task->tk_status;
1086 break;
1072 default: 1087 default:
1073 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", 1088 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1074 task->tk_pid, -task->tk_status); 1089 task->tk_pid, -task->tk_status);
@@ -1180,11 +1195,25 @@ static void
1180call_transmit_status(struct rpc_task *task) 1195call_transmit_status(struct rpc_task *task)
1181{ 1196{
1182 task->tk_action = call_status; 1197 task->tk_action = call_status;
1198
1199 /*
1200 * Common case: success. Force the compiler to put this
1201 * test first.
1202 */
1203 if (task->tk_status == 0) {
1204 xprt_end_transmit(task);
1205 rpc_task_force_reencode(task);
1206 return;
1207 }
1208
1183 switch (task->tk_status) { 1209 switch (task->tk_status) {
1184 case -EAGAIN: 1210 case -EAGAIN:
1185 break; 1211 break;
1186 default: 1212 default:
1213 dprint_status(task);
1187 xprt_end_transmit(task); 1214 xprt_end_transmit(task);
1215 rpc_task_force_reencode(task);
1216 break;
1188 /* 1217 /*
1189 * Special cases: if we've been waiting on the 1218 * Special cases: if we've been waiting on the
1190 * socket's write_space() callback, or if the 1219 * socket's write_space() callback, or if the
@@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task)
1192 * then hold onto the transport lock. 1221 * then hold onto the transport lock.
1193 */ 1222 */
1194 case -ECONNREFUSED: 1223 case -ECONNREFUSED:
1195 case -ECONNRESET:
1196 case -ENOTCONN:
1197 case -EHOSTDOWN: 1224 case -EHOSTDOWN:
1198 case -EHOSTUNREACH: 1225 case -EHOSTUNREACH:
1199 case -ENETUNREACH: 1226 case -ENETUNREACH:
1227 if (RPC_IS_SOFTCONN(task)) {
1228 xprt_end_transmit(task);
1229 rpc_exit(task, task->tk_status);
1230 break;
1231 }
1232 case -ECONNRESET:
1233 case -ENOTCONN:
1200 case -EPIPE: 1234 case -EPIPE:
1201 rpc_task_force_reencode(task); 1235 rpc_task_force_reencode(task);
1202 } 1236 }
@@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task)
1346 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); 1380 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1347 task->tk_timeouts++; 1381 task->tk_timeouts++;
1348 1382
1383 if (RPC_IS_SOFTCONN(task)) {
1384 rpc_exit(task, -ETIMEDOUT);
1385 return;
1386 }
1349 if (RPC_IS_SOFT(task)) { 1387 if (RPC_IS_SOFT(task)) {
1350 if (clnt->cl_chatty) 1388 if (clnt->cl_chatty)
1351 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1389 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
@@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = {
1675 .p_decode = rpcproc_decode_null, 1713 .p_decode = rpcproc_decode_null,
1676}; 1714};
1677 1715
1678static int rpc_ping(struct rpc_clnt *clnt, int flags) 1716static int rpc_ping(struct rpc_clnt *clnt)
1679{ 1717{
1680 struct rpc_message msg = { 1718 struct rpc_message msg = {
1681 .rpc_proc = &rpcproc_null, 1719 .rpc_proc = &rpcproc_null,
1682 }; 1720 };
1683 int err; 1721 int err;
1684 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); 1722 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
1685 err = rpc_call_sync(clnt, &msg, flags); 1723 err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
1686 put_rpccred(msg.rpc_cred); 1724 put_rpccred(msg.rpc_cred);
1687 return err; 1725 return err;
1688} 1726}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 830faf4d9997..3e3772d8eb92 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -20,6 +20,7 @@
20#include <linux/in6.h> 20#include <linux/in6.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/mutex.h>
23#include <net/ipv6.h> 24#include <net/ipv6.h>
24 25
25#include <linux/sunrpc/clnt.h> 26#include <linux/sunrpc/clnt.h>
@@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *);
110static void rpcb_map_release(void *data); 111static void rpcb_map_release(void *data);
111static struct rpc_program rpcb_program; 112static struct rpc_program rpcb_program;
112 113
114static struct rpc_clnt * rpcb_local_clnt;
115static struct rpc_clnt * rpcb_local_clnt4;
116
113struct rpcbind_args { 117struct rpcbind_args {
114 struct rpc_xprt * r_xprt; 118 struct rpc_xprt * r_xprt;
115 119
@@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = {
163 .sin_port = htons(RPCBIND_PORT), 167 .sin_port = htons(RPCBIND_PORT),
164}; 168};
165 169
166static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr, 170static DEFINE_MUTEX(rpcb_create_local_mutex);
167 size_t addrlen, u32 version) 171
172/*
173 * Returns zero on success, otherwise a negative errno value
174 * is returned.
175 */
176static int rpcb_create_local(void)
168{ 177{
169 struct rpc_create_args args = { 178 struct rpc_create_args args = {
170 .protocol = XPRT_TRANSPORT_UDP, 179 .protocol = XPRT_TRANSPORT_TCP,
171 .address = addr, 180 .address = (struct sockaddr *)&rpcb_inaddr_loopback,
172 .addrsize = addrlen, 181 .addrsize = sizeof(rpcb_inaddr_loopback),
173 .servername = "localhost", 182 .servername = "localhost",
174 .program = &rpcb_program, 183 .program = &rpcb_program,
175 .version = version, 184 .version = RPCBVERS_2,
176 .authflavor = RPC_AUTH_UNIX, 185 .authflavor = RPC_AUTH_UNIX,
177 .flags = RPC_CLNT_CREATE_NOPING, 186 .flags = RPC_CLNT_CREATE_NOPING,
178 }; 187 };
188 struct rpc_clnt *clnt, *clnt4;
189 int result = 0;
190
191 if (rpcb_local_clnt)
192 return result;
193
194 mutex_lock(&rpcb_create_local_mutex);
195 if (rpcb_local_clnt)
196 goto out;
197
198 clnt = rpc_create(&args);
199 if (IS_ERR(clnt)) {
200 dprintk("RPC: failed to create local rpcbind "
201 "client (errno %ld).\n", PTR_ERR(clnt));
202 result = -PTR_ERR(clnt);
203 goto out;
204 }
179 205
180 return rpc_create(&args); 206 /*
207 * This results in an RPC ping. On systems running portmapper,
208 * the v4 ping will fail. Proceed anyway, but disallow rpcb
209 * v4 upcalls.
210 */
211 clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
212 if (IS_ERR(clnt4)) {
213 dprintk("RPC: failed to create local rpcbind v4 "
214 "cleint (errno %ld).\n", PTR_ERR(clnt4));
215 clnt4 = NULL;
216 }
217
218 rpcb_local_clnt = clnt;
219 rpcb_local_clnt4 = clnt4;
220
221out:
222 mutex_unlock(&rpcb_create_local_mutex);
223 return result;
181} 224}
182 225
183static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, 226static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
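rpcb_create_local() above lazily builds and caches the local rpcbind clients: the cached pointer is tested once without rpcb_create_local_mutex and again under it, so at most one caller performs the construction. A compressed userspace sketch of that double-checked pattern (invented type and helper names; a C11 atomic pointer stands in for the kernel's own publication rules):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct client { int unused; };                  /* invented stand-in type */

static _Atomic(struct client *) local_clnt;     /* cached shared client */
static pthread_mutex_t create_local_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Build the shared client on first use: the unlocked check keeps the
 * mutex off the hot path, the locked re-check guarantees the object is
 * constructed at most once even if several callers race here. */
static int create_local(void)
{
    struct client *c;
    int result = 0;

    if (atomic_load(&local_clnt))
        return 0;

    pthread_mutex_lock(&create_local_mutex);
    if (atomic_load(&local_clnt))
        goto out;

    c = calloc(1, sizeof(*c));
    if (!c) {
        result = -ENOMEM;
        goto out;
    }
    atomic_store(&local_clnt, c);
out:
    pthread_mutex_unlock(&create_local_mutex);
    return result;
}

int main(void)
{
    return create_local() ? 1 : 0;
}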
@@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
209 return rpc_create(&args); 252 return rpc_create(&args);
210} 253}
211 254
212static int rpcb_register_call(const u32 version, struct rpc_message *msg) 255static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
213{ 256{
214 struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback;
215 size_t addrlen = sizeof(rpcb_inaddr_loopback);
216 struct rpc_clnt *rpcb_clnt;
217 int result, error = 0; 257 int result, error = 0;
218 258
219 msg->rpc_resp = &result; 259 msg->rpc_resp = &result;
220 260
221 rpcb_clnt = rpcb_create_local(addr, addrlen, version); 261 error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
222 if (!IS_ERR(rpcb_clnt)) {
223 error = rpc_call_sync(rpcb_clnt, msg, 0);
224 rpc_shutdown_client(rpcb_clnt);
225 } else
226 error = PTR_ERR(rpcb_clnt);
227
228 if (error < 0) { 262 if (error < 0) {
229 dprintk("RPC: failed to contact local rpcbind " 263 dprintk("RPC: failed to contact local rpcbind "
230 "server (errno %d).\n", -error); 264 "server (errno %d).\n", -error);
@@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
279 struct rpc_message msg = { 313 struct rpc_message msg = {
280 .rpc_argp = &map, 314 .rpc_argp = &map,
281 }; 315 };
316 int error;
317
318 error = rpcb_create_local();
319 if (error)
320 return error;
282 321
283 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " 322 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
284 "rpcbind\n", (port ? "" : "un"), 323 "rpcbind\n", (port ? "" : "un"),
@@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
288 if (port) 327 if (port)
289 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; 328 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
290 329
291 return rpcb_register_call(RPCBVERS_2, &msg); 330 return rpcb_register_call(rpcb_local_clnt, &msg);
292} 331}
293 332
294/* 333/*
@@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
313 if (port) 352 if (port)
314 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 353 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
315 354
316 result = rpcb_register_call(RPCBVERS_4, msg); 355 result = rpcb_register_call(rpcb_local_clnt4, msg);
317 kfree(map->r_addr); 356 kfree(map->r_addr);
318 return result; 357 return result;
319} 358}
@@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
340 if (port) 379 if (port)
341 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 380 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
342 381
343 result = rpcb_register_call(RPCBVERS_4, msg); 382 result = rpcb_register_call(rpcb_local_clnt4, msg);
344 kfree(map->r_addr); 383 kfree(map->r_addr);
345 return result; 384 return result;
346} 385}
@@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
356 map->r_addr = ""; 395 map->r_addr = "";
357 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 396 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
358 397
359 return rpcb_register_call(RPCBVERS_4, msg); 398 return rpcb_register_call(rpcb_local_clnt4, msg);
360} 399}
361 400
362/** 401/**
@@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version,
414 struct rpc_message msg = { 453 struct rpc_message msg = {
415 .rpc_argp = &map, 454 .rpc_argp = &map,
416 }; 455 };
456 int error;
457
458 error = rpcb_create_local();
459 if (error)
460 return error;
461 if (rpcb_local_clnt4 == NULL)
462 return -EPROTONOSUPPORT;
417 463
418 if (address == NULL) 464 if (address == NULL)
419 return rpcb_unregister_all_protofamilies(&msg); 465 return rpcb_unregister_all_protofamilies(&msg);
@@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
491 .rpc_message = &msg, 537 .rpc_message = &msg,
492 .callback_ops = &rpcb_getport_ops, 538 .callback_ops = &rpcb_getport_ops,
493 .callback_data = map, 539 .callback_data = map,
494 .flags = RPC_TASK_ASYNC, 540 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
495 }; 541 };
496 542
497 return rpc_run_task(&task_setup_data); 543 return rpc_run_task(&task_setup_data);
@@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = {
1027 .version = rpcb_version, 1073 .version = rpcb_version,
1028 .stats = &rpcb_stats, 1074 .stats = &rpcb_stats,
1029}; 1075};
1076
1077/**
1078 * cleanup_rpcb_clnt - shut down the cached local rpcbind clients
1079 *
1080 */
1081void cleanup_rpcb_clnt(void)
1082{
1083 if (rpcb_local_clnt4)
1084 rpc_shutdown_client(rpcb_local_clnt4);
1085 if (rpcb_local_clnt)
1086 rpc_shutdown_client(rpcb_local_clnt);
1087}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef74ba0666c..aae6907fd546 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -210,6 +210,7 @@ void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qnam
210{ 210{
211 __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); 211 __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
212} 212}
213EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
213 214
214void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) 215void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
215{ 216{
@@ -385,6 +386,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r
385} 386}
386 387
387/* 388/*
389 * Tests whether rpc queue is empty
390 */
391int rpc_queue_empty(struct rpc_wait_queue *queue)
392{
393 int res;
394
395 spin_lock_bh(&queue->lock);
396 res = queue->qlen;
397 spin_unlock_bh(&queue->lock);
398 return (res == 0);
399}
400EXPORT_SYMBOL_GPL(rpc_queue_empty);
401
402/*
388 * Wake up a task on a specific queue 403 * Wake up a task on a specific queue
389 */ 404 */
390void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) 405void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8cce92189019..f438347d817b 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -24,6 +24,8 @@
24 24
25extern struct cache_detail ip_map_cache, unix_gid_cache; 25extern struct cache_detail ip_map_cache, unix_gid_cache;
26 26
27extern void cleanup_rpcb_clnt(void);
28
27static int __init 29static int __init
28init_sunrpc(void) 30init_sunrpc(void)
29{ 31{
@@ -53,6 +55,7 @@ out:
53static void __exit 55static void __exit
54cleanup_sunrpc(void) 56cleanup_sunrpc(void)
55{ 57{
58 cleanup_rpcb_clnt();
56 rpcauth_remove_module(); 59 rpcauth_remove_module();
57 cleanup_socket_xprt(); 60 cleanup_socket_xprt();
58 svc_cleanup_xprt_sock(); 61 svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b845e2293dfe..7d1f9e928f69 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
16 16
17#define RPCDBG_FACILITY RPCDBG_SVCXPRT 17#define RPCDBG_FACILITY RPCDBG_SVCXPRT
18 18
19#define SVC_MAX_WAKING 5
20
21static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); 19static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
22static int svc_deferred_recv(struct svc_rqst *rqstp); 20static int svc_deferred_recv(struct svc_rqst *rqstp);
23static struct cache_deferred_req *svc_defer(struct cache_req *req); 21static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
306 struct svc_pool *pool; 304 struct svc_pool *pool;
307 struct svc_rqst *rqstp; 305 struct svc_rqst *rqstp;
308 int cpu; 306 int cpu;
309 int thread_avail;
310 307
311 if (!(xprt->xpt_flags & 308 if (!(xprt->xpt_flags &
312 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) 309 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
318 315
319 spin_lock_bh(&pool->sp_lock); 316 spin_lock_bh(&pool->sp_lock);
320 317
318 if (!list_empty(&pool->sp_threads) &&
319 !list_empty(&pool->sp_sockets))
320 printk(KERN_ERR
321 "svc_xprt_enqueue: "
322 "threads and transports both waiting??\n");
323
321 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { 324 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
322 /* Don't enqueue dead transports */ 325 /* Don't enqueue dead transports */
323 dprintk("svc: transport %p is dead, not enqueued\n", xprt); 326 dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
358 } 361 }
359 362
360 process: 363 process:
361 /* Work out whether threads are available */ 364 if (!list_empty(&pool->sp_threads)) {
362 thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
363 if (pool->sp_nwaking >= SVC_MAX_WAKING) {
364 /* too many threads are runnable and trying to wake up */
365 thread_avail = 0;
366 pool->sp_stats.overloads_avoided++;
367 }
368
369 if (thread_avail) {
370 rqstp = list_entry(pool->sp_threads.next, 365 rqstp = list_entry(pool->sp_threads.next,
371 struct svc_rqst, 366 struct svc_rqst,
372 rq_list); 367 rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
381 svc_xprt_get(xprt); 376 svc_xprt_get(xprt);
382 rqstp->rq_reserved = serv->sv_max_mesg; 377 rqstp->rq_reserved = serv->sv_max_mesg;
383 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 378 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
384 rqstp->rq_waking = 1;
385 pool->sp_nwaking++;
386 pool->sp_stats.threads_woken++; 379 pool->sp_stats.threads_woken++;
387 BUG_ON(xprt->xpt_pool != pool); 380 BUG_ON(xprt->xpt_pool != pool);
388 wake_up(&rqstp->rq_wait); 381 wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
651 return -EINTR; 644 return -EINTR;
652 645
653 spin_lock_bh(&pool->sp_lock); 646 spin_lock_bh(&pool->sp_lock);
654 if (rqstp->rq_waking) {
655 rqstp->rq_waking = 0;
656 pool->sp_nwaking--;
657 BUG_ON(pool->sp_nwaking < 0);
658 }
659 xprt = svc_xprt_dequeue(pool); 647 xprt = svc_xprt_dequeue(pool);
660 if (xprt) { 648 if (xprt) {
661 rqstp->rq_xprt = xprt; 649 rqstp->rq_xprt = xprt;
@@ -711,7 +699,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
711 spin_unlock_bh(&pool->sp_lock); 699 spin_unlock_bh(&pool->sp_lock);
712 700
713 len = 0; 701 len = 0;
714 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { 702 if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
703 !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
715 struct svc_xprt *newxpt; 704 struct svc_xprt *newxpt;
716 newxpt = xprt->xpt_ops->xpo_accept(xprt); 705 newxpt = xprt->xpt_ops->xpo_accept(xprt);
717 if (newxpt) { 706 if (newxpt) {
@@ -1204,16 +1193,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
1204 struct svc_pool *pool = p; 1193 struct svc_pool *pool = p;
1205 1194
1206 if (p == SEQ_START_TOKEN) { 1195 if (p == SEQ_START_TOKEN) {
1207 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); 1196 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
1208 return 0; 1197 return 0;
1209 } 1198 }
1210 1199
1211 seq_printf(m, "%u %lu %lu %lu %lu %lu\n", 1200 seq_printf(m, "%u %lu %lu %lu %lu\n",
1212 pool->sp_id, 1201 pool->sp_id,
1213 pool->sp_stats.packets, 1202 pool->sp_stats.packets,
1214 pool->sp_stats.sockets_queued, 1203 pool->sp_stats.sockets_queued,
1215 pool->sp_stats.threads_woken, 1204 pool->sp_stats.threads_woken,
1216 pool->sp_stats.overloads_avoided,
1217 pool->sp_stats.threads_timedout); 1205 pool->sp_stats.threads_timedout);
1218 1206
1219 return 0; 1207 return 0;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 4a8f6558718a..d8c041114497 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid)
655 return NULL; 655 return NULL;
656} 656}
657 657
658static int unix_gid_find(uid_t uid, struct group_info **gip, 658static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
659 struct svc_rqst *rqstp)
660{ 659{
661 struct unix_gid *ug = unix_gid_lookup(uid); 660 struct unix_gid *ug;
661 struct group_info *gi;
662 int ret;
663
664 ug = unix_gid_lookup(uid);
662 if (!ug) 665 if (!ug)
663 return -EAGAIN; 666 return ERR_PTR(-EAGAIN);
664 switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) { 667 ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
668 switch (ret) {
665 case -ENOENT: 669 case -ENOENT:
666 *gip = NULL; 670 return ERR_PTR(-ENOENT);
667 return 0;
668 case 0: 671 case 0:
669 *gip = ug->gi; 672 gi = get_group_info(ug->gi);
670 get_group_info(*gip);
671 cache_put(&ug->h, &unix_gid_cache); 673 cache_put(&ug->h, &unix_gid_cache);
672 return 0; 674 return gi;
673 default: 675 default:
674 return -EAGAIN; 676 return ERR_PTR(-EAGAIN);
675 } 677 }
676} 678}
677 679
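The reworked unix_gid_find() above returns either a valid group_info pointer or an errno encoded with ERR_PTR(), which svcauth_unix_set_client() unpacks via PTR_ERR(). A self-contained userspace rendition of that idiom; find_groups() and its uid cutoffs are hypothetical, only the ERR_PTR encoding mirrors the kernel helpers:

#include <errno.h>
#include <stdio.h>

/* Userspace copies of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers:
 * small negative errno values ride in the pointer itself, so one return
 * value carries either a valid object or an error code. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct group_info { int ngroups; };

/* Hypothetical lookup in the shape of the reworked unix_gid_find(). */
static struct group_info *find_groups(int uid)
{
    static struct group_info root_groups = { .ngroups = 1 };

    if (uid == 0)
        return &root_groups;
    if (uid < 1000)
        return ERR_PTR(-ENOENT);    /* known uid, no supplementary groups */
    return ERR_PTR(-EAGAIN);        /* cache miss, caller should retry */
}

int main(void)
{
    struct group_info *gi = find_groups(42);

    if (IS_ERR(gi))
        printf("lookup failed: %ld\n", PTR_ERR(gi));
    else
        printf("got %d group(s)\n", gi->ngroups);
    return 0;
}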
@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
681 struct sockaddr_in *sin; 683 struct sockaddr_in *sin;
682 struct sockaddr_in6 *sin6, sin6_storage; 684 struct sockaddr_in6 *sin6, sin6_storage;
683 struct ip_map *ipm; 685 struct ip_map *ipm;
686 struct group_info *gi;
687 struct svc_cred *cred = &rqstp->rq_cred;
684 688
685 switch (rqstp->rq_addr.ss_family) { 689 switch (rqstp->rq_addr.ss_family) {
686 case AF_INET: 690 case AF_INET:
@@ -721,6 +725,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
721 ip_map_cached_put(rqstp, ipm); 725 ip_map_cached_put(rqstp, ipm);
722 break; 726 break;
723 } 727 }
728
729 gi = unix_gid_find(cred->cr_uid, rqstp);
730 switch (PTR_ERR(gi)) {
731 case -EAGAIN:
732 return SVC_DROP;
733 case -ENOENT:
734 break;
735 default:
736 put_group_info(cred->cr_group_info);
737 cred->cr_group_info = gi;
738 }
724 return SVC_OK; 739 return SVC_OK;
725} 740}
726 741
@@ -817,19 +832,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
817 slen = svc_getnl(argv); /* gids length */ 832 slen = svc_getnl(argv); /* gids length */
818 if (slen > 16 || (len -= (slen + 2)*4) < 0) 833 if (slen > 16 || (len -= (slen + 2)*4) < 0)
819 goto badcred; 834 goto badcred;
820 if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp) 835 cred->cr_group_info = groups_alloc(slen);
821 == -EAGAIN) 836 if (cred->cr_group_info == NULL)
822 return SVC_DROP; 837 return SVC_DROP;
823 if (cred->cr_group_info == NULL) { 838 for (i = 0; i < slen; i++)
824 cred->cr_group_info = groups_alloc(slen); 839 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
825 if (cred->cr_group_info == NULL)
826 return SVC_DROP;
827 for (i = 0; i < slen; i++)
828 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
829 } else {
830 for (i = 0; i < slen ; i++)
831 svc_getnl(argv);
832 }
833 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 840 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
834 *authp = rpc_autherr_badverf; 841 *authp = rpc_autherr_badverf;
835 return SVC_DENIED; 842 return SVC_DENIED;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index fd46d42afa89..469de292c23c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task)
700 } 700 }
701 if (!xprt_lock_write(xprt, task)) 701 if (!xprt_lock_write(xprt, task))
702 return; 702 return;
703
704 if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
705 xprt->ops->close(xprt);
706
703 if (xprt_connected(xprt)) 707 if (xprt_connected(xprt))
704 xprt_release_write(xprt, task); 708 xprt_release_write(xprt, task);
705 else { 709 else {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index f11be72a1a80..b15e1ebb2bfa 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -54,7 +54,7 @@
54 * Assumptions: 54 * Assumptions:
55 * - head[0] is physically contiguous. 55 * - head[0] is physically contiguous.
56 * - tail[0] is physically contiguous. 56 * - tail[0] is physically contiguous.
57 * - pages[] is not physically or virtually contigous and consists of 57 * - pages[] is not physically or virtually contiguous and consists of
58 * PAGE_SIZE elements. 58 * PAGE_SIZE elements.
59 * 59 *
60 * Output: 60 * Output:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04732d09013e..3d739e5d15d8 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task)
2019 if (xprt_test_and_set_connecting(xprt)) 2019 if (xprt_test_and_set_connecting(xprt))
2020 return; 2020 return;
2021 2021
2022 if (transport->sock != NULL) { 2022 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
2023 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2023 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2024 "seconds\n", 2024 "seconds\n",
2025 xprt, xprt->reestablish_timeout / HZ); 2025 xprt, xprt->reestablish_timeout / HZ);
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index ca269178c4d4..35f370091f4f 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -62,7 +62,7 @@
62 * Called when wanting to reset the device for any reason. Device is 62 * Called when wanting to reset the device for any reason. Device is
63 * taken back to power on status. 63 * taken back to power on status.
64 * 64 *
65 * This call blocks; on succesful return, the device has completed the 65 * This call blocks; on successful return, the device has completed the
66 * reset process and is ready to operate. 66 * reset process and is ready to operate.
67 */ 67 */
68int wimax_reset(struct wimax_dev *wimax_dev) 68int wimax_reset(struct wimax_dev *wimax_dev)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cb81ca35b0d6..0ecb16a9a883 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -469,16 +469,16 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
469 return 0; 469 return 0;
470} 470}
471 471
472void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) 472void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
473{ 473{
474 read_lock_bh(&xfrm_policy_lock); 474 read_lock_bh(&xfrm_policy_lock);
475 si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN]; 475 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
476 si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT]; 476 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
477 si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD]; 477 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
478 si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 478 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
479 si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 479 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
480 si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 480 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
481 si->spdhcnt = init_net.xfrm.policy_idx_hmask; 481 si->spdhcnt = net->xfrm.policy_idx_hmask;
482 si->spdhmcnt = xfrm_policy_hashmax; 482 si->spdhmcnt = xfrm_policy_hashmax;
483 read_unlock_bh(&xfrm_policy_lock); 483 read_unlock_bh(&xfrm_policy_lock);
484} 484}
@@ -1309,15 +1309,28 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
1309 return tos; 1309 return tos;
1310} 1310}
1311 1311
1312static inline struct xfrm_dst *xfrm_alloc_dst(int family) 1312static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1313{ 1313{
1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1315 struct dst_ops *dst_ops;
1315 struct xfrm_dst *xdst; 1316 struct xfrm_dst *xdst;
1316 1317
1317 if (!afinfo) 1318 if (!afinfo)
1318 return ERR_PTR(-EINVAL); 1319 return ERR_PTR(-EINVAL);
1319 1320
1320 xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS); 1321 switch (family) {
1322 case AF_INET:
1323 dst_ops = &net->xfrm.xfrm4_dst_ops;
1324 break;
1325#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1326 case AF_INET6:
1327 dst_ops = &net->xfrm.xfrm6_dst_ops;
1328 break;
1329#endif
1330 default:
1331 BUG();
1332 }
1333 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
1321 1334
1322 xfrm_policy_put_afinfo(afinfo); 1335 xfrm_policy_put_afinfo(afinfo);
1323 1336
@@ -1366,6 +1379,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1366 struct flowi *fl, 1379 struct flowi *fl,
1367 struct dst_entry *dst) 1380 struct dst_entry *dst)
1368{ 1381{
1382 struct net *net = xp_net(policy);
1369 unsigned long now = jiffies; 1383 unsigned long now = jiffies;
1370 struct net_device *dev; 1384 struct net_device *dev;
1371 struct dst_entry *dst_prev = NULL; 1385 struct dst_entry *dst_prev = NULL;
@@ -1389,7 +1403,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1389 dst_hold(dst); 1403 dst_hold(dst);
1390 1404
1391 for (; i < nx; i++) { 1405 for (; i < nx; i++) {
1392 struct xfrm_dst *xdst = xfrm_alloc_dst(family); 1406 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1393 struct dst_entry *dst1 = &xdst->u.dst; 1407 struct dst_entry *dst1 = &xdst->u.dst;
1394 1408
1395 err = PTR_ERR(xdst); 1409 err = PTR_ERR(xdst);
@@ -1445,7 +1459,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1445 if (!dev) 1459 if (!dev)
1446 goto free_dst; 1460 goto free_dst;
1447 1461
1448 /* Copy neighbout for reachability confirmation */ 1462 /* Copy neighbour for reachability confirmation */
1449 dst0->neighbour = neigh_clone(dst->neighbour); 1463 dst0->neighbour = neigh_clone(dst->neighbour);
1450 1464
1451 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1465 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
@@ -2279,6 +2293,7 @@ EXPORT_SYMBOL(xfrm_bundle_ok);
2279 2293
2280int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2294int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2281{ 2295{
2296 struct net *net;
2282 int err = 0; 2297 int err = 0;
2283 if (unlikely(afinfo == NULL)) 2298 if (unlikely(afinfo == NULL))
2284 return -EINVAL; 2299 return -EINVAL;
@@ -2302,6 +2317,27 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2302 xfrm_policy_afinfo[afinfo->family] = afinfo; 2317 xfrm_policy_afinfo[afinfo->family] = afinfo;
2303 } 2318 }
2304 write_unlock_bh(&xfrm_policy_afinfo_lock); 2319 write_unlock_bh(&xfrm_policy_afinfo_lock);
2320
2321 rtnl_lock();
2322 for_each_net(net) {
2323 struct dst_ops *xfrm_dst_ops;
2324
2325 switch (afinfo->family) {
2326 case AF_INET:
2327 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2328 break;
2329#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2330 case AF_INET6:
2331 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2332 break;
2333#endif
2334 default:
2335 BUG();
2336 }
2337 *xfrm_dst_ops = *afinfo->dst_ops;
2338 }
2339 rtnl_unlock();
2340
2305 return err; 2341 return err;
2306} 2342}
2307EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2343EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@@ -2332,6 +2368,22 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2332} 2368}
2333EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2369EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2334 2370
2371static void __net_init xfrm_dst_ops_init(struct net *net)
2372{
2373 struct xfrm_policy_afinfo *afinfo;
2374
2375 read_lock_bh(&xfrm_policy_afinfo_lock);
2376 afinfo = xfrm_policy_afinfo[AF_INET];
2377 if (afinfo)
2378 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2379#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2380 afinfo = xfrm_policy_afinfo[AF_INET6];
2381 if (afinfo)
2382 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2383#endif
2384 read_unlock_bh(&xfrm_policy_afinfo_lock);
2385}
2386
2335static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2387static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2336{ 2388{
2337 struct xfrm_policy_afinfo *afinfo; 2389 struct xfrm_policy_afinfo *afinfo;
@@ -2494,6 +2546,7 @@ static int __net_init xfrm_net_init(struct net *net)
2494 rv = xfrm_policy_init(net); 2546 rv = xfrm_policy_init(net);
2495 if (rv < 0) 2547 if (rv < 0)
2496 goto out_policy; 2548 goto out_policy;
2549 xfrm_dst_ops_init(net);
2497 rv = xfrm_sysctl_init(net); 2550 rv = xfrm_sysctl_init(net);
2498 if (rv < 0) 2551 if (rv < 0)
2499 goto out_sysctl; 2552 goto out_sysctl;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d847f1a52b44..b36cc344474b 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -641,11 +641,11 @@ out:
641} 641}
642EXPORT_SYMBOL(xfrm_state_flush); 642EXPORT_SYMBOL(xfrm_state_flush);
643 643
644void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) 644void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
645{ 645{
646 spin_lock_bh(&xfrm_state_lock); 646 spin_lock_bh(&xfrm_state_lock);
647 si->sadcnt = init_net.xfrm.state_num; 647 si->sadcnt = net->xfrm.state_num;
648 si->sadhcnt = init_net.xfrm.state_hmask; 648 si->sadhcnt = net->xfrm.state_hmask;
649 si->sadhmcnt = xfrm_state_hashmax; 649 si->sadhmcnt = xfrm_state_hashmax;
650 spin_unlock_bh(&xfrm_state_lock); 650 spin_unlock_bh(&xfrm_state_lock);
651} 651}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1ada6186933c..d5a712976004 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -781,7 +781,8 @@ static inline size_t xfrm_spdinfo_msgsize(void)
781 + nla_total_size(sizeof(struct xfrmu_spdhinfo)); 781 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
782} 782}
783 783
784static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 784static int build_spdinfo(struct sk_buff *skb, struct net *net,
785 u32 pid, u32 seq, u32 flags)
785{ 786{
786 struct xfrmk_spdinfo si; 787 struct xfrmk_spdinfo si;
787 struct xfrmu_spdinfo spc; 788 struct xfrmu_spdinfo spc;
@@ -795,7 +796,7 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
795 796
796 f = nlmsg_data(nlh); 797 f = nlmsg_data(nlh);
797 *f = flags; 798 *f = flags;
798 xfrm_spd_getinfo(&si); 799 xfrm_spd_getinfo(net, &si);
799 spc.incnt = si.incnt; 800 spc.incnt = si.incnt;
800 spc.outcnt = si.outcnt; 801 spc.outcnt = si.outcnt;
801 spc.fwdcnt = si.fwdcnt; 802 spc.fwdcnt = si.fwdcnt;
@@ -828,7 +829,7 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
828 if (r_skb == NULL) 829 if (r_skb == NULL)
829 return -ENOMEM; 830 return -ENOMEM;
830 831
831 if (build_spdinfo(r_skb, spid, seq, *flags) < 0) 832 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
832 BUG(); 833 BUG();
833 834
834 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 835 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
@@ -841,7 +842,8 @@ static inline size_t xfrm_sadinfo_msgsize(void)
841 + nla_total_size(4); /* XFRMA_SAD_CNT */ 842 + nla_total_size(4); /* XFRMA_SAD_CNT */
842} 843}
843 844
844static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 845static int build_sadinfo(struct sk_buff *skb, struct net *net,
846 u32 pid, u32 seq, u32 flags)
845{ 847{
846 struct xfrmk_sadinfo si; 848 struct xfrmk_sadinfo si;
847 struct xfrmu_sadhinfo sh; 849 struct xfrmu_sadhinfo sh;
@@ -854,7 +856,7 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
854 856
855 f = nlmsg_data(nlh); 857 f = nlmsg_data(nlh);
856 *f = flags; 858 *f = flags;
857 xfrm_sad_getinfo(&si); 859 xfrm_sad_getinfo(net, &si);
858 860
859 sh.sadhmcnt = si.sadhmcnt; 861 sh.sadhmcnt = si.sadhmcnt;
860 sh.sadhcnt = si.sadhcnt; 862 sh.sadhcnt = si.sadhcnt;
@@ -882,7 +884,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
882 if (r_skb == NULL) 884 if (r_skb == NULL)
883 return -ENOMEM; 885 return -ENOMEM;
884 886
885 if (build_sadinfo(r_skb, spid, seq, *flags) < 0) 887 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
886 BUG(); 888 BUG();
887 889
888 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 890 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);