Diffstat (limited to 'net')
 net/8021q/vlan_core.c | 46
 net/8021q/vlanproc.c | 2
 net/9p/Kconfig | 10
 net/9p/client.c | 59
 net/9p/trans_rdma.c | 5
 net/atm/svc.c | 6
 net/bridge/br_netfilter.c | 13
 net/can/af_can.c | 68
 net/can/bcm.c | 7
 net/compat.c | 54
 net/core/dev.c | 3
 net/core/net_namespace.c | 32
 net/core/netpoll.c | 2
 net/core/pktgen.c | 28
 net/core/rtnetlink.c | 4
 net/core/scm.c | 22
 net/core/skbuff.c | 16
 net/core/sock.c | 33
 net/dsa/slave.c | 72
 net/dsa/tag_dsa.c | 1
 net/dsa/tag_edsa.c | 1
 net/dsa/tag_trailer.c | 1
 net/ipv4/af_inet.c | 1
 net/ipv4/cipso_ipv4.c | 7
 net/ipv4/ip_input.c | 10
 net/ipv4/ipmr.c | 9
 net/ipv4/netfilter/nf_nat_rule.c | 2
 net/ipv4/proc.c | 58
 net/ipv4/tcp.c | 3
 net/ipv4/tcp_htcp.c | 14
 net/ipv4/tcp_output.c | 25
 net/ipv4/tcp_vegas.c | 82
 net/ipv4/udp.c | 13
 net/ipv4/xfrm4_state.c | 1
 net/ipv6/addrconf.c | 4
 net/ipv6/datagram.c | 5
 net/ipv6/ip6mr.c | 13
 net/ipv6/ipv6_sockglue.c | 5
 net/ipv6/ndisc.c | 7
 net/ipv6/proc.c | 6
 net/ipv6/udp.c | 36
 net/ipv6/xfrm6_state.c | 1
 net/key/af_key.c | 2
 net/mac80211/debugfs_sta.c | 2
 net/mac80211/mlme.c | 22
 net/mac80211/rc80211_minstrel_debugfs.c | 6
 net/mac80211/sta_info.c | 2
 net/mac80211/wext.c | 8
 net/netfilter/ipvs/ip_vs_xmit.c | 3
 net/netfilter/nf_conntrack_core.c | 2
 net/netfilter/nf_conntrack_helper.c | 3
 net/netfilter/nf_conntrack_netlink.c | 7
 net/netfilter/nf_conntrack_proto.c | 5
 net/netfilter/nf_conntrack_proto_gre.c | 4
 net/netfilter/xt_socket.c | 2
 net/netlabel/netlabel_addrlist.c | 2
 net/netlabel/netlabel_addrlist.h | 22
 net/netlabel/netlabel_mgmt.c | 2
 net/netlabel/netlabel_unlabeled.c | 48
 net/phonet/af_phonet.c | 61
 net/phonet/pep-gprs.c | 27
 net/phonet/pn_dev.c | 2
 net/phonet/pn_netlink.c | 3
 net/rfkill/rfkill-input.c | 5
 net/rfkill/rfkill.c | 2
 net/rose/af_rose.c | 10
 net/sched/sch_api.c | 2
 net/sched/sch_generic.c | 7
 net/sched/sch_netem.c | 3
 net/socket.c | 81
 net/sunrpc/auth.c | 18
 net/sunrpc/auth_generic.c | 20
 net/sunrpc/svcsock.c | 9
 net/sunrpc/xprtsock.c | 58
 net/unix/af_unix.c | 35
 net/unix/garbage.c | 62
 net/wireless/reg.c | 4
 net/xfrm/xfrm_policy.c | 7
 net/xfrm/xfrm_user.c | 2
 79 files changed, 814 insertions(+), 533 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 916061f681b6..68ced4bf158c 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,11 +3,20 @@
 #include <linux/if_vlan.h>
 #include "vlan.h"
 
+struct vlan_hwaccel_cb {
+	struct net_device	*dev;
+};
+
+static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
+{
+	return (struct vlan_hwaccel_cb *)skb->cb;
+}
+
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
-	struct net_device_stats *stats;
+	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
 
 	if (skb_bond_should_drop(skb)) {
 		dev_kfree_skb_any(skb);
@@ -15,23 +24,35 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 	}
 
 	skb->vlan_tci = vlan_tci;
+	cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+}
+EXPORT_SYMBOL(__vlan_hwaccel_rx);
+
+int vlan_hwaccel_do_receive(struct sk_buff *skb)
+{
+	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
+	struct net_device *dev = cb->dev;
+	struct net_device_stats *stats;
+
 	netif_nit_deliver(skb);
 
-	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
-	if (skb->dev == NULL) {
-		dev_kfree_skb_any(skb);
-		/* Not NET_RX_DROP, this is not being dropped
-		 * due to congestion. */
-		return NET_RX_SUCCESS;
+	if (dev == NULL) {
+		kfree_skb(skb);
+		return -1;
 	}
-	skb->dev->last_rx = jiffies;
+
+	skb->dev = dev;
+	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	stats = &skb->dev->stats;
+	dev->last_rx = jiffies;
+
+	stats = &dev->stats;
 	stats->rx_packets++;
 	stats->rx_bytes += skb->len;
 
-	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
 	switch (skb->pkt_type) {
 	case PACKET_BROADCAST:
 		break;
@@ -43,13 +64,12 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 	 * This allows the VLAN to have a different MAC than the
 	 * underlying device, and still route correctly. */
 	if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-				skb->dev->dev_addr))
+				dev->dev_addr))
 		skb->pkt_type = PACKET_HOST;
 		break;
 	};
-	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+	return 0;
 }
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 0feefa4e1a4b..3628e0a81b40 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -314,7 +314,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
 		   dev_info->ingress_priority_map[6],
 		   dev_info->ingress_priority_map[7]);
 
-	seq_printf(seq, "EGRESSS priority Mappings: ");
+	seq_printf(seq, "EGRESS priority mappings: ");
 	for (i = 0; i < 16; i++) {
 		const struct vlan_priority_tci_mapping *mp
 			= dev_info->egress_priority_map[i];
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index c42c0c400bf9..0663f99e977a 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -13,22 +13,24 @@ menuconfig NET_9P
 
 	  If unsure, say N.
 
+if NET_9P
+
 config NET_9P_VIRTIO
-	depends on NET_9P && EXPERIMENTAL && VIRTIO
+	depends on EXPERIMENTAL && VIRTIO
 	tristate "9P Virtio Transport (Experimental)"
 	help
 	  This builds support for a transports between
 	  guest partitions and a host partition.
 
 config NET_9P_RDMA
-	depends on NET_9P && INFINIBAND && EXPERIMENTAL
+	depends on INET && INFINIBAND && EXPERIMENTAL
 	tristate "9P RDMA Transport (Experimental)"
 	help
-	  This builds support for a RDMA transport.
+	  This builds support for an RDMA transport.
 
 config NET_9P_DEBUG
 	bool "Debug information"
-	depends on NET_9P
 	help
 	  Say Y if you want the 9P subsystem to log debug information.
 
+endif
diff --git a/net/9p/client.c b/net/9p/client.c
index 67717f69412e..4b529454616d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -189,6 +189,9 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 			printk(KERN_ERR "Couldn't grow tag array\n");
 			kfree(req->tc);
 			kfree(req->rc);
+			kfree(req->wq);
+			req->tc = req->rc = NULL;
+			req->wq = NULL;
 			return ERR_PTR(-ENOMEM);
 		}
 		req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
@@ -311,12 +314,6 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
 	r->status = REQ_STATUS_IDLE;
 	if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool))
 		p9_idpool_put(tag, c->tagpool);
-
-	/* if this was a flush request we have to free response fcall */
-	if (r->rc->id == P9_RFLUSH) {
-		kfree(r->tc);
-		kfree(r->rc);
-	}
 }
 
 /**
@@ -611,19 +608,21 @@ reterr:
 
 static struct p9_fid *p9_fid_create(struct p9_client *clnt)
 {
-	int err;
+	int ret;
 	struct p9_fid *fid;
+	unsigned long flags;
 
 	P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt);
 	fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
 	if (!fid)
 		return ERR_PTR(-ENOMEM);
 
-	fid->fid = p9_idpool_get(clnt->fidpool);
+	ret = p9_idpool_get(clnt->fidpool);
 	if (fid->fid < 0) {
-		err = -ENOSPC;
+		ret = -ENOSPC;
 		goto error;
 	}
+	fid->fid = ret;
 
 	memset(&fid->qid, 0, sizeof(struct p9_qid));
 	fid->mode = -1;
@@ -632,27 +631,28 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
 	fid->clnt = clnt;
 	fid->aux = NULL;
 
-	spin_lock(&clnt->lock);
+	spin_lock_irqsave(&clnt->lock, flags);
 	list_add(&fid->flist, &clnt->fidlist);
-	spin_unlock(&clnt->lock);
+	spin_unlock_irqrestore(&clnt->lock, flags);
 
 	return fid;
 
 error:
 	kfree(fid);
-	return ERR_PTR(err);
+	return ERR_PTR(ret);
 }
 
 static void p9_fid_destroy(struct p9_fid *fid)
 {
 	struct p9_client *clnt;
+	unsigned long flags;
 
 	P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid);
 	clnt = fid->clnt;
 	p9_idpool_put(fid->fid, clnt->fidpool);
-	spin_lock(&clnt->lock);
+	spin_lock_irqsave(&clnt->lock, flags);
 	list_del(&fid->flist);
-	spin_unlock(&clnt->lock);
+	spin_unlock_irqrestore(&clnt->lock, flags);
 	kfree(fid);
 }
 
@@ -818,7 +818,9 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
-					qid.type, qid.path, qid.version);
+					qid.type,
+					(unsigned long long)qid.path,
+					qid.version);
 
 	memmove(&fid->qid, &qid, sizeof(struct p9_qid));
 
@@ -865,7 +867,9 @@ p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname)
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RAUTH qid %x.%llx.%x\n",
-					qid.type, qid.path, qid.version);
+					qid.type,
+					(unsigned long long)qid.path,
+					qid.version);
 
 	memmove(&afid->qid, &qid, sizeof(struct p9_qid));
 	p9_free_req(clnt, req);
@@ -930,7 +934,8 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
 
 	for (count = 0; count < nwqids; count++)
 		P9_DPRINTK(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n",
-			count, wqids[count].type, wqids[count].path,
+			count, wqids[count].type,
+			(unsigned long long)wqids[count].path,
 			wqids[count].version);
 
 	if (nwname)
@@ -980,7 +985,9 @@ int p9_client_open(struct p9_fid *fid, int mode)
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n",
-				qid.type, qid.path, qid.version, iounit);
+				qid.type,
+				(unsigned long long)qid.path,
+				qid.version, iounit);
 
 	fid->mode = mode;
 	fid->iounit = iounit;
@@ -1023,7 +1030,9 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
-				qid.type, qid.path, qid.version, iounit);
+				qid.type,
+				(unsigned long long)qid.path,
+				qid.version, iounit);
 
 	fid->mode = mode;
 	fid->iounit = iounit;
@@ -1230,9 +1239,9 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
 		"<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
 		"<<< uid=%d gid=%d n_muid=%d\n",
 		ret->size, ret->type, ret->dev, ret->qid.type,
-		ret->qid.path, ret->qid.version, ret->mode,
-		ret->atime, ret->mtime, ret->length, ret->name,
-		ret->uid, ret->gid, ret->muid, ret->extension,
+		(unsigned long long)ret->qid.path, ret->qid.version, ret->mode,
+		ret->atime, ret->mtime, (unsigned long long)ret->length,
+		ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
 		ret->n_uid, ret->n_gid, ret->n_muid);
 
 free_and_error:
@@ -1255,9 +1264,9 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 		"     name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
 		"     uid=%d gid=%d n_muid=%d\n",
 		wst->size, wst->type, wst->dev, wst->qid.type,
-		wst->qid.path, wst->qid.version, wst->mode,
-		wst->atime, wst->mtime, wst->length, wst->name,
-		wst->uid, wst->gid, wst->muid, wst->extension,
+		(unsigned long long)wst->qid.path, wst->qid.version, wst->mode,
+		wst->atime, wst->mtime, (unsigned long long)wst->length,
+		wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
 		wst->n_uid, wst->n_gid, wst->n_muid);
 	err = 0;
 	clnt = fid->clnt;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 8d6cc4777aae..2f1fe5fc1228 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -45,7 +45,6 @@
 #include <net/9p/transport.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
-#include <rdma/ib_verbs.h>
 
 #define P9_PORT 5640
 #define P9_RDMA_SQ_DEPTH 32
@@ -589,6 +588,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 	if (IS_ERR(rdma->cm_id))
 		goto error;
 
+	/* Associate the client with the transport */
+	client->trans = rdma;
+
 	/* Resolve the server's address */
 	rdma->addr.sin_family = AF_INET;
 	rdma->addr.sin_addr.s_addr = in_aton(addr);
@@ -669,7 +671,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 	if (err || (rdma->state != P9_RDMA_CONNECTED))
 		goto error;
 
-	client->trans = rdma;
 	client->status = Connected;
 
 	return 0;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index de1e4f2f3a43..8fb54dc870b3 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog)
 		error = -EINVAL;
 		goto out;
 	}
-	vcc_insert_socket(sk);
+	if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
+		error = -EADDRINUSE;
+		goto out;
+	}
 	set_bit(ATM_VF_WAITING, &vcc->flags);
 	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
@@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog)
 		goto out;
 	}
 	set_bit(ATM_VF_LISTEN,&vcc->flags);
+	vcc_insert_socket(sk);
 	sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
 	error = -sk->sk_err;
 out:
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index fa5cda4e552a..45f61c348e36 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -101,6 +101,18 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
 	 pppoe_proto(skb) == htons(PPP_IPV6) && \
 	 brnf_filter_pppoe_tagged)
 
+static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
+{
+}
+
+static struct dst_ops fake_dst_ops = {
+	.family = AF_INET,
+	.protocol = __constant_htons(ETH_P_IP),
+	.update_pmtu = fake_update_pmtu,
+	.entry_size = sizeof(struct rtable),
+	.entries = ATOMIC_INIT(0),
+};
+
 /*
  * Initialize bogus route table used to keep netfilter happy.
  * Currently, we fill in the PMTU entry because netfilter
@@ -117,6 +129,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->u.dst.path = &rt->u.dst;
 	rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
 	rt->u.dst.flags = DST_NOXFRM;
+	rt->u.dst.ops = &fake_dst_ops;
 }
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7d4d2b3c137e..3dadb338addd 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
 	return n ? d : NULL;
 }
 
+/**
+ * find_rcv_list - determine optimal filterlist inside device filter struct
+ * @can_id: pointer to CAN identifier of a given can_filter
+ * @mask: pointer to CAN mask of a given can_filter
+ * @d: pointer to the device filter struct
+ *
+ * Description:
+ *  Returns the optimal filterlist to reduce the filter handling in the
+ *  receive path. This function is called by service functions that need
+ *  to register or unregister a can_filter in the filter lists.
+ *
+ *  A filter matches in general, when
+ *
+ *          <received_can_id> & mask == can_id & mask
+ *
+ *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describe
+ *  relevant bits for the filter.
+ *
+ *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
+ *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
+ *  there is a special filterlist and a special rx path filter handling.
+ *
+ * Return:
+ *  Pointer to optimal filterlist for the given can_id/mask pair.
+ *  Constistency checked mask.
+ *  Reduced can_id to have a preprocessed filter compare value.
+ */
 static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 					struct dev_rcv_lists *d)
 {
 	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
 
-	/* filter error frames */
+	/* filter for error frames in extra filterlist */
 	if (*mask & CAN_ERR_FLAG) {
-		/* clear CAN_ERR_FLAG in list entry */
+		/* clear CAN_ERR_FLAG in filter entry */
 		*mask &= CAN_ERR_MASK;
 		return &d->rx[RX_ERR];
 	}
 
-	/* ensure valid values in can_mask */
-	if (*mask & CAN_EFF_FLAG)
-		*mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG);
-	else
-		*mask &= (CAN_SFF_MASK | CAN_RTR_FLAG);
+	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
+
+#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
+
+	/* ensure valid values in can_mask for 'SFF only' frame filtering */
+	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
+		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
 
 	/* reduce condition testing at receive time */
 	*can_id &= *mask;
@@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
 	if (!(*mask))
 		return &d->rx[RX_ALL];
 
-	/* use extra filterset for the subscription of exactly *ONE* can_id */
-	if (*can_id & CAN_EFF_FLAG) {
-		if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) {
-			/* RFC: a use-case for hash-tables in the future? */
-			return &d->rx[RX_EFF];
+	/* extra filterlists for the subscription of a single non-RTR can_id */
+	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
+	    && !(*can_id & CAN_RTR_FLAG)) {
+
+		if (*can_id & CAN_EFF_FLAG) {
+			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
+				/* RFC: a future use-case for hash-tables? */
+				return &d->rx[RX_EFF];
+			}
+		} else {
+			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
+				return &d->rx_sff[*can_id];
 		}
-	} else {
-		if (*mask == CAN_SFF_MASK)
-			return &d->rx_sff[*can_id];
 	}
 
 	/* default: filter via can_id/can_mask */
@@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 		}
 	}
 
-	/* check CAN_ID specific entries */
+	/* check filterlists for single non-RTR can_ids */
+	if (can_id & CAN_RTR_FLAG)
+		return matches;
+
 	if (can_id & CAN_EFF_FLAG) {
 		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
 			if (r->can_id == can_id) {
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d0dd382001e2..da0d426c0ce4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -64,10 +64,11 @@
 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
 
 /* get best masking value for can_rx_register() for a given single can_id */
-#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
-		     (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
+#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
+		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
 
-#define CAN_BCM_VERSION "20080415"
+#define CAN_BCM_VERSION CAN_VERSION
 static __initdata const char banner[] = KERN_INFO
 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
 
diff --git a/net/compat.c b/net/compat.c
index 67fb6a3834a3..a3a2ba0fac08 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -226,14 +226,14 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
 		return 0; /* XXX: return error? check spec. */
 	}
 
-	if (level == SOL_SOCKET && type == SO_TIMESTAMP) {
+	if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
 		struct timeval *tv = (struct timeval *)data;
 		ctv.tv_sec = tv->tv_sec;
 		ctv.tv_usec = tv->tv_usec;
 		data = &ctv;
 		len = sizeof(ctv);
 	}
-	if (level == SOL_SOCKET && type == SO_TIMESTAMPNS) {
+	if (level == SOL_SOCKET && type == SCM_TIMESTAMPNS) {
 		struct timespec *ts = (struct timespec *)data;
 		cts.tv_sec = ts->tv_sec;
 		cts.tv_nsec = ts->tv_nsec;
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(compat_mc_getsockopt);
 static unsigned char nas[19]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
 				AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
 				AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
-				AL(6)};
+				AL(4)};
 #undef AL
 
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
@@ -738,52 +738,13 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns
 	return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
-				   int __user *upeer_addrlen,
-				   const compat_sigset_t __user *sigmask,
-				   compat_size_t sigsetsize, int flags)
-{
-	compat_sigset_t ss32;
-	sigset_t ksigmask, sigsaved;
-	int ret;
-
-	if (sigmask) {
-		if (sigsetsize != sizeof(compat_sigset_t))
-			return -EINVAL;
-		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
-			return -EFAULT;
-		sigset_from_compat(&ksigmask, &ss32);
-
-		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
-		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
-	}
-
-	ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
-
-	if (ret == -ERESTARTNOHAND) {
-		/*
-		 * Don't restore the signal mask yet. Let do_signal() deliver
-		 * the signal on the way back to userspace, before the signal
-		 * mask is restored.
-		 */
-		if (sigmask) {
-			memcpy(&current->saved_sigmask, &sigsaved,
-			       sizeof(sigsaved));
-			set_restore_sigmask();
-		}
-	} else if (sigmask)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-
-	return ret;
-}
-
 asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
 {
 	int ret;
 	u32 a[6];
 	u32 a0, a1;
 
-	if (call < SYS_SOCKET || call > SYS_PACCEPT)
+	if (call < SYS_SOCKET || call > SYS_ACCEPT4)
 		return -EINVAL;
 	if (copy_from_user(a, args, nas[call]))
 		return -EFAULT;
@@ -804,7 +765,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
 		ret = sys_listen(a0, a1);
 		break;
 	case SYS_ACCEPT:
-		ret = do_accept(a0, compat_ptr(a1), compat_ptr(a[2]), 0);
+		ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), 0);
 		break;
 	case SYS_GETSOCKNAME:
 		ret = sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2]));
@@ -844,9 +805,8 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
 	case SYS_RECVMSG:
 		ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
 		break;
-	case SYS_PACCEPT:
-		ret = compat_sys_paccept(a0, compat_ptr(a1), compat_ptr(a[2]),
-					 compat_ptr(a[3]), a[4], a[5]);
+	case SYS_ACCEPT4:
+		ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]);
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/net/core/dev.c b/net/core/dev.c
index d9038e328cc1..9174c77d3112 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2218,6 +2218,9 @@ int netif_receive_skb(struct sk_buff *skb)
 	int ret = NET_RX_DROP;
 	__be16 type;
 
+	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+		return NET_RX_SUCCESS;
+
 	/* if we've gotten here through NAPI, check netpoll */
 	if (netpoll_receive_skb(skb))
 		return NET_RX_DROP;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index f1d07b5c1e17..1895a4ca9c4f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -325,6 +325,38 @@ void unregister_pernet_subsys(struct pernet_operations *module)
 }
 EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
 
+int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
+{
+	int rv;
+
+	mutex_lock(&net_mutex);
+again:
+	rv = ida_get_new_above(&net_generic_ids, 1, id);
+	if (rv < 0) {
+		if (rv == -EAGAIN) {
+			ida_pre_get(&net_generic_ids, GFP_KERNEL);
+			goto again;
+		}
+		goto out;
+	}
+	rv = register_pernet_operations(first_device, ops);
+	if (rv < 0)
+		ida_remove(&net_generic_ids, *id);
+	mutex_unlock(&net_mutex);
+out:
+	return rv;
+}
+EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
+
+void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
+{
+	mutex_lock(&net_mutex);
+	unregister_pernet_operations(ops);
+	ida_remove(&net_generic_ids, id);
+	mutex_unlock(&net_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);
+
 /**
  * register_pernet_device - register a network namespace device
  * @ops:  pernet operations structure for the subsystem
330 * @ops: pernet operations structure for the subsystem 362 * @ops: pernet operations structure for the subsystem
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6c7af390be0a..dadac6281f20 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 
 	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
 
 	work = napi->poll(napi, budget);
 
+	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 	atomic_dec(&trapped);
 	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99f656d35b4f..8997e912aaaf 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1973,28 +1973,21 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
 	/* make sure that we don't pick a non-existing transmit queue */
 	ntxq = pkt_dev->odev->real_num_tx_queues;
-	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
-		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
-		       "disabled because CPU count (%d) exceeds number ",
-		       num_online_cpus());
-		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
-		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
-		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
-	}
+
 	if (ntxq <= pkt_dev->queue_map_min) {
 		printk(KERN_WARNING "pktgen: WARNING: Requested "
-		       "queue_map_min (%d) exceeds number of tx\n",
-		       pkt_dev->queue_map_min);
-		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
-		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		       "queue_map_min (zero-based) (%d) exceeds valid range "
+		       "[0 - %d] for (%d) queues on %s, resetting\n",
+		       pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq,
+		       pkt_dev->odev->name);
 		pkt_dev->queue_map_min = ntxq - 1;
 	}
-	if (ntxq <= pkt_dev->queue_map_max) {
+	if (pkt_dev->queue_map_max >= ntxq) {
 		printk(KERN_WARNING "pktgen: WARNING: Requested "
-		       "queue_map_max (%d) exceeds number of tx\n",
-		       pkt_dev->queue_map_max);
-		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
-		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		       "queue_map_max (zero-based) (%d) exceeds valid range "
+		       "[0 - %d] for (%d) queues on %s, resetting\n",
+		       pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq,
+		       pkt_dev->odev->name);
 		pkt_dev->queue_map_max = ntxq - 1;
 	}
 
@@ -2203,6 +2196,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 		}
 		pkt_dev->cur_queue_map = t;
 	}
+	pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues;
 }
 
 /* Increment/randomize headers according to flags and current values
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 31f29d2989fd..4dfb6b4d4559 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -878,7 +878,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		if (ifm->ifi_change)
 			flags = (flags & ifm->ifi_change) |
 				(dev->flags & ~ifm->ifi_change);
-		dev_change_flags(dev, flags);
+		err = dev_change_flags(dev, flags);
+		if (err < 0)
+			goto errout;
 	}
 
 	if (tb[IFLA_TXQLEN])
diff --git a/net/core/scm.c b/net/core/scm.c
index 10f5c65f6a47..b12303dd39d9 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -106,9 +106,25 @@ void __scm_destroy(struct scm_cookie *scm)
 
 	if (fpl) {
 		scm->fp = NULL;
-		for (i=fpl->count-1; i>=0; i--)
-			fput(fpl->fp[i]);
-		kfree(fpl);
+		if (current->scm_work_list) {
+			list_add_tail(&fpl->list, current->scm_work_list);
+		} else {
+			LIST_HEAD(work_list);
+
+			current->scm_work_list = &work_list;
+
+			list_add(&fpl->list, &work_list);
+			while (!list_empty(&work_list)) {
+				fpl = list_first_entry(&work_list, struct scm_fp_list, list);
+
+				list_del(&fpl->list);
+				for (i=fpl->count-1; i>=0; i--)
+					fput(fpl->fp[i]);
+				kfree(fpl);
+			}
+
+			current->scm_work_list = NULL;
+		}
 	}
 }
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e22e3a35359..65f7757465bd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -149,7 +149,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 
 void skb_truesize_bug(struct sk_buff *skb)
 {
-	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
+	WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
 	       "len=%u, sizeof(sk_buff)=%Zd\n",
 	       skb->truesize, skb->len, sizeof(struct sk_buff));
 }
@@ -449,6 +449,18 @@ void kfree_skb(struct sk_buff *skb)
 	__kfree_skb(skb);
 }
 
+/**
+ *	skb_recycle_check - check if skb can be reused for receive
+ *	@skb: buffer
+ *	@skb_size: minimum receive buffer size
+ *
+ *	Checks that the skb passed in is not shared or cloned, and
+ *	that it is linear and its head portion at least as large as
+ *	skb_size so that it can be recycled as a receive buffer.
+ *	If these conditions are met, this function does any necessary
+ *	reference count dropping and cleans up the skbuff as if it
+ *	just came from __alloc_skb().
+ */
 int skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
@@ -474,8 +486,8 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	shinfo->frag_list = NULL;
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb_reset_tail_pointer(skb);
 	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
 
 	return 1;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 5e2a3132a8c9..edf7220889a4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -136,7 +136,6 @@
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
@@ -187,7 +186,6 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
   "clock-AF_MAX"
 };
-#endif
 
 /*
  * sk_callback_lock locking rules are per-address-family,
@@ -2037,9 +2035,6 @@ static inline void release_proto_idx(struct proto *prot)
 
 int proto_register(struct proto *prot, int alloc_slab)
 {
-	char *request_sock_slab_name = NULL;
-	char *timewait_sock_slab_name;
-
 	if (alloc_slab) {
 		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
 					       SLAB_HWCACHE_ALIGN, NULL);
@@ -2053,12 +2048,12 @@ int proto_register(struct proto *prot, int alloc_slab)
 		if (prot->rsk_prot != NULL) {
 			static const char mask[] = "request_sock_%s";
 
-			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
-			if (request_sock_slab_name == NULL)
+			prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+			if (prot->rsk_prot->slab_name == NULL)
 				goto out_free_sock_slab;
 
-			sprintf(request_sock_slab_name, mask, prot->name);
-			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
+			sprintf(prot->rsk_prot->slab_name, mask, prot->name);
+			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);
 
@@ -2072,14 +2067,14 @@ int proto_register(struct proto *prot, int alloc_slab)
 		if (prot->twsk_prot != NULL) {
 			static const char mask[] = "tw_sock_%s";
 
-			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+			prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
 
-			if (timewait_sock_slab_name == NULL)
+			if (prot->twsk_prot->twsk_slab_name == NULL)
 				goto out_free_request_sock_slab;
 
-			sprintf(timewait_sock_slab_name, mask, prot->name);
+			sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
 			prot->twsk_prot->twsk_slab =
-				kmem_cache_create(timewait_sock_slab_name,
+				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0, SLAB_HWCACHE_ALIGN,
						  NULL);
@@ -2095,14 +2090,14 @@ int proto_register(struct proto *prot, int alloc_slab)
 	return 0;
 
 out_free_timewait_sock_slab_name:
-	kfree(timewait_sock_slab_name);
+	kfree(prot->twsk_prot->twsk_slab_name);
 out_free_request_sock_slab:
 	if (prot->rsk_prot && prot->rsk_prot->slab) {
 		kmem_cache_destroy(prot->rsk_prot->slab);
 		prot->rsk_prot->slab = NULL;
 	}
 out_free_request_sock_slab_name:
-	kfree(request_sock_slab_name);
+	kfree(prot->rsk_prot->slab_name);
 out_free_sock_slab:
 	kmem_cache_destroy(prot->slab);
 	prot->slab = NULL;
@@ -2125,18 +2120,14 @@ void proto_unregister(struct proto *prot)
 	}
 
 	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
-		const char *name = kmem_cache_name(prot->rsk_prot->slab);
-
 		kmem_cache_destroy(prot->rsk_prot->slab);
-		kfree(name);
+		kfree(prot->rsk_prot->slab_name);
 		prot->rsk_prot->slab = NULL;
 	}
 
 	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
-		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
-
 		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
-		kfree(name);
+		kfree(prot->twsk_prot->twsk_slab_name);
 		prot->twsk_prot->twsk_slab = NULL;
 	}
 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 37616884b8a9..1af5a79309e9 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -10,6 +10,7 @@
 
 #include <linux/list.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/phy.h>
 #include "dsa_priv.h"
 
@@ -49,11 +50,57 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
 /* slave device handling ****************************************************/
 static int dsa_slave_open(struct net_device *dev)
 {
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct net_device *master = p->parent->master_netdev;
+	int err;
+
+	if (!(master->flags & IFF_UP))
+		return -ENETDOWN;
+
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
+		err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN);
+		if (err < 0)
+			goto out;
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(master, 1);
+		if (err < 0)
+			goto del_unicast;
+	}
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(master, 1);
+		if (err < 0)
+			goto clear_allmulti;
+	}
+
 	return 0;
+
+clear_allmulti:
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(master, -1);
+del_unicast:
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+		dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+out:
+	return err;
 }
 
 static int dsa_slave_close(struct net_device *dev)
 {
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct net_device *master = p->parent->master_netdev;
+
+	dev_mc_unsync(master, dev);
+	dev_unicast_unsync(master, dev);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(master, -1);
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(master, -1);
+
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+		dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+
 	return 0;
 }
 
@@ -77,9 +124,30 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
 	dev_unicast_sync(master, dev);
 }
 
-static int dsa_slave_set_mac_address(struct net_device *dev, void *addr)
+static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
 {
-	memcpy(dev->dev_addr, addr + 2, 6);
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct net_device *master = p->parent->master_netdev;
+	struct sockaddr *addr = a;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (!(dev->flags & IFF_UP))
+		goto out;
+
+	if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
+		err = dev_unicast_add(master, addr->sa_data, ETH_ALEN);
+		if (err < 0)
+			return err;
+	}
+
+	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+		dev_unicast_delete(master, dev->dev_addr, ETH_ALEN);
+
+out:
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 
 	return 0;
 }
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index bdc0510b53b7..31866543332e 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -159,6 +159,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	skb->dev = ds->ports[source_port];
 	skb_push(skb, ETH_HLEN);
+	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
 
 	skb->dev->last_rx = jiffies;
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index f985ea993843..9f4ce55eae59 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -178,6 +178,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	skb->dev = ds->ports[source_port];
 	skb_push(skb, ETH_HLEN);
+	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
 
 	skb->dev->last_rx = jiffies;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d3117764b2c2..efd26697e716 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -95,6 +95,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	skb->dev = ds->ports[source_port];
 	skb_push(skb, ETH_HLEN);
+	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, skb->dev);
 
 	skb->dev->last_rx = jiffies;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1fbff5fa4241..1aa2dc9e380e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1117,6 +1117,7 @@ int inet_sk_rebuild_header(struct sock *sk)
 				},
 			},
 			.proto = sk->sk_protocol,
+			.flags = inet_sk_flowi_flags(sk),
 			.uli_u = {
 				.ports = {
 					.sport = inet->sport,
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 490e035c6d90..2e78f6bd9775 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -2063,9 +2063,10 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
 	u32 opt_len;
 	int len_delta;
 
-	buf_len = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
-	if (buf_len < 0)
-		return buf_len;
+	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
+	if (ret_val < 0)
+		return ret_val;
+	buf_len = ret_val;
 	opt_len = (buf_len + 3) & ~3;
 
 	/* we overwrite any existing options to ensure that we have enough
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 861978a4f1a8..cfb38ac9d698 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -209,9 +209,17 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 
 		hash = protocol & (MAX_INET_PROTOS - 1);
 		ipprot = rcu_dereference(inet_protos[hash]);
-		if (ipprot != NULL && (net == &init_net || ipprot->netns_ok)) {
+		if (ipprot != NULL) {
 			int ret;
 
+			if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
+				if (net_ratelimit())
+					printk("%s: proto %d isn't netns-ready\n",
+						__func__, protocol);
+				kfree_skb(skb);
+				goto out;
+			}
+
 			if (!ipprot->no_policy) {
 				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 					kfree_skb(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b42e082cc170..25924b1eb2ef 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1945,13 +1945,14 @@ int __init ip_mr_init(void)
 		goto proc_cache_fail;
 #endif
 	return 0;
-reg_notif_fail:
-	kmem_cache_destroy(mrt_cachep);
 #ifdef CONFIG_PROC_FS
-proc_vif_fail:
-	unregister_netdevice_notifier(&ip_mr_notifier);
 proc_cache_fail:
 	proc_net_remove(&init_net, "ip_mr_vif");
+proc_vif_fail:
+	unregister_netdevice_notifier(&ip_mr_notifier);
 #endif
+reg_notif_fail:
+	del_timer(&ipmr_expire_timer);
+	kmem_cache_destroy(mrt_cachep);
 	return err;
 }
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index bea54a685109..8d489e746b21 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,7 @@ static struct
 static struct xt_table nat_table = {
 	.name = "nat",
 	.valid_hooks = NAT_VALID_HOOKS,
-	.lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
+	.lock = __RW_LOCK_UNLOCKED(nat_table.lock),
 	.me = THIS_MODULE,
 	.af = AF_INET,
 };
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8f5a403f6f6b..a631a1f110ca 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -237,43 +237,45 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
+static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
+			     unsigned short *type, int count)
+{
+	int j;
+
+	if (count) {
+		seq_printf(seq, "\nIcmpMsg:");
+		for (j = 0; j < count; ++j)
+			seq_printf(seq, " %sType%u",
+				type[j] & 0x100 ? "Out" : "In",
+				type[j] & 0xff);
+		seq_printf(seq, "\nIcmpMsg:");
+		for (j = 0; j < count; ++j)
+			seq_printf(seq, " %lu", vals[j]);
+	}
+}
+
 static void icmpmsg_put(struct seq_file *seq)
 {
 #define PERLINE	16
 
-	int j, i, count;
-	static int out[PERLINE];
+	int i, count;
+	unsigned short type[PERLINE];
+	unsigned long vals[PERLINE], val;
 	struct net *net = seq->private;
 
 	count = 0;
 	for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-
-		if (snmp_fold_field((void **) net->mib.icmpmsg_statistics, i))
-			out[count++] = i;
-		if (count < PERLINE)
-			continue;
-
-		seq_printf(seq, "\nIcmpMsg:");
-		for (j = 0; j < PERLINE; ++j)
-			seq_printf(seq, " %sType%u", i & 0x100 ? "Out" : "In",
-				   i & 0xff);
-		seq_printf(seq, "\nIcmpMsg: ");
-		for (j = 0; j < PERLINE; ++j)
-			seq_printf(seq, " %lu",
-				   snmp_fold_field((void **) net->mib.icmpmsg_statistics,
-				   out[j]));
-		seq_putc(seq, '\n');
-	}
-	if (count) {
-		seq_printf(seq, "\nIcmpMsg:");
-		for (j = 0; j < count; ++j)
-			seq_printf(seq, " %sType%u", out[j] & 0x100 ? "Out" :
-				"In", out[j] & 0xff);
-		seq_printf(seq, "\nIcmpMsg:");
-		for (j = 0; j < count; ++j)
-			seq_printf(seq, " %lu", snmp_fold_field((void **)
-				net->mib.icmpmsg_statistics, out[j]));
+		val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i);
+		if (val) {
+			type[count] = i;
+			vals[count++] = val;
+		}
+		if (count == PERLINE) {
+			icmpmsg_put_line(seq, vals, type, count);
+			count = 0;
+		}
 	}
+	icmpmsg_put_line(seq, vals, type, count);
 
 #undef PERLINE
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eccb7165a80c..c5aca0bb116a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1374,8 +1374,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			    sk->sk_state == TCP_CLOSE ||
 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 			    !timeo ||
-			    signal_pending(current) ||
-			    (flags & MSG_PEEK))
+			    signal_pending(current))
 				break;
 		} else {
 			if (sock_flag(sk, SOCK_DONE))
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index af99776146ff..937549b8a921 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -69,9 +69,12 @@ static u32 htcp_cwnd_undo(struct sock *sk)
69 const struct tcp_sock *tp = tcp_sk(sk); 69 const struct tcp_sock *tp = tcp_sk(sk);
70 struct htcp *ca = inet_csk_ca(sk); 70 struct htcp *ca = inet_csk_ca(sk);
71 71
72 ca->last_cong = ca->undo_last_cong; 72 if (ca->undo_last_cong) {
73 ca->maxRTT = ca->undo_maxRTT; 73 ca->last_cong = ca->undo_last_cong;
74 ca->old_maxB = ca->undo_old_maxB; 74 ca->maxRTT = ca->undo_maxRTT;
75 ca->old_maxB = ca->undo_old_maxB;
76 ca->undo_last_cong = 0;
77 }
75 78
76 return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta); 79 return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
77} 80}
@@ -268,7 +271,10 @@ static void htcp_state(struct sock *sk, u8 new_state)
268 case TCP_CA_Open: 271 case TCP_CA_Open:
269 { 272 {
270 struct htcp *ca = inet_csk_ca(sk); 273 struct htcp *ca = inet_csk_ca(sk);
271 ca->last_cong = jiffies; 274 if (ca->undo_last_cong) {
275 ca->last_cong = jiffies;
276 ca->undo_last_cong = 0;
277 }
272 } 278 }
273 break; 279 break;
274 case TCP_CA_CWR: 280 case TCP_CA_CWR:
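Both H-TCP hunks above treat the saved congestion state as a one-shot snapshot: the restore in htcp_cwnd_undo() and the last_cong reset in the TCP_CA_Open case only run when undo_last_cong is non-zero, and both clear it afterwards, so stale snapshot values cannot be replayed. A small userspace sketch of that one-shot save/undo pattern, with made-up field names:

#include <stdio.h>

struct cc_state {
	unsigned long last_cong, max_rtt;
	unsigned long undo_last_cong, undo_max_rtt;	/* snapshot; 0 == invalid */
};

static void save_snapshot(struct cc_state *s)
{
	s->undo_last_cong = s->last_cong;
	s->undo_max_rtt = s->max_rtt;
}

static void undo_snapshot(struct cc_state *s)
{
	if (s->undo_last_cong) {		/* only restore a valid snapshot */
		s->last_cong = s->undo_last_cong;
		s->max_rtt = s->undo_max_rtt;
		s->undo_last_cong = 0;		/* ...and only once */
	}
}

int main(void)
{
	struct cc_state s = { .last_cong = 100, .max_rtt = 50 };

	save_snapshot(&s);
	s.last_cong = 200;			/* congestion event */
	undo_snapshot(&s);			/* restores 100 */
	s.last_cong = 300;
	undo_snapshot(&s);			/* no-op: snapshot already consumed */
	printf("last_cong=%lu\n", s.last_cong);	/* prints 300 */
	return 0;
}
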
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e4c5ac9fe89b..fe3b4bdfd251 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1028,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1028 1028
1029/* Compute the current effective MSS, taking SACKs and IP options, 1029/* Compute the current effective MSS, taking SACKs and IP options,
1030 * and even PMTU discovery events into account. 1030 * and even PMTU discovery events into account.
1031 *
1032 * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
1033 * cannot be large. However, taking into account rare use of URG, this
1034 * is not a big flaw.
1035 */ 1031 */
1036unsigned int tcp_current_mss(struct sock *sk, int large_allowed) 1032unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
1037{ 1033{
@@ -1046,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
1046 1042
1047 mss_now = tp->mss_cache; 1043 mss_now = tp->mss_cache;
1048 1044
1049 if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp)) 1045 if (large_allowed && sk_can_gso(sk))
1050 doing_tso = 1; 1046 doing_tso = 1;
1051 1047
1052 if (dst) { 1048 if (dst) {
@@ -1516,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
1516 * send_head. This happens as incoming acks open up the remote 1512 * send_head. This happens as incoming acks open up the remote
1517 * window for us. 1513 * window for us.
1518 * 1514 *
1515 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1516 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1517 * account rare use of URG, this is not a big flaw.
1518 *
1519 * Returns 1, if no segments are in flight and we have queued segments, but 1519 * Returns 1, if no segments are in flight and we have queued segments, but
1520 * cannot send anything now because of SWS or another problem. 1520 * cannot send anything now because of SWS or another problem.
1521 */ 1521 */
@@ -1567,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1567 } 1567 }
1568 1568
1569 limit = mss_now; 1569 limit = mss_now;
1570 if (tso_segs > 1) 1570 if (tso_segs > 1 && !tcp_urg_mode(tp))
1571 limit = tcp_mss_split_point(sk, skb, mss_now, 1571 limit = tcp_mss_split_point(sk, skb, mss_now,
1572 cwnd_quota); 1572 cwnd_quota);
1573 1573
@@ -1616,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1616 */ 1616 */
1617void tcp_push_one(struct sock *sk, unsigned int mss_now) 1617void tcp_push_one(struct sock *sk, unsigned int mss_now)
1618{ 1618{
1619 struct tcp_sock *tp = tcp_sk(sk);
1619 struct sk_buff *skb = tcp_send_head(sk); 1620 struct sk_buff *skb = tcp_send_head(sk);
1620 unsigned int tso_segs, cwnd_quota; 1621 unsigned int tso_segs, cwnd_quota;
1621 1622
@@ -1630,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
1630 BUG_ON(!tso_segs); 1631 BUG_ON(!tso_segs);
1631 1632
1632 limit = mss_now; 1633 limit = mss_now;
1633 if (tso_segs > 1) 1634 if (tso_segs > 1 && !tcp_urg_mode(tp))
1634 limit = tcp_mss_split_point(sk, skb, mss_now, 1635 limit = tcp_mss_split_point(sk, skb, mss_now,
1635 cwnd_quota); 1636 cwnd_quota);
1636 1637
@@ -2279,6 +2280,11 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2279 } 2280 }
2280 2281
2281 memset(&opts, 0, sizeof(opts)); 2282 memset(&opts, 0, sizeof(opts));
2283#ifdef CONFIG_SYN_COOKIES
2284 if (unlikely(req->cookie_ts))
2285 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2286 else
2287#endif
2282 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2288 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2283 tcp_header_size = tcp_synack_options(sk, req, mss, 2289 tcp_header_size = tcp_synack_options(sk, req, mss,
2284 skb, &opts, &md5) + 2290 skb, &opts, &md5) +
@@ -2304,11 +2310,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2304 2310
2305 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2311 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2306 th->window = htons(min(req->rcv_wnd, 65535U)); 2312 th->window = htons(min(req->rcv_wnd, 65535U));
2307#ifdef CONFIG_SYN_COOKIES
2308 if (unlikely(req->cookie_ts))
2309 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2310 else
2311#endif
2312 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 2313 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
2313 th->doff = (tcp_header_size >> 2); 2314 th->doff = (tcp_header_size >> 2);
2314 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 2315 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
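Two separate changes sit in the tcp_output.c diff above. First, the urgent-mode test moves out of tcp_current_mss(), where it used to disable TSO sizing wholesale, and into the two split-point call sites, so a sender with urgent data pending is apparently just capped at one MSS per send decision rather than losing TSO entirely. Second, in tcp_make_synack() the SYN-cookie timestamp is now assigned to TCP_SKB_CB(skb)->when before tcp_synack_options() runs, so any option value derived from `when` can see the cookie-encoded timestamp instead of `when` being patched only after the options were already computed. A hedged sketch of the first point; send_limit(), split_point and the numbers are illustrative stand-ins for tcp_write_xmit()/tcp_push_one() and tcp_mss_split_point(), not the kernel's code:

#include <stdio.h>

static unsigned int send_limit(unsigned int mss_now, unsigned int tso_segs,
			       int urg_mode, unsigned int split_point)
{
	unsigned int limit = mss_now;

	/* A multi-segment (TSO) send may exceed one MSS only when no urgent
	 * data is pending; otherwise fall back to a single MSS. */
	if (tso_segs > 1 && !urg_mode)
		limit = split_point;
	return limit;
}

int main(void)
{
	printf("%u %u\n", send_limit(1460, 10, 0, 14600),	/* 14600 */
			  send_limit(1460, 10, 1, 14600));	/* 1460  */
	return 0;
}
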
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 14504dada116..a453aac91bd3 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -40,18 +40,14 @@
40 40
41#include "tcp_vegas.h" 41#include "tcp_vegas.h"
42 42
43/* Default values of the Vegas variables, in fixed-point representation 43static int alpha = 2;
44 * with V_PARAM_SHIFT bits to the right of the binary point. 44static int beta = 4;
45 */ 45static int gamma = 1;
46#define V_PARAM_SHIFT 1
47static int alpha = 2<<V_PARAM_SHIFT;
48static int beta = 4<<V_PARAM_SHIFT;
49static int gamma = 1<<V_PARAM_SHIFT;
50 46
51module_param(alpha, int, 0644); 47module_param(alpha, int, 0644);
52MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)"); 48MODULE_PARM_DESC(alpha, "lower bound of packets in network");
53module_param(beta, int, 0644); 49module_param(beta, int, 0644);
54MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)"); 50MODULE_PARM_DESC(beta, "upper bound of packets in network");
55module_param(gamma, int, 0644); 51module_param(gamma, int, 0644);
56MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); 52MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
57 53
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
172 return; 168 return;
173 } 169 }
174 170
175 /* The key players are v_beg_snd_una and v_beg_snd_nxt.
176 *
177 * These are so named because they represent the approximate values
178 * of snd_una and snd_nxt at the beginning of the current RTT. More
179 * precisely, they represent the amount of data sent during the RTT.
180 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
181 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
182 * bytes of data have been ACKed during the course of the RTT, giving
183 * an "actual" rate of:
184 *
185 * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
186 *
187 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
188 * because delayed ACKs can cover more than one segment, so they
189 * don't line up nicely with the boundaries of RTTs.
190 *
191 * Another unfortunate fact of life is that delayed ACKs delay the
192 * advance of the left edge of our send window, so that the number
193 * of bytes we send in an RTT is often less than our cwnd will allow.
194 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
195 */
196
197 if (after(ack, vegas->beg_snd_nxt)) { 171 if (after(ack, vegas->beg_snd_nxt)) {
198 /* Do the Vegas once-per-RTT cwnd adjustment. */ 172 /* Do the Vegas once-per-RTT cwnd adjustment. */
199 u32 old_wnd, old_snd_cwnd;
200
201
202 /* Here old_wnd is essentially the window of data that was
203 * sent during the previous RTT, and has all
204 * been acknowledged in the course of the RTT that ended
205 * with the ACK we just received. Likewise, old_snd_cwnd
206 * is the cwnd during the previous RTT.
207 */
208 old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
209 tp->mss_cache;
210 old_snd_cwnd = vegas->beg_snd_cwnd;
211 173
212 /* Save the extent of the current window so we can use this 174 /* Save the extent of the current window so we can use this
213 * at the end of the next RTT. 175 * at the end of the next RTT.
214 */ 176 */
215 vegas->beg_snd_una = vegas->beg_snd_nxt;
216 vegas->beg_snd_nxt = tp->snd_nxt; 177 vegas->beg_snd_nxt = tp->snd_nxt;
217 vegas->beg_snd_cwnd = tp->snd_cwnd;
218 178
219 /* We do the Vegas calculations only if we got enough RTT 179 /* We do the Vegas calculations only if we got enough RTT
220 * samples that we can be reasonably sure that we got 180 * samples that we can be reasonably sure that we got
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
252 * 212 *
253 * This is: 213 * This is:
254 * (actual rate in segments) * baseRTT 214 * (actual rate in segments) * baseRTT
255 * We keep it as a fixed point number with
256 * V_PARAM_SHIFT bits to the right of the binary point.
257 */ 215 */
258 target_cwnd = ((u64)old_wnd * vegas->baseRTT); 216 target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
259 target_cwnd <<= V_PARAM_SHIFT;
260 do_div(target_cwnd, rtt);
261 217
262 /* Calculate the difference between the window we had, 218 /* Calculate the difference between the window we had,
263 * and the window we would like to have. This quantity 219 * and the window we would like to have. This quantity
264 * is the "Diff" from the Arizona Vegas papers. 220 * is the "Diff" from the Arizona Vegas papers.
265 *
266 * Again, this is a fixed point number with
267 * V_PARAM_SHIFT bits to the right of the binary
268 * point.
269 */ 221 */
270 diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; 222 diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
271 223
272 if (diff > gamma && tp->snd_ssthresh > 2 ) { 224 if (diff > gamma && tp->snd_ssthresh > 2 ) {
273 /* Going too fast. Time to slow down 225 /* Going too fast. Time to slow down
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
282 * truncation robs us of full link 234 * truncation robs us of full link
283 * utilization. 235 * utilization.
284 */ 236 */
285 tp->snd_cwnd = min(tp->snd_cwnd, 237 tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
286 ((u32)target_cwnd >>
287 V_PARAM_SHIFT)+1);
288 238
289 } else if (tp->snd_cwnd <= tp->snd_ssthresh) { 239 } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
290 /* Slow start. */ 240 /* Slow start. */
291 tcp_slow_start(tp); 241 tcp_slow_start(tp);
292 } else { 242 } else {
293 /* Congestion avoidance. */ 243 /* Congestion avoidance. */
294 u32 next_snd_cwnd;
295 244
296 /* Figure out where we would like cwnd 245 /* Figure out where we would like cwnd
297 * to be. 246 * to be.
@@ -300,32 +249,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
300 /* The old window was too fast, so 249 /* The old window was too fast, so
301 * we slow down. 250 * we slow down.
302 */ 251 */
303 next_snd_cwnd = old_snd_cwnd - 1; 252 tp->snd_cwnd--;
304 } else if (diff < alpha) { 253 } else if (diff < alpha) {
305 /* We don't have enough extra packets 254 /* We don't have enough extra packets
306 * in the network, so speed up. 255 * in the network, so speed up.
307 */ 256 */
308 next_snd_cwnd = old_snd_cwnd + 1; 257 tp->snd_cwnd++;
309 } else { 258 } else {
310 /* Sending just as fast as we 259 /* Sending just as fast as we
311 * should be. 260 * should be.
312 */ 261 */
313 next_snd_cwnd = old_snd_cwnd;
314 } 262 }
315
316 /* Adjust cwnd upward or downward, toward the
317 * desired value.
318 */
319 if (next_snd_cwnd > tp->snd_cwnd)
320 tp->snd_cwnd++;
321 else if (next_snd_cwnd < tp->snd_cwnd)
322 tp->snd_cwnd--;
323 } 263 }
324 264
325 if (tp->snd_cwnd < 2) 265 if (tp->snd_cwnd < 2)
326 tp->snd_cwnd = 2; 266 tp->snd_cwnd = 2;
327 else if (tp->snd_cwnd > tp->snd_cwnd_clamp) 267 else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
328 tp->snd_cwnd = tp->snd_cwnd_clamp; 268 tp->snd_cwnd = tp->snd_cwnd_clamp;
269
270 tp->snd_ssthresh = tcp_current_ssthresh(sk);
329 } 271 }
330 272
331 /* Wipe the slate clean for the next RTT. */ 273 /* Wipe the slate clean for the next RTT. */
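The Vegas rewrite above drops the V_PARAM_SHIFT fixed-point scaling: alpha/beta/gamma shrink from 4/8/2 back to 2/4/1, target_cwnd becomes snd_cwnd * baseRTT / rtt in whole packets, and the queue estimate becomes diff = snd_cwnd * (rtt - baseRTT) / baseRTT, also in packets, computed from the current cwnd instead of the deleted old_wnd/old_snd_cwnd bookkeeping. A worked comparison of the old and new arithmetic on sample numbers (the RTT units and the specific values are chosen only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 20;		/* packets in flight */
	unsigned long base_rtt = 100;	/* best RTT seen so far */
	unsigned long rtt = 120;	/* RTT measured this round */

	/* Old code (V_PARAM_SHIFT = 1, i.e. half-packet units). */
	unsigned long long old_target =
		((unsigned long long)cwnd * base_rtt << 1) / rtt;
	long old_diff_halfpkts = (long)(cwnd << 1) - (long)old_target;

	/* New code (plain packets, divided by baseRTT). */
	unsigned long new_target = cwnd * base_rtt / rtt;
	unsigned long new_diff = cwnd * (rtt - base_rtt) / base_rtt;

	printf("old: target=%llu half-pkts, diff=%ld half-pkts (vs alpha=4, beta=8)\n",
	       old_target, old_diff_halfpkts);
	printf("new: target=%lu pkts, diff=%lu pkts (vs alpha=2, beta=4)\n",
	       new_target, new_diff);
	return 0;
}

With these inputs the old form gives diff = 7 half-packets (3.5 packets) and the new form gives 4 packets; the estimates are close but not identical, since the new expression divides the RTT excess by baseRTT rather than by rtt, which is why the module parameters are halved in the same patch.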
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 2095abc3caba..98c1fd09be88 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -284,7 +284,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
284} 284}
285EXPORT_SYMBOL_GPL(udp4_lib_lookup); 285EXPORT_SYMBOL_GPL(udp4_lib_lookup);
286 286
287static inline struct sock *udp_v4_mcast_next(struct sock *sk, 287static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
288 __be16 loc_port, __be32 loc_addr, 288 __be16 loc_port, __be32 loc_addr,
289 __be16 rmt_port, __be32 rmt_addr, 289 __be16 rmt_port, __be32 rmt_addr,
290 int dif) 290 int dif)
@@ -296,7 +296,8 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
296 sk_for_each_from(s, node) { 296 sk_for_each_from(s, node) {
297 struct inet_sock *inet = inet_sk(s); 297 struct inet_sock *inet = inet_sk(s);
298 298
299 if (s->sk_hash != hnum || 299 if (!net_eq(sock_net(s), net) ||
300 s->sk_hash != hnum ||
300 (inet->daddr && inet->daddr != rmt_addr) || 301 (inet->daddr && inet->daddr != rmt_addr) ||
301 (inet->dport != rmt_port && inet->dport) || 302 (inet->dport != rmt_port && inet->dport) ||
302 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || 303 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
@@ -632,6 +633,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
632 .saddr = saddr, 633 .saddr = saddr,
633 .tos = tos } }, 634 .tos = tos } },
634 .proto = sk->sk_protocol, 635 .proto = sk->sk_protocol,
636 .flags = inet_sk_flowi_flags(sk),
635 .uli_u = { .ports = 637 .uli_u = { .ports =
636 { .sport = inet->sport, 638 { .sport = inet->sport,
637 .dport = dport } } }; 639 .dport = dport } } };
@@ -1079,15 +1081,16 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1079 read_lock(&udp_hash_lock); 1081 read_lock(&udp_hash_lock);
1080 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); 1082 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
1081 dif = skb->dev->ifindex; 1083 dif = skb->dev->ifindex;
1082 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); 1084 sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
1083 if (sk) { 1085 if (sk) {
1084 struct sock *sknext = NULL; 1086 struct sock *sknext = NULL;
1085 1087
1086 do { 1088 do {
1087 struct sk_buff *skb1 = skb; 1089 struct sk_buff *skb1 = skb;
1088 1090
1089 sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr, 1091 sknext = udp_v4_mcast_next(net, sk_next(sk), uh->dest,
1090 uh->source, saddr, dif); 1092 daddr, uh->source, saddr,
1093 dif);
1091 if (sknext) 1094 if (sknext)
1092 skb1 = skb_clone(skb, GFP_ATOMIC); 1095 skb1 = skb_clone(skb, GFP_ATOMIC);
1093 1096
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 07735ed280d7..55dc6beab9aa 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -33,6 +33,7 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
33 x->sel.dport_mask = htons(0xffff); 33 x->sel.dport_mask = htons(0xffff);
34 x->sel.sport = xfrm_flowi_sport(fl); 34 x->sel.sport = xfrm_flowi_sport(fl);
35 x->sel.sport_mask = htons(0xffff); 35 x->sel.sport_mask = htons(0xffff);
36 x->sel.family = AF_INET;
36 x->sel.prefixlen_d = 32; 37 x->sel.prefixlen_d = 32;
37 x->sel.prefixlen_s = 32; 38 x->sel.prefixlen_s = 32;
38 x->sel.proto = fl->proto; 39 x->sel.proto = fl->proto;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index eea9542728ca..d9da5eb9dcb2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2483,8 +2483,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2483 if (!idev && dev->mtu >= IPV6_MIN_MTU) 2483 if (!idev && dev->mtu >= IPV6_MIN_MTU)
2484 idev = ipv6_add_dev(dev); 2484 idev = ipv6_add_dev(dev);
2485 2485
2486 if (idev) 2486 if (idev) {
2487 idev->if_flags |= IF_READY; 2487 idev->if_flags |= IF_READY;
2488 run_pending = 1;
2489 }
2488 } else { 2490 } else {
2489 if (!addrconf_qdisc_ok(dev)) { 2491 if (!addrconf_qdisc_ok(dev)) {
2490 /* device is still not ready. */ 2492 /* device is still not ready. */
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 410046a8cc91..e44deb8d4df2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -661,6 +661,11 @@ int datagram_send_ctl(struct net *net,
661 switch (rthdr->type) { 661 switch (rthdr->type) {
662#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 662#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
663 case IPV6_SRCRT_TYPE_2: 663 case IPV6_SRCRT_TYPE_2:
664 if (rthdr->hdrlen != 2 ||
665 rthdr->segments_left != 1) {
666 err = -EINVAL;
667 goto exit_f;
668 }
664 break; 669 break;
665#endif 670#endif
666 default: 671 default:
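The added check rejects a type 2 routing header unless hdrlen is 2 and segments_left is 1: for Mobile IPv6 a type 2 header carries exactly one 128-bit home address, i.e. 16 bytes of data (hdrlen counts 8-byte units beyond the first 8 bytes) with exactly one segment left to visit. The same validation is added again in the ipv6_sockglue.c hunk further down. A standalone sketch of the check against a local replica of the header prefix; the struct below mirrors the first four bytes of the kernel's ipv6_rt_hdr but is re-declared here purely for illustration:

#include <stdint.h>
#include <stdio.h>

struct rt_hdr {			/* first 4 bytes of an IPv6 routing header */
	uint8_t nexthdr;
	uint8_t hdrlen;		/* 8-byte units, not counting the first 8 bytes */
	uint8_t type;
	uint8_t segments_left;
};

static int rthdr_type2_ok(const struct rt_hdr *rt)
{
	/* Type 2 (RFC 3775): exactly one home address, so 16 bytes of data
	 * (hdrlen == 2) and exactly one segment left to visit. */
	return rt->hdrlen == 2 && rt->segments_left == 1;
}

int main(void)
{
	struct rt_hdr good = { .hdrlen = 2, .type = 2, .segments_left = 1 };
	struct rt_hdr bad  = { .hdrlen = 4, .type = 2, .segments_left = 1 };

	printf("good: %d, bad: %d\n", rthdr_type2_ok(&good), rthdr_type2_ok(&bad));
	return 0;
}
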
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 182f8a177e7f..0524769632e7 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -224,7 +224,7 @@ static struct file_operations ip6mr_vif_fops = {
224 .open = ip6mr_vif_open, 224 .open = ip6mr_vif_open,
225 .read = seq_read, 225 .read = seq_read,
226 .llseek = seq_lseek, 226 .llseek = seq_lseek,
227 .release = seq_release, 227 .release = seq_release_private,
228}; 228};
229 229
230static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) 230static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
@@ -338,7 +338,7 @@ static struct file_operations ip6mr_mfc_fops = {
338 .open = ipmr_mfc_open, 338 .open = ipmr_mfc_open,
339 .read = seq_read, 339 .read = seq_read,
340 .llseek = seq_lseek, 340 .llseek = seq_lseek,
341 .release = seq_release, 341 .release = seq_release_private,
342}; 342};
343#endif 343#endif
344 344
@@ -981,14 +981,15 @@ int __init ip6_mr_init(void)
981 goto proc_cache_fail; 981 goto proc_cache_fail;
982#endif 982#endif
983 return 0; 983 return 0;
984reg_notif_fail:
985 kmem_cache_destroy(mrt_cachep);
986#ifdef CONFIG_PROC_FS 984#ifdef CONFIG_PROC_FS
987proc_vif_fail:
988 unregister_netdevice_notifier(&ip6_mr_notifier);
989proc_cache_fail: 985proc_cache_fail:
990 proc_net_remove(&init_net, "ip6_mr_vif"); 986 proc_net_remove(&init_net, "ip6_mr_vif");
987proc_vif_fail:
988 unregister_netdevice_notifier(&ip6_mr_notifier);
991#endif 989#endif
990reg_notif_fail:
991 del_timer(&ipmr_expire_timer);
992 kmem_cache_destroy(mrt_cachep);
992 return err; 993 return err;
993} 994}
994 995
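The reordered labels in ip6_mr_init() make the error path undo only what actually succeeded, and in reverse order: proc entries first, then the notifier, then the expiry timer, and the cache last. A generic userspace sketch of the goto-unwind pattern this restores; the three malloc() calls merely stand in for persistent registrations, and failure is simulated by fail_at:

#include <stdio.h>
#include <stdlib.h>

static int init_all(int fail_at)
{
	void *cache, *notifier, *procdir;

	cache = fail_at == 1 ? NULL : malloc(32);	/* step 1 */
	if (!cache)
		goto fail_cache;
	notifier = fail_at == 2 ? NULL : malloc(32);	/* step 2 */
	if (!notifier)
		goto fail_notifier;
	procdir = fail_at == 3 ? NULL : malloc(32);	/* step 3 */
	if (!procdir)
		goto fail_proc;
	return 0;		/* success: everything stays "registered" */

	/* Unwind strictly in reverse order of what succeeded. */
fail_proc:
	free(notifier);
fail_notifier:
	free(cache);
fail_cache:
	return -1;
}

int main(void)
{
	printf("fail at step 2 -> %d, success -> %d\n",
	       init_all(2), init_all(0));
	return 0;
}
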
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4e5eac301f91..2aa294be0c79 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -366,11 +366,16 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
366 } 366 }
367 367
368 /* routing header option needs extra check */ 368 /* routing header option needs extra check */
369 retv = -EINVAL;
369 if (optname == IPV6_RTHDR && opt && opt->srcrt) { 370 if (optname == IPV6_RTHDR && opt && opt->srcrt) {
370 struct ipv6_rt_hdr *rthdr = opt->srcrt; 371 struct ipv6_rt_hdr *rthdr = opt->srcrt;
371 switch (rthdr->type) { 372 switch (rthdr->type) {
372#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 373#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
373 case IPV6_SRCRT_TYPE_2: 374 case IPV6_SRCRT_TYPE_2:
375 if (rthdr->hdrlen != 2 ||
376 rthdr->segments_left != 1)
377 goto sticky_done;
378
374 break; 379 break;
375#endif 380#endif
376 default: 381 default:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 172438320eec..d0f54d18e19b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -912,8 +912,13 @@ static void ndisc_recv_na(struct sk_buff *skb)
912 is invalid, but ndisc specs say nothing 912 is invalid, but ndisc specs say nothing
913 about it. It could be misconfiguration, or 913 about it. It could be misconfiguration, or
914 an smart proxy agent tries to help us :-) 914 an smart proxy agent tries to help us :-)
915
916 We should not print the error if NA has been
917 received from loopback - it is just our own
918 unsolicited advertisement.
915 */ 919 */
916 ND_PRINTK1(KERN_WARNING 920 if (skb->pkt_type != PACKET_LOOPBACK)
921 ND_PRINTK1(KERN_WARNING
917 "ICMPv6 NA: someone advertises our address on %s!\n", 922 "ICMPv6 NA: someone advertises our address on %s!\n",
918 ifp->idev->dev->name); 923 ifp->idev->dev->name);
919 in6_ifa_put(ifp); 924 in6_ifa_put(ifp);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 07f0b76e7427..97c17fdd6f75 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -132,7 +132,7 @@ static struct snmp_mib snmp6_udplite6_list[] = {
132 132
133static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 133static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
134{ 134{
135 static char name[32]; 135 char name[32];
136 int i; 136 int i;
137 137
138 /* print by name -- deprecated items */ 138 /* print by name -- deprecated items */
@@ -144,7 +144,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
144 p = icmp6type2name[icmptype]; 144 p = icmp6type2name[icmptype];
145 if (!p) /* don't print un-named types here */ 145 if (!p) /* don't print un-named types here */
146 continue; 146 continue;
147 (void) snprintf(name, sizeof(name)-1, "Icmp6%s%s", 147 snprintf(name, sizeof(name), "Icmp6%s%s",
148 i & 0x100 ? "Out" : "In", p); 148 i & 0x100 ? "Out" : "In", p);
149 seq_printf(seq, "%-32s\t%lu\n", name, 149 seq_printf(seq, "%-32s\t%lu\n", name,
150 snmp_fold_field(mib, i)); 150 snmp_fold_field(mib, i));
@@ -157,7 +157,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
157 val = snmp_fold_field(mib, i); 157 val = snmp_fold_field(mib, i);
158 if (!val) 158 if (!val)
159 continue; 159 continue;
160 (void) snprintf(name, sizeof(name)-1, "Icmp6%sType%u", 160 snprintf(name, sizeof(name), "Icmp6%sType%u",
161 i & 0x100 ? "Out" : "In", i & 0xff); 161 i & 0x100 ? "Out" : "In", i & 0xff);
162 seq_printf(seq, "%-32s\t%lu\n", name, val); 162 seq_printf(seq, "%-32s\t%lu\n", name, val);
163 } 163 }
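Two small hardenings in the ipv6/proc.c hunks: the scratch buffer becomes an ordinary stack variable (the static one was shared by every concurrent reader of the proc file), and snprintf() is handed the full sizeof(name), since snprintf already reserves room for the terminating NUL within the size it is given, so the old sizeof(name)-1 only wasted a byte. A quick userspace illustration of the snprintf sizing:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];

	/* snprintf() writes at most sizeof(name)-1 characters plus a NUL. */
	int n = snprintf(name, sizeof(name), "Icmp6%s%s", "In", "EchoReplies");

	printf("wanted %d chars, stored \"%s\" (len %zu)\n", n, name, strlen(name));
	return 0;
}
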
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e51da8c092fa..8b48512ebf6a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -138,6 +138,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
138 int peeked; 138 int peeked;
139 int err; 139 int err;
140 int is_udplite = IS_UDPLITE(sk); 140 int is_udplite = IS_UDPLITE(sk);
141 int is_udp4;
141 142
142 if (addr_len) 143 if (addr_len)
143 *addr_len=sizeof(struct sockaddr_in6); 144 *addr_len=sizeof(struct sockaddr_in6);
@@ -158,6 +159,8 @@ try_again:
158 else if (copied < ulen) 159 else if (copied < ulen)
159 msg->msg_flags |= MSG_TRUNC; 160 msg->msg_flags |= MSG_TRUNC;
160 161
162 is_udp4 = (skb->protocol == htons(ETH_P_IP));
163
161 /* 164 /*
162 * If checksum is needed at all, try to do it while copying the 165 * If checksum is needed at all, try to do it while copying the
163 * data. If the data is truncated, or if we only want a partial 166 * data. If the data is truncated, or if we only want a partial
@@ -180,9 +183,14 @@ try_again:
180 if (err) 183 if (err)
181 goto out_free; 184 goto out_free;
182 185
183 if (!peeked) 186 if (!peeked) {
184 UDP6_INC_STATS_USER(sock_net(sk), 187 if (is_udp4)
185 UDP_MIB_INDATAGRAMS, is_udplite); 188 UDP_INC_STATS_USER(sock_net(sk),
189 UDP_MIB_INDATAGRAMS, is_udplite);
190 else
191 UDP6_INC_STATS_USER(sock_net(sk),
192 UDP_MIB_INDATAGRAMS, is_udplite);
193 }
186 194
187 sock_recv_timestamp(msg, sk, skb); 195 sock_recv_timestamp(msg, sk, skb);
188 196
@@ -196,7 +204,7 @@ try_again:
196 sin6->sin6_flowinfo = 0; 204 sin6->sin6_flowinfo = 0;
197 sin6->sin6_scope_id = 0; 205 sin6->sin6_scope_id = 0;
198 206
199 if (skb->protocol == htons(ETH_P_IP)) 207 if (is_udp4)
200 ipv6_addr_set(&sin6->sin6_addr, 0, 0, 208 ipv6_addr_set(&sin6->sin6_addr, 0, 0,
201 htonl(0xffff), ip_hdr(skb)->saddr); 209 htonl(0xffff), ip_hdr(skb)->saddr);
202 else { 210 else {
@@ -207,7 +215,7 @@ try_again:
207 } 215 }
208 216
209 } 217 }
210 if (skb->protocol == htons(ETH_P_IP)) { 218 if (is_udp4) {
211 if (inet->cmsg_flags) 219 if (inet->cmsg_flags)
212 ip_cmsg_recv(msg, skb); 220 ip_cmsg_recv(msg, skb);
213 } else { 221 } else {
@@ -228,8 +236,14 @@ out:
228 236
229csum_copy_err: 237csum_copy_err:
230 lock_sock(sk); 238 lock_sock(sk);
231 if (!skb_kill_datagram(sk, skb, flags)) 239 if (!skb_kill_datagram(sk, skb, flags)) {
232 UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 240 if (is_udp4)
241 UDP_INC_STATS_USER(sock_net(sk),
242 UDP_MIB_INERRORS, is_udplite);
243 else
244 UDP6_INC_STATS_USER(sock_net(sk),
245 UDP_MIB_INERRORS, is_udplite);
246 }
233 release_sock(sk); 247 release_sock(sk);
234 248
235 if (flags & MSG_DONTWAIT) 249 if (flags & MSG_DONTWAIT)
@@ -328,7 +342,7 @@ drop:
328 return -1; 342 return -1;
329} 343}
330 344
331static struct sock *udp_v6_mcast_next(struct sock *sk, 345static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
332 __be16 loc_port, struct in6_addr *loc_addr, 346 __be16 loc_port, struct in6_addr *loc_addr,
333 __be16 rmt_port, struct in6_addr *rmt_addr, 347 __be16 rmt_port, struct in6_addr *rmt_addr,
334 int dif) 348 int dif)
@@ -340,7 +354,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
340 sk_for_each_from(s, node) { 354 sk_for_each_from(s, node) {
341 struct inet_sock *inet = inet_sk(s); 355 struct inet_sock *inet = inet_sk(s);
342 356
343 if (sock_net(s) != sock_net(sk)) 357 if (!net_eq(sock_net(s), net))
344 continue; 358 continue;
345 359
346 if (s->sk_hash == num && s->sk_family == PF_INET6) { 360 if (s->sk_hash == num && s->sk_family == PF_INET6) {
@@ -383,14 +397,14 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
383 read_lock(&udp_hash_lock); 397 read_lock(&udp_hash_lock);
384 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); 398 sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
385 dif = inet6_iif(skb); 399 dif = inet6_iif(skb);
386 sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); 400 sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
387 if (!sk) { 401 if (!sk) {
388 kfree_skb(skb); 402 kfree_skb(skb);
389 goto out; 403 goto out;
390 } 404 }
391 405
392 sk2 = sk; 406 sk2 = sk;
393 while ((sk2 = udp_v6_mcast_next(sk_next(sk2), uh->dest, daddr, 407 while ((sk2 = udp_v6_mcast_next(net, sk_next(sk2), uh->dest, daddr,
394 uh->source, saddr, dif))) { 408 uh->source, saddr, dif))) {
395 struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC); 409 struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
396 if (buff) { 410 if (buff) {
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 89884a4f23aa..60c78cfc2737 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -34,6 +34,7 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
34 x->sel.dport_mask = htons(0xffff); 34 x->sel.dport_mask = htons(0xffff);
35 x->sel.sport = xfrm_flowi_sport(fl); 35 x->sel.sport = xfrm_flowi_sport(fl);
36 x->sel.sport_mask = htons(0xffff); 36 x->sel.sport_mask = htons(0xffff);
37 x->sel.family = AF_INET6;
37 x->sel.prefixlen_d = 128; 38 x->sel.prefixlen_d = 128;
38 x->sel.prefixlen_s = 128; 39 x->sel.prefixlen_s = 128;
39 x->sel.proto = fl->proto; 40 x->sel.proto = fl->proto;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e55e0441e4d9..5b22e011653b 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2075,7 +2075,6 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in
2075 req_size += socklen * 2; 2075 req_size += socklen * 2;
2076 } else { 2076 } else {
2077 size -= 2*socklen; 2077 size -= 2*socklen;
2078 socklen = 0;
2079 } 2078 }
2080 rq = (void*)skb_put(skb, req_size); 2079 rq = (void*)skb_put(skb, req_size);
2081 pol->sadb_x_policy_len += req_size/8; 2080 pol->sadb_x_policy_len += req_size/8;
@@ -3189,6 +3188,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3189 return xp; 3188 return xp;
3190 3189
3191out: 3190out:
3191 xp->walk.dead = 1;
3192 xfrm_policy_destroy(xp); 3192 xfrm_policy_destroy(xp);
3193 return NULL; 3193 return NULL;
3194} 3194}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 189d0bafa91a..b85c4f27b361 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -199,7 +199,7 @@ static ssize_t sta_agg_status_write(struct file *file,
199 /* toggle Rx aggregation command */ 199 /* toggle Rx aggregation command */
200 tid_num = tid_num - 100; 200 tid_num = tid_num - 100;
201 if (tid_static_rx[tid_num] == 1) { 201 if (tid_static_rx[tid_num] == 1) {
202 strcpy(state, "off "); 202 strcpy(state, "off");
203 ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0, 203 ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0,
204 WLAN_REASON_QSTA_REQUIRE_SETUP); 204 WLAN_REASON_QSTA_REQUIRE_SETUP);
205 sta->ampdu_mlme.tid_state_rx[tid_num] |= 205 sta->ampdu_mlme.tid_state_rx[tid_num] |=
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 87665d7bb4f9..409bb7716236 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2560,25 +2560,3 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
2560 ieee80211_restart_sta_timer(sdata); 2560 ieee80211_restart_sta_timer(sdata);
2561 rcu_read_unlock(); 2561 rcu_read_unlock();
2562} 2562}
2563
2564/* driver notification call */
2565void ieee80211_notify_mac(struct ieee80211_hw *hw,
2566 enum ieee80211_notification_types notif_type)
2567{
2568 struct ieee80211_local *local = hw_to_local(hw);
2569 struct ieee80211_sub_if_data *sdata;
2570
2571 switch (notif_type) {
2572 case IEEE80211_NOTIFY_RE_ASSOC:
2573 rcu_read_lock();
2574 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2575 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2576 continue;
2577
2578 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
2579 }
2580 rcu_read_unlock();
2581 break;
2582 }
2583}
2584EXPORT_SYMBOL(ieee80211_notify_mac);
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 0b024cd6b809..98f480708050 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -94,8 +94,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
94 prob / 10, prob % 10, 94 prob / 10, prob % 10,
95 mr->last_success, 95 mr->last_success,
96 mr->last_attempts, 96 mr->last_attempts,
97 mr->succ_hist, 97 (unsigned long long)mr->succ_hist,
98 mr->att_hist); 98 (unsigned long long)mr->att_hist);
99 } 99 }
100 p += sprintf(p, "\nTotal packet count:: ideal %d " 100 p += sprintf(p, "\nTotal packet count:: ideal %d "
101 "lookaround %d\n\n", 101 "lookaround %d\n\n",
@@ -106,7 +106,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
106 return 0; 106 return 0;
107} 107}
108 108
109static int 109static ssize_t
110minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o) 110minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o)
111{ 111{
112 struct minstrel_stats_info *ms; 112 struct minstrel_stats_info *ms;
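The casts in the minstrel debugfs hunk exist because the succ_hist/att_hist counters are u64, which is unsigned long on 64-bit builds but unsigned long long on 32-bit ones; printing them through a "%llu"-style format therefore needs an explicit (unsigned long long) cast to be correct and warning-free on both. The second hunk fixes the .read handler's return type to ssize_t, which is what struct file_operations expects. A userspace version of the cast idiom:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t succ_hist = 123456789012345ULL;

	/* Portable regardless of whether uint64_t is long or long long here. */
	printf("%llu\n", (unsigned long long)succ_hist);
	/* The <inttypes.h> alternative: */
	printf("%" PRIu64 "\n", succ_hist);
	return 0;
}
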
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7fef8ea1f5ec..d254446b85b5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
99 99
100 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 100 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
101 while (sta) { 101 while (sta) {
102 if (compare_ether_addr(sta->sta.addr, addr) == 0) 102 if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
103 break; 103 break;
104 sta = rcu_dereference(sta->hnext); 104 sta = rcu_dereference(sta->hnext);
105 } 105 }
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 742f811ca416..ab4ddba874be 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -271,6 +271,7 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
271 __u32 *mode, char *extra) 271 __u32 *mode, char *extra)
272{ 272{
273 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 273 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
274 struct ieee80211_local *local = sdata->local;
274 int type; 275 int type;
275 276
276 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 277 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -281,6 +282,13 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
281 type = NL80211_IFTYPE_STATION; 282 type = NL80211_IFTYPE_STATION;
282 break; 283 break;
283 case IW_MODE_ADHOC: 284 case IW_MODE_ADHOC:
285 /* Setting ad-hoc mode on non ibss channel is not
286 * supported.
287 */
288 if (local->oper_channel &&
289 (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS))
290 return -EOPNOTSUPP;
291
284 type = NL80211_IFTYPE_ADHOC; 292 type = NL80211_IFTYPE_ADHOC;
285 break; 293 break;
286 case IW_MODE_REPEAT: 294 case IW_MODE_REPEAT:
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 02ddc2b3ce2e..e90d52f199bc 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -713,7 +713,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
713 iph = ipv6_hdr(skb); 713 iph = ipv6_hdr(skb);
714 iph->version = 6; 714 iph->version = 6;
715 iph->nexthdr = IPPROTO_IPV6; 715 iph->nexthdr = IPPROTO_IPV6;
716 iph->payload_len = old_iph->payload_len + sizeof(old_iph); 716 iph->payload_len = old_iph->payload_len;
717 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
717 iph->priority = old_iph->priority; 718 iph->priority = old_iph->priority;
718 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); 719 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
719 iph->daddr = rt->rt6i_dst.addr; 720 iph->daddr = rt->rt6i_dst.addr;
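The replaced line in ip_vs_tunnel_xmit_v6() had two problems: sizeof(old_iph) is the size of the pointer (4 or 8 bytes), not of the 40-byte IPv6 header, and payload_len is a big-endian (__be16) field, so adding a host-order constant to it directly is wrong on little-endian machines. The fix copies the inner value and then uses be16_add_cpu(), which does the ntohs/htons round trip. A userspace sketch of the same adjustment, with be16_add_cpu() re-implemented locally for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct ipv6hdr_sketch { uint16_t payload_len; /* big-endian on the wire */ };

static void be16_add_cpu(uint16_t *var, uint16_t val)
{
	*var = htons(ntohs(*var) + val);
}

int main(void)
{
	struct ipv6hdr_sketch inner = { .payload_len = htons(1000) };
	struct ipv6hdr_sketch outer = { .payload_len = inner.payload_len };

	/* Outer header carries the inner packet plus its 40-byte IPv6 header. */
	be16_add_cpu(&outer.payload_len, 40 /* sizeof(struct ipv6hdr) */);
	printf("outer payload_len = %u\n", (unsigned)ntohs(outer.payload_len));
	return 0;
}
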
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 622d7c671cb7..233fdd2d7d21 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -305,9 +305,7 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
305 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 305 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
306 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 306 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
307 307
308 spin_lock_bh(&nf_conntrack_lock);
309 __nf_conntrack_hash_insert(ct, hash, repl_hash); 308 __nf_conntrack_hash_insert(ct, hash, repl_hash);
310 spin_unlock_bh(&nf_conntrack_lock);
311} 309}
312EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); 310EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
313 311
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 9c06b9f86ad4..c39b6a994133 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/rculist.h> 23#include <linux/rculist.h>
24#include <linux/rtnetlink.h>
24 25
25#include <net/netfilter/nf_conntrack.h> 26#include <net/netfilter/nf_conntrack.h>
26#include <net/netfilter/nf_conntrack_l3proto.h> 27#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -167,10 +168,12 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
167 */ 168 */
168 synchronize_rcu(); 169 synchronize_rcu();
169 170
171 rtnl_lock();
170 spin_lock_bh(&nf_conntrack_lock); 172 spin_lock_bh(&nf_conntrack_lock);
171 for_each_net(net) 173 for_each_net(net)
172 __nf_conntrack_helper_unregister(me, net); 174 __nf_conntrack_helper_unregister(me, net);
173 spin_unlock_bh(&nf_conntrack_lock); 175 spin_unlock_bh(&nf_conntrack_lock);
176 rtnl_unlock();
174} 177}
175EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); 178EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
176 179
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index a040d46f85d6..5f4a6516b3b6 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1090,7 +1090,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1090 struct nf_conn_help *help; 1090 struct nf_conn_help *help;
1091 struct nf_conntrack_helper *helper; 1091 struct nf_conntrack_helper *helper;
1092 1092
1093 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_KERNEL); 1093 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
1094 if (ct == NULL || IS_ERR(ct)) 1094 if (ct == NULL || IS_ERR(ct))
1095 return -ENOMEM; 1095 return -ENOMEM;
1096 1096
@@ -1138,7 +1138,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1138 } 1138 }
1139 } 1139 }
1140 1140
1141 nf_ct_acct_ext_add(ct, GFP_KERNEL); 1141 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1142 1142
1143#if defined(CONFIG_NF_CONNTRACK_MARK) 1143#if defined(CONFIG_NF_CONNTRACK_MARK)
1144 if (cda[CTA_MARK]) 1144 if (cda[CTA_MARK])
@@ -1212,13 +1212,14 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1212 atomic_inc(&master_ct->ct_general.use); 1212 atomic_inc(&master_ct->ct_general.use);
1213 } 1213 }
1214 1214
1215 spin_unlock_bh(&nf_conntrack_lock);
1216 err = -ENOENT; 1215 err = -ENOENT;
1217 if (nlh->nlmsg_flags & NLM_F_CREATE) 1216 if (nlh->nlmsg_flags & NLM_F_CREATE)
1218 err = ctnetlink_create_conntrack(cda, 1217 err = ctnetlink_create_conntrack(cda,
1219 &otuple, 1218 &otuple,
1220 &rtuple, 1219 &rtuple,
1221 master_ct); 1220 master_ct);
1221 spin_unlock_bh(&nf_conntrack_lock);
1222
1222 if (err < 0 && master_ct) 1223 if (err < 0 && master_ct)
1223 nf_ct_put(master_ct); 1224 nf_ct_put(master_ct);
1224 1225
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index a59a307e685d..592d73344d46 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -22,6 +22,7 @@
22#include <linux/notifier.h> 22#include <linux/notifier.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/rtnetlink.h>
25 26
26#include <net/netfilter/nf_conntrack.h> 27#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_l3proto.h> 28#include <net/netfilter/nf_conntrack_l3proto.h>
@@ -221,8 +222,10 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
221 synchronize_rcu(); 222 synchronize_rcu();
222 223
223 /* Remove all contrack entries for this protocol */ 224 /* Remove all contrack entries for this protocol */
225 rtnl_lock();
224 for_each_net(net) 226 for_each_net(net)
225 nf_ct_iterate_cleanup(net, kill_l3proto, proto); 227 nf_ct_iterate_cleanup(net, kill_l3proto, proto);
228 rtnl_unlock();
226} 229}
227EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 230EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
228 231
@@ -333,8 +336,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
333 synchronize_rcu(); 336 synchronize_rcu();
334 337
335 /* Remove all contrack entries for this protocol */ 338 /* Remove all contrack entries for this protocol */
339 rtnl_lock();
336 for_each_net(net) 340 for_each_net(net)
337 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); 341 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
342 rtnl_unlock();
338} 343}
339EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 344EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
340 345
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a2cdbcbf64c4..4ab62ad85dd4 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -335,7 +335,7 @@ static int __init nf_ct_proto_gre_init(void)
335 rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4); 335 rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
336 if (rv < 0) 336 if (rv < 0)
337 return rv; 337 return rv;
338 rv = register_pernet_gen_device(&proto_gre_net_id, &proto_gre_net_ops); 338 rv = register_pernet_gen_subsys(&proto_gre_net_id, &proto_gre_net_ops);
339 if (rv < 0) 339 if (rv < 0)
340 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); 340 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
341 return rv; 341 return rv;
@@ -344,7 +344,7 @@ static int __init nf_ct_proto_gre_init(void)
344static void nf_ct_proto_gre_fini(void) 344static void nf_ct_proto_gre_fini(void)
345{ 345{
346 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); 346 nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
347 unregister_pernet_gen_device(proto_gre_net_id, &proto_gre_net_ops); 347 unregister_pernet_gen_subsys(proto_gre_net_id, &proto_gre_net_ops);
348} 348}
349 349
350module_init(nf_ct_proto_gre_init); 350module_init(nf_ct_proto_gre_init);
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 02a8fed21082..1acc089be7e9 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -141,7 +141,7 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par)
141 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, 141 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
142 saddr, daddr, sport, dport, par->in, false); 142 saddr, daddr, sport, dport, par->in, false);
143 if (sk != NULL) { 143 if (sk != NULL) {
144 bool wildcard = (inet_sk(sk)->rcv_saddr == 0); 144 bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0);
145 145
146 nf_tproxy_put_sock(sk); 146 nf_tproxy_put_sock(sk);
147 if (wildcard) 147 if (wildcard)
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index b0925a303353..249f6b92f153 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -315,6 +315,7 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
315 * Audit Helper Functions 315 * Audit Helper Functions
316 */ 316 */
317 317
318#ifdef CONFIG_AUDIT
318/** 319/**
319 * netlbl_af4list_audit_addr - Audit an IPv4 address 320 * netlbl_af4list_audit_addr - Audit an IPv4 address
320 * @audit_buf: audit buffer 321 * @audit_buf: audit buffer
@@ -386,3 +387,4 @@ void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf,
386 } 387 }
387} 388}
388#endif /* IPv6 */ 389#endif /* IPv6 */
390#endif /* CONFIG_AUDIT */
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 0242bead405f..07ae7fd82be1 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -120,9 +120,19 @@ struct netlbl_af4list *netlbl_af4list_search(__be32 addr,
120struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr, 120struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
121 __be32 mask, 121 __be32 mask,
122 struct list_head *head); 122 struct list_head *head);
123
124#ifdef CONFIG_AUDIT
123void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, 125void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
124 int src, const char *dev, 126 int src, const char *dev,
125 __be32 addr, __be32 mask); 127 __be32 addr, __be32 mask);
128#else
129static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
130 int src, const char *dev,
131 __be32 addr, __be32 mask)
132{
133 return;
134}
135#endif
126 136
127#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 137#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
128 138
@@ -179,11 +189,23 @@ struct netlbl_af6list *netlbl_af6list_search(const struct in6_addr *addr,
179struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr, 189struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr,
180 const struct in6_addr *mask, 190 const struct in6_addr *mask,
181 struct list_head *head); 191 struct list_head *head);
192
193#ifdef CONFIG_AUDIT
182void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, 194void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf,
183 int src, 195 int src,
184 const char *dev, 196 const char *dev,
185 const struct in6_addr *addr, 197 const struct in6_addr *addr,
186 const struct in6_addr *mask); 198 const struct in6_addr *mask);
199#else
200static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf,
201 int src,
202 const char *dev,
203 const struct in6_addr *addr,
204 const struct in6_addr *mask)
205{
206 return;
207}
208#endif
187#endif /* IPV6 */ 209#endif /* IPV6 */
188 210
189#endif 211#endif
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index ee769ecaa13c..0a0ef17b2a40 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -265,7 +265,7 @@ add_failure:
265static int netlbl_mgmt_listentry(struct sk_buff *skb, 265static int netlbl_mgmt_listentry(struct sk_buff *skb,
266 struct netlbl_dom_map *entry) 266 struct netlbl_dom_map *entry)
267{ 267{
268 int ret_val; 268 int ret_val = 0;
269 struct nlattr *nla_a; 269 struct nlattr *nla_a;
270 struct nlattr *nla_b; 270 struct nlattr *nla_b;
271 struct netlbl_af4list *iter4; 271 struct netlbl_af4list *iter4;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e8a5c32b0f10..8c0308032178 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
562 const struct in_addr *mask, 562 const struct in_addr *mask,
563 struct netlbl_audit *audit_info) 563 struct netlbl_audit *audit_info)
564{ 564{
565 int ret_val = 0;
566 struct netlbl_af4list *list_entry; 565 struct netlbl_af4list *list_entry;
567 struct netlbl_unlhsh_addr4 *entry; 566 struct netlbl_unlhsh_addr4 *entry;
568 struct audit_buffer *audit_buf; 567 struct audit_buffer *audit_buf;
@@ -574,9 +573,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
574 list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, 573 list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
575 &iface->addr4_list); 574 &iface->addr4_list);
576 spin_unlock(&netlbl_unlhsh_lock); 575 spin_unlock(&netlbl_unlhsh_lock);
577 if (list_entry == NULL) 576 if (list_entry != NULL)
578 ret_val = -ENOENT; 577 entry = netlbl_unlhsh_addr4_entry(list_entry);
579 entry = netlbl_unlhsh_addr4_entry(list_entry); 578 else
579 entry = NULL;
580 580
581 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, 581 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
582 audit_info); 582 audit_info);
@@ -587,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
587 addr->s_addr, mask->s_addr); 587 addr->s_addr, mask->s_addr);
588 if (dev != NULL) 588 if (dev != NULL)
589 dev_put(dev); 589 dev_put(dev);
590 if (entry && security_secid_to_secctx(entry->secid, 590 if (entry != NULL &&
591 &secctx, 591 security_secid_to_secctx(entry->secid,
592 &secctx_len) == 0) { 592 &secctx, &secctx_len) == 0) {
593 audit_log_format(audit_buf, " sec_obj=%s", secctx); 593 audit_log_format(audit_buf, " sec_obj=%s", secctx);
594 security_release_secctx(secctx, secctx_len); 594 security_release_secctx(secctx, secctx_len);
595 } 595 }
596 audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); 596 audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
597 audit_log_end(audit_buf); 597 audit_log_end(audit_buf);
598 } 598 }
599 599
600 if (ret_val == 0) 600 if (entry == NULL)
601 call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4); 601 return -ENOENT;
602 return ret_val; 602
603 call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
604 return 0;
603} 605}
604 606
605#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 607#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -623,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
623 const struct in6_addr *mask, 625 const struct in6_addr *mask,
624 struct netlbl_audit *audit_info) 626 struct netlbl_audit *audit_info)
625{ 627{
626 int ret_val = 0;
627 struct netlbl_af6list *list_entry; 628 struct netlbl_af6list *list_entry;
628 struct netlbl_unlhsh_addr6 *entry; 629 struct netlbl_unlhsh_addr6 *entry;
629 struct audit_buffer *audit_buf; 630 struct audit_buffer *audit_buf;
@@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
634 spin_lock(&netlbl_unlhsh_lock); 635 spin_lock(&netlbl_unlhsh_lock);
635 list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); 636 list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list);
636 spin_unlock(&netlbl_unlhsh_lock); 637 spin_unlock(&netlbl_unlhsh_lock);
637 if (list_entry == NULL) 638 if (list_entry != NULL)
638 ret_val = -ENOENT; 639 entry = netlbl_unlhsh_addr6_entry(list_entry);
639 entry = netlbl_unlhsh_addr6_entry(list_entry); 640 else
641 entry = NULL;
640 642
641 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, 643 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
642 audit_info); 644 audit_info);
@@ -647,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
647 addr, mask); 649 addr, mask);
648 if (dev != NULL) 650 if (dev != NULL)
649 dev_put(dev); 651 dev_put(dev);
650 if (entry && security_secid_to_secctx(entry->secid, 652 if (entry != NULL &&
651 &secctx, 653 security_secid_to_secctx(entry->secid,
652 &secctx_len) == 0) { 654 &secctx, &secctx_len) == 0) {
653 audit_log_format(audit_buf, " sec_obj=%s", secctx); 655 audit_log_format(audit_buf, " sec_obj=%s", secctx);
654 security_release_secctx(secctx, secctx_len); 656 security_release_secctx(secctx, secctx_len);
655 } 657 }
656 audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); 658 audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
657 audit_log_end(audit_buf); 659 audit_log_end(audit_buf);
658 } 660 }
659 661
660 if (ret_val == 0) 662 if (entry == NULL)
661 call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6); 663 return -ENOENT;
662 return ret_val; 664
665 call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
666 return 0;
663} 667}
664#endif /* IPv6 */ 668#endif /* IPv6 */
665 669
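The netlabel_unlabeled.c rewrite stops deriving `entry` unconditionally from the removed list node. netlbl_unlhsh_addr4_entry()/addr6_entry() are container_of()-style conversions, and container_of() applied to a NULL pointer does not yield NULL when the embedded member sits at a non-zero offset, so the old `if (entry && ...)` test could pass on a bogus pointer and dereference it in the audit path. Keeping entry NULL when nothing was removed, and keying the -ENOENT return and call_rcu() on that, sidesteps the problem. A small userspace demonstration of why a container_of()-style conversion of NULL is not NULL; the struct layout and macro below are made up for illustration:

#include <stddef.h>
#include <stdio.h>

struct addr_entry {
	unsigned int secid;		/* gives `list` a non-zero offset */
	struct { int dummy; } list;
};

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct { int dummy; } *node = NULL;	/* "nothing was removed" */
	struct addr_entry *entry =
		container_of_sketch(node, struct addr_entry, list);

	printf("entry = %p (not NULL, yet nothing was removed)\n", (void *)entry);
	return 0;
}
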
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index b9d97effebe3..9d211f12582b 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -33,9 +33,30 @@
33#include <net/phonet/phonet.h> 33#include <net/phonet/phonet.h>
34#include <net/phonet/pn_dev.h> 34#include <net/phonet/pn_dev.h>
35 35
36static struct net_proto_family phonet_proto_family; 36/* Transport protocol registration */
37static struct phonet_protocol *phonet_proto_get(int protocol); 37static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
38static inline void phonet_proto_put(struct phonet_protocol *pp); 38static DEFINE_SPINLOCK(proto_tab_lock);
39
40static struct phonet_protocol *phonet_proto_get(int protocol)
41{
42 struct phonet_protocol *pp;
43
44 if (protocol >= PHONET_NPROTO)
45 return NULL;
46
47 spin_lock(&proto_tab_lock);
48 pp = proto_tab[protocol];
49 if (pp && !try_module_get(pp->prot->owner))
50 pp = NULL;
51 spin_unlock(&proto_tab_lock);
52
53 return pp;
54}
55
56static inline void phonet_proto_put(struct phonet_protocol *pp)
57{
58 module_put(pp->prot->owner);
59}
39 60
40/* protocol family functions */ 61/* protocol family functions */
41 62
@@ -144,8 +165,8 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
144 struct phonethdr *ph; 165 struct phonethdr *ph;
145 int err; 166 int err;
146 167
147 if (skb->len + 2 > 0xffff) { 168 if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
148 /* Phonet length field would overflow */ 169 skb->len + sizeof(struct phonethdr) > dev->mtu) {
149 err = -EMSGSIZE; 170 err = -EMSGSIZE;
150 goto drop; 171 goto drop;
151 } 172 }
@@ -261,6 +282,8 @@ static inline int can_respond(struct sk_buff *skb)
261 return 0; /* we are not the destination */ 282 return 0; /* we are not the destination */
262 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) 283 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
263 return 0; 284 return 0;
285 if (ph->pn_res == PN_COMMGR) /* indications */
286 return 0;
264 287
265 ph = pn_hdr(skb); /* re-acquires the pointer */ 288 ph = pn_hdr(skb); /* re-acquires the pointer */
266 pm = pn_msg(skb); 289 pm = pn_msg(skb);
@@ -309,7 +332,8 @@ static int send_reset_indications(struct sk_buff *rskb)
309 332
310 return pn_raw_send(data, sizeof(data), rskb->dev, 333 return pn_raw_send(data, sizeof(data), rskb->dev,
311 pn_object(oph->pn_sdev, 0x00), 334 pn_object(oph->pn_sdev, 0x00),
312 pn_object(oph->pn_rdev, oph->pn_robj), 0x10); 335 pn_object(oph->pn_rdev, oph->pn_robj),
336 PN_COMMGR);
313} 337}
314 338
315 339
@@ -372,10 +396,6 @@ static struct packet_type phonet_packet_type = {
372 .func = phonet_rcv, 396 .func = phonet_rcv,
373}; 397};
374 398
375/* Transport protocol registration */
376static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
377static DEFINE_SPINLOCK(proto_tab_lock);
378
379int __init_or_module phonet_proto_register(int protocol, 399int __init_or_module phonet_proto_register(int protocol,
380 struct phonet_protocol *pp) 400 struct phonet_protocol *pp)
381{ 401{
@@ -409,27 +429,6 @@ void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
409} 429}
410EXPORT_SYMBOL(phonet_proto_unregister); 430EXPORT_SYMBOL(phonet_proto_unregister);
411 431
412static struct phonet_protocol *phonet_proto_get(int protocol)
413{
414 struct phonet_protocol *pp;
415
416 if (protocol >= PHONET_NPROTO)
417 return NULL;
418
419 spin_lock(&proto_tab_lock);
420 pp = proto_tab[protocol];
421 if (pp && !try_module_get(pp->prot->owner))
422 pp = NULL;
423 spin_unlock(&proto_tab_lock);
424
425 return pp;
426}
427
428static inline void phonet_proto_put(struct phonet_protocol *pp)
429{
430 module_put(pp->prot->owner);
431}
432
433/* Module registration */ 432/* Module registration */
434static int __init phonet_init(void) 433static int __init phonet_init(void)
435{ 434{
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 9978afbd9f2a..803eeef0aa85 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len)
155static void gprs_write_space(struct sock *sk) 155static void gprs_write_space(struct sock *sk)
156{ 156{
157 struct gprs_dev *dev = sk->sk_user_data; 157 struct gprs_dev *dev = sk->sk_user_data;
158 struct net_device *net = dev->net;
158 unsigned credits = pep_writeable(sk); 159 unsigned credits = pep_writeable(sk);
159 160
160 spin_lock_bh(&dev->tx_lock); 161 spin_lock_bh(&dev->tx_lock);
161 dev->tx_max = credits; 162 dev->tx_max = credits;
162 if (credits > skb_queue_len(&dev->tx_queue)) 163 if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
163 netif_wake_queue(dev->net); 164 netif_wake_queue(net);
164 spin_unlock_bh(&dev->tx_lock); 165 spin_unlock_bh(&dev->tx_lock);
165} 166}
166 167
@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk)
168 * Network device callbacks 169 * Network device callbacks
169 */ 170 */
170 171
172static int gprs_open(struct net_device *dev)
173{
174 struct gprs_dev *gp = netdev_priv(dev);
175
176 gprs_write_space(gp->sk);
177 return 0;
178}
179
180static int gprs_close(struct net_device *dev)
181{
182 struct gprs_dev *gp = netdev_priv(dev);
183
184 netif_stop_queue(dev);
185 flush_work(&gp->tx_work);
186 return 0;
187}
188
171static int gprs_xmit(struct sk_buff *skb, struct net_device *net) 189static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
172{ 190{
173 struct gprs_dev *dev = netdev_priv(net); 191 struct gprs_dev *dev = netdev_priv(net);
@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net)
254 net->tx_queue_len = 10; 272 net->tx_queue_len = 10;
255 273
256 net->destructor = free_netdev; 274 net->destructor = free_netdev;
275 net->open = gprs_open;
276 net->stop = gprs_close;
257 net->hard_start_xmit = gprs_xmit; /* mandatory */ 277 net->hard_start_xmit = gprs_xmit; /* mandatory */
258 net->change_mtu = gprs_set_mtu; 278 net->change_mtu = gprs_set_mtu;
259 net->get_stats = gprs_get_stats; 279 net->get_stats = gprs_get_stats;
@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk)
318 dev->sk = sk; 338 dev->sk = sk;
319 339
320 printk(KERN_DEBUG"%s: attached\n", net->name); 340 printk(KERN_DEBUG"%s: attached\n", net->name);
321 gprs_write_space(sk); /* kick off TX */
322 return net->ifindex; 341 return net->ifindex;
323 342
324out_rel: 343out_rel:
@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk)
341 360
342 printk(KERN_DEBUG"%s: detached\n", net->name); 361 printk(KERN_DEBUG"%s: detached\n", net->name);
343 unregister_netdev(net); 362 unregister_netdev(net);
344 flush_scheduled_work();
345 sock_put(sk); 363 sock_put(sk);
346 skb_queue_purge(&dev->tx_queue);
347} 364}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 53be9fc82aaa..f93ff8ef47d0 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -115,7 +115,7 @@ int phonet_address_del(struct net_device *dev, u8 addr)
115 pnd = __phonet_get(dev); 115 pnd = __phonet_get(dev);
116 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) 116 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
117 err = -EADDRNOTAVAIL; 117 err = -EADDRNOTAVAIL;
118 if (bitmap_empty(pnd->addrs, 64)) 118 else if (bitmap_empty(pnd->addrs, 64))
119 __phonet_device_free(pnd); 119 __phonet_device_free(pnd);
120 spin_unlock_bh(&pndevs.lock); 120 spin_unlock_bh(&pndevs.lock);
121 return err; 121 return err;
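
The one-word pn_dev.c fix matters because the old code went on to test the address bitmap even when no device record was found. A self-contained userspace illustration of the corrected flow, with a 64-bit word standing in for the per-device address bitmap (names and types are hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pn_dev_demo {
    uint64_t addrs;             /* one bit per local address */
};

/* Clear one address bit; free the record only when the address was
 * actually present and the bitmap is now empty.  The "else" keeps us
 * from touching a device record that was never found. */
static int address_del(struct pn_dev_demo **pdev, unsigned int bit)
{
    struct pn_dev_demo *dev = *pdev;
    int err = 0;

    if (!dev || !(dev->addrs & (UINT64_C(1) << bit)))
        err = -EADDRNOTAVAIL;
    else {
        dev->addrs &= ~(UINT64_C(1) << bit);
        if (dev->addrs == 0) {  /* last address gone */
            free(dev);
            *pdev = NULL;
        }
    }
    return err;
}

int main(void)
{
    struct pn_dev_demo *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return 1;
    dev->addrs = UINT64_C(1) << 3;

    printf("del unknown: %d\n", address_del(&dev, 5)); /* -EADDRNOTAVAIL */
    printf("del last:    %d\n", address_del(&dev, 3)); /* 0, record freed */
    printf("del again:   %d\n", address_del(&dev, 3)); /* safe: dev is NULL */
    return 0;
}
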
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index b1770d66bc8d..242fe8f8c322 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -123,6 +123,7 @@ nla_put_failure:
123 123
124static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) 124static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
125{ 125{
126 struct net *net = sock_net(skb->sk);
126 struct phonet_device *pnd; 127 struct phonet_device *pnd;
127 int dev_idx = 0, dev_start_idx = cb->args[0]; 128 int dev_idx = 0, dev_start_idx = cb->args[0];
128 int addr_idx = 0, addr_start_idx = cb->args[1]; 129 int addr_idx = 0, addr_start_idx = cb->args[1];
@@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
131 list_for_each_entry(pnd, &pndevs.list, list) { 132 list_for_each_entry(pnd, &pndevs.list, list) {
132 u8 addr; 133 u8 addr;
133 134
135 if (!net_eq(dev_net(pnd->netdev), net))
136 continue;
134 if (dev_idx > dev_start_idx) 137 if (dev_idx > dev_start_idx)
135 addr_start_idx = 0; 138 addr_start_idx = 0;
136 if (dev_idx++ < dev_start_idx) 139 if (dev_idx++ < dev_start_idx)
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
index 21124ec0a73d..bfdade72e066 100644
--- a/net/rfkill/rfkill-input.c
+++ b/net/rfkill/rfkill-input.c
@@ -256,6 +256,11 @@ static struct input_handler rfkill_handler = {
256 256
257static int __init rfkill_handler_init(void) 257static int __init rfkill_handler_init(void)
258{ 258{
259 unsigned long last_run = jiffies - msecs_to_jiffies(500);
260 rfkill_wlan.last = last_run;
261 rfkill_bt.last = last_run;
262 rfkill_uwb.last = last_run;
263 rfkill_wimax.last = last_run;
259 return input_register_handler(&rfkill_handler); 264 return input_register_handler(&rfkill_handler);
260} 265}
261 266
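
Seeding each switch's debounce timestamp half a second in the past means the very first key press after module load is not swallowed by the rate limiter. The same trick in userspace terms, with CLOCK_MONOTONIC in place of jiffies and an arbitrary 200 ms window:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DEBOUNCE_MS 200

static struct timespec last_event;

static long ms_since(const struct timespec *a, const struct timespec *b)
{
    return (b->tv_sec - a->tv_sec) * 1000 +
           (b->tv_nsec - a->tv_nsec) / 1000000;
}

/* Accept an event only if the previous one is old enough. */
static bool event_allowed(void)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    if (ms_since(&last_event, &now) < DEBOUNCE_MS)
        return false;
    last_event = now;
    return true;
}

int main(void)
{
    /* Initialise "last" in the past so the first event passes,
     * mirroring last = jiffies - msecs_to_jiffies(500). */
    clock_gettime(CLOCK_MONOTONIC, &last_event);
    last_event.tv_sec -= 1;

    printf("first event allowed: %d\n", event_allowed());
    printf("immediate repeat allowed: %d\n", event_allowed());
    return 0;
}
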
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index f949a482b007..25ba3bd57e66 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -603,7 +603,7 @@ static int rfkill_check_duplicity(const struct rfkill *rfkill)
603 } 603 }
604 604
605 /* 0: first switch of its kind */ 605 /* 0: first switch of its kind */
606 return test_bit(rfkill->type, seen); 606 return (test_bit(rfkill->type, seen)) ? 1 : 0;
607} 607}
608 608
609static int rfkill_add_switch(struct rfkill *rfkill) 609static int rfkill_add_switch(struct rfkill *rfkill)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a7f1ce11bc22..0c1cc7612800 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,6 +1072,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1072 unsigned char *asmptr; 1072 unsigned char *asmptr;
1073 int n, size, qbit = 0; 1073 int n, size, qbit = 0;
1074 1074
1075 /* ROSE empty frame has no meaning : don't send */
1076 if (len == 0)
1077 return 0;
1078
1075 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) 1079 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1076 return -EINVAL; 1080 return -EINVAL;
1077 1081
@@ -1265,6 +1269,12 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1265 skb_reset_transport_header(skb); 1269 skb_reset_transport_header(skb);
1266 copied = skb->len; 1270 copied = skb->len;
1267 1271
1272 /* ROSE empty frame has no meaning : ignore it */
1273 if (copied == 0) {
1274 skb_free_datagram(sk, skb);
1275 return copied;
1276 }
1277
1268 if (copied > size) { 1278 if (copied > size) {
1269 copied = size; 1279 copied = size;
1270 msg->msg_flags |= MSG_TRUNC; 1280 msg->msg_flags |= MSG_TRUNC;
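
ROSE now refuses to transmit or deliver zero-length frames, since an empty frame carries no meaning on that protocol. A minimal userspace sketch of the same guard wrapped around an ordinary datagram send (the wrapper is hypothetical, not the ROSE code):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Refuse to emit empty datagrams instead of handing them to the
 * protocol, mirroring the len == 0 check added to rose_sendmsg(). */
static ssize_t send_nonempty(int fd, const void *buf, size_t len, int flags)
{
    if (len == 0)
        return 0;               /* nothing to say: succeed without sending */
    return send(fd, buf, len, flags);
}

int main(void)
{
    int sv[2];
    char msg[] = "ping";

    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
        return 1;

    printf("empty send returned %zd\n", send_nonempty(sv[0], "", 0, 0));
    printf("real send returned  %zd\n", send_nonempty(sv[0], msg, strlen(msg), 0));
    return 0;
}
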
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b16ad2972c6b..6ab4a2f92ca0 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -417,6 +417,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
417 struct nlattr *nest; 417 struct nlattr *nest;
418 418
419 nest = nla_nest_start(skb, TCA_STAB); 419 nest = nla_nest_start(skb, TCA_STAB);
420 if (nest == NULL)
421 goto nla_put_failure;
420 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); 422 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
421 nla_nest_end(skb, nest); 423 nla_nest_end(skb, nest);
422 424
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 93cd30ce6501..cdcd16fcfeda 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -270,6 +270,8 @@ static void dev_watchdog_down(struct net_device *dev)
270void netif_carrier_on(struct net_device *dev) 270void netif_carrier_on(struct net_device *dev)
271{ 271{
272 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 272 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
273 if (dev->reg_state == NETREG_UNINITIALIZED)
274 return;
273 linkwatch_fire_event(dev); 275 linkwatch_fire_event(dev);
274 if (netif_running(dev)) 276 if (netif_running(dev))
275 __netdev_watchdog_up(dev); 277 __netdev_watchdog_up(dev);
@@ -285,8 +287,11 @@ EXPORT_SYMBOL(netif_carrier_on);
285 */ 287 */
286void netif_carrier_off(struct net_device *dev) 288void netif_carrier_off(struct net_device *dev)
287{ 289{
288 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) 290 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
291 if (dev->reg_state == NETREG_UNINITIALIZED)
292 return;
289 linkwatch_fire_event(dev); 293 linkwatch_fire_event(dev);
294 }
290} 295}
291EXPORT_SYMBOL(netif_carrier_off); 296EXPORT_SYMBOL(netif_carrier_off);
292 297
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a11959908d9a..98402f0efa47 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -46,9 +46,6 @@
46 layering other disciplines. It does not need to do bandwidth 46 layering other disciplines. It does not need to do bandwidth
47 control either since that can be handled by using token 47 control either since that can be handled by using token
48 bucket or other rate control. 48 bucket or other rate control.
49
50 The simulator is limited by the Linux timer resolution
51 and will create packet bursts on the HZ boundary (1ms).
52*/ 49*/
53 50
54struct netem_sched_data { 51struct netem_sched_data {
diff --git a/net/socket.c b/net/socket.c
index 2b7a4b5c9b72..92764d836891 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -990,7 +990,6 @@ static int sock_close(struct inode *inode, struct file *filp)
990 printk(KERN_DEBUG "sock_close: NULL inode\n"); 990 printk(KERN_DEBUG "sock_close: NULL inode\n");
991 return 0; 991 return 0;
992 } 992 }
993 sock_fasync(-1, filp, 0);
994 sock_release(SOCKET_I(inode)); 993 sock_release(SOCKET_I(inode));
995 return 0; 994 return 0;
996} 995}
@@ -1427,8 +1426,8 @@ asmlinkage long sys_listen(int fd, int backlog)
1427 * clean when we restructure accept also. 1426 */
1428 */ 1427 */
1429 1428
1430long do_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1429asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
1431 int __user *upeer_addrlen, int flags) 1430 int __user *upeer_addrlen, int flags)
1432{ 1431{
1433 struct socket *sock, *newsock; 1432 struct socket *sock, *newsock;
1434 struct file *newfile; 1433 struct file *newfile;
@@ -1511,66 +1510,10 @@ out_fd:
1511 goto out_put; 1510 goto out_put;
1512} 1511}
1513 1512
1514#if 0
1515#ifdef HAVE_SET_RESTORE_SIGMASK
1516asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
1517 int __user *upeer_addrlen,
1518 const sigset_t __user *sigmask,
1519 size_t sigsetsize, int flags)
1520{
1521 sigset_t ksigmask, sigsaved;
1522 int ret;
1523
1524 if (sigmask) {
1525 /* XXX: Don't preclude handling different sized sigset_t's. */
1526 if (sigsetsize != sizeof(sigset_t))
1527 return -EINVAL;
1528 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
1529 return -EFAULT;
1530
1531 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
1532 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
1533 }
1534
1535 ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
1536
1537 if (ret < 0 && signal_pending(current)) {
1538 /*
1539 * Don't restore the signal mask yet. Let do_signal() deliver
1540 * the signal on the way back to userspace, before the signal
1541 * mask is restored.
1542 */
1543 if (sigmask) {
1544 memcpy(&current->saved_sigmask, &sigsaved,
1545 sizeof(sigsaved));
1546 set_restore_sigmask();
1547 }
1548 } else if (sigmask)
1549 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1550
1551 return ret;
1552}
1553#else
1554asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr,
1555 int __user *upeer_addrlen,
1556 const sigset_t __user *sigmask,
1557 size_t sigsetsize, int flags)
1558{
1559 /* The platform does not support restoring the signal mask in the
1560 * return path. So we do not allow using paccept() with a signal
1561 * mask. */
1562 if (sigmask)
1563 return -EINVAL;
1564
1565 return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags);
1566}
1567#endif
1568#endif
1569
1570asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1513asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
1571 int __user *upeer_addrlen) 1514 int __user *upeer_addrlen)
1572{ 1515{
1573 return do_accept(fd, upeer_sockaddr, upeer_addrlen, 0); 1516 return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
1574} 1517}
1575 1518
1576/* 1519/*
@@ -2097,7 +2040,7 @@ static const unsigned char nargs[19]={
2097 AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 2040 AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
2098 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 2041 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
2099 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), 2042 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
2100 AL(6) 2043 AL(4)
2101}; 2044};
2102 2045
2103#undef AL 2046#undef AL
@@ -2116,7 +2059,7 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2116 unsigned long a0, a1; 2059 unsigned long a0, a1;
2117 int err; 2060 int err;
2118 2061
2119 if (call < 1 || call > SYS_PACCEPT) 2062 if (call < 1 || call > SYS_ACCEPT4)
2120 return -EINVAL; 2063 return -EINVAL;
2121 2064
2122 /* copy_from_user should be SMP safe. */ 2065 /* copy_from_user should be SMP safe. */
@@ -2144,9 +2087,8 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2144 err = sys_listen(a0, a1); 2087 err = sys_listen(a0, a1);
2145 break; 2088 break;
2146 case SYS_ACCEPT: 2089 case SYS_ACCEPT:
2147 err = 2090 err = sys_accept4(a0, (struct sockaddr __user *)a1,
2148 do_accept(a0, (struct sockaddr __user *)a1, 2091 (int __user *)a[2], 0);
2149 (int __user *)a[2], 0);
2150 break; 2092 break;
2151 case SYS_GETSOCKNAME: 2093 case SYS_GETSOCKNAME:
2152 err = 2094 err =
@@ -2193,12 +2135,9 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2193 case SYS_RECVMSG: 2135 case SYS_RECVMSG:
2194 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); 2136 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
2195 break; 2137 break;
2196 case SYS_PACCEPT: 2138 case SYS_ACCEPT4:
2197 err = 2139 err = sys_accept4(a0, (struct sockaddr __user *)a1,
2198 sys_paccept(a0, (struct sockaddr __user *)a1, 2140 (int __user *)a[2], a[3]);
2199 (int __user *)a[2],
2200 (const sigset_t __user *) a[3],
2201 a[4], a[5]);
2202 break; 2141 break;
2203 default: 2142 default:
2204 err = -EINVAL; 2143 err = -EINVAL;
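
With sys_paccept() dropped and sys_accept4() taking its slot in the socketcall table (four arguments, hence the AL(4) entry), userspace can ask for non-blocking, close-on-exec accepted sockets atomically. A short self-connecting example of the call through glibc's accept4() wrapper, assuming a libc new enough to provide it:

#define _GNU_SOURCE
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_in addr;
    socklen_t alen = sizeof(addr);
    int lfd = socket(AF_INET, SOCK_STREAM, 0);
    int peer = socket(AF_INET, SOCK_STREAM, 0);

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);  /* port 0: kernel picks */

    if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) ||
        listen(lfd, 1) ||
        getsockname(lfd, (struct sockaddr *)&addr, &alen) ||
        connect(peer, (struct sockaddr *)&addr, sizeof(addr)))
        return 1;

    /* The fourth argument is what plain accept() could never express:
     * the new fd is born non-blocking and close-on-exec, with no window
     * for a racing fcntl() after the accept. */
    int cfd = accept4(lfd, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
    if (cfd < 0) {
        perror("accept4");
        return 1;
    }
    printf("accepted fd %d with SOCK_NONBLOCK|SOCK_CLOEXEC\n", cfd);

    close(cfd);
    close(peer);
    close(lfd);
    return 0;
}
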
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 436bf1b4b76c..cb216b2df666 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -228,19 +228,21 @@ static int
228rpcauth_prune_expired(struct list_head *free, int nr_to_scan) 228rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
229{ 229{
230 spinlock_t *cache_lock; 230 spinlock_t *cache_lock;
231 struct rpc_cred *cred; 231 struct rpc_cred *cred, *next;
232 unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM; 232 unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;
233 233
234 while (!list_empty(&cred_unused)) { 234 list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
235 cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); 235
236 /* Enforce a 60 second garbage collection moratorium */
237 if (time_in_range(cred->cr_expire, expired, jiffies) &&
238 test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
239 continue;
240
236 list_del_init(&cred->cr_lru); 241 list_del_init(&cred->cr_lru);
237 number_cred_unused--; 242 number_cred_unused--;
238 if (atomic_read(&cred->cr_count) != 0) 243 if (atomic_read(&cred->cr_count) != 0)
239 continue; 244 continue;
240 /* Enforce a 5 second garbage collection moratorium */ 245
241 if (time_in_range(cred->cr_expire, expired, jiffies) &&
242 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
243 continue;
244 cache_lock = &cred->cr_auth->au_credcache->lock; 246 cache_lock = &cred->cr_auth->au_credcache->lock;
245 spin_lock(cache_lock); 247 spin_lock(cache_lock);
246 if (atomic_read(&cred->cr_count) == 0) { 248 if (atomic_read(&cred->cr_count) == 0) {
@@ -453,7 +455,7 @@ need_lock:
453 } 455 }
454 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) 456 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
455 rpcauth_unhash_cred(cred); 457 rpcauth_unhash_cred(cred);
456 else if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { 458 if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
457 cred->cr_expire = jiffies; 459 cred->cr_expire = jiffies;
458 list_add_tail(&cred->cr_lru, &cred_unused); 460 list_add_tail(&cred->cr_lru, &cred_unused);
459 number_cred_unused++; 461 number_cred_unused++;
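
rpcauth_prune_expired() now walks the unused-credential list with list_for_each_entry_safe(), so an entry protected by the garbage-collection moratorium can simply be skipped instead of stalling the scan at the list head. The userspace sketch below shows the same safe-iteration shape on a hand-rolled singly linked list, with an integer timestamp standing in for cr_expire:

#include <stdio.h>
#include <stdlib.h>

struct cred {
    int id;
    int expire;                 /* pretend timestamp */
    struct cred *next;
};

/* Walk the unused list and free entries past their expiry.  The next
 * pointer is captured before the entry is touched, so an entry can be
 * freed or skipped without restarting the scan -- the property that
 * list_for_each_entry_safe() gives the kernel code. */
static void prune(struct cred **head, int now, int moratorium)
{
    struct cred **link = head;
    struct cred *c, *next;

    for (c = *head; c; c = next) {
        next = c->next;
        if (now - c->expire < moratorium) {     /* too fresh: keep it */
            link = &c->next;
            continue;
        }
        *link = next;
        printf("pruned cred %d\n", c->id);
        free(c);
    }
}

static struct cred *mkcred(int id, int expire, struct cred *next)
{
    struct cred *c = malloc(sizeof(*c));

    c->id = id;
    c->expire = expire;
    c->next = next;
    return c;
}

int main(void)
{
    struct cred *head = mkcred(1, 10, mkcred(2, 95, mkcred(3, 20, NULL)));

    prune(&head, 100, 60);      /* creds 1 and 3 go, cred 2 survives */
    for (struct cred *c = head; c; c = c->next)
        printf("kept cred %d\n", c->id);
    return 0;
}
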
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 744b79fdcb19..4028502f0528 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -133,13 +133,29 @@ static int
133generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags) 133generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
134{ 134{
135 struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base); 135 struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
136 int i;
136 137
137 if (gcred->acred.uid != acred->uid || 138 if (gcred->acred.uid != acred->uid ||
138 gcred->acred.gid != acred->gid || 139 gcred->acred.gid != acred->gid ||
139 gcred->acred.group_info != acred->group_info ||
140 gcred->acred.machine_cred != acred->machine_cred) 140 gcred->acred.machine_cred != acred->machine_cred)
141 return 0; 141 goto out_nomatch;
142
143 /* Optimisation in the case where pointers are identical... */
144 if (gcred->acred.group_info == acred->group_info)
145 goto out_match;
146
147 /* Slow path... */
148 if (gcred->acred.group_info->ngroups != acred->group_info->ngroups)
149 goto out_nomatch;
150 for (i = 0; i < gcred->acred.group_info->ngroups; i++) {
151 if (GROUP_AT(gcred->acred.group_info, i) !=
152 GROUP_AT(acred->group_info, i))
153 goto out_nomatch;
154 }
155out_match:
142 return 1; 156 return 1;
157out_nomatch:
158 return 0;
143} 159}
144 160
145void __init rpc_init_generic_auth(void) 161void __init rpc_init_generic_auth(void)
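
generic_match() no longer insists that the two group_info pointers be identical: pointer equality is kept as a fast path, and otherwise the groups are compared one by one. Roughly the same compare-by-value-with-identity-shortcut in plain C, with group lists reduced to int arrays:

#include <stdbool.h>
#include <stdio.h>

struct group_list {
    int ngroups;
    const int *groups;
};

/* Pointer identity is a cheap "definitely equal"; only fall back to the
 * element-wise walk when the two lists are distinct objects. */
static bool groups_match(const struct group_list *a, const struct group_list *b)
{
    if (a == b || a->groups == b->groups)
        return true;            /* fast path */
    if (a->ngroups != b->ngroups)
        return false;
    for (int i = 0; i < a->ngroups; i++)
        if (a->groups[i] != b->groups[i])
            return false;
    return true;
}

int main(void)
{
    static const int g1[] = { 4, 20, 24 };
    static const int g2[] = { 4, 20, 24 };
    struct group_list a = { 3, g1 }, b = { 3, g1 }, c = { 3, g2 };

    printf("same object:    %d\n", groups_match(&a, &a));
    printf("shared array:   %d\n", groups_match(&a, &b));
    printf("equal by value: %d\n", groups_match(&a, &c));
    return 0;
}
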
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 95293f549e9c..a1951dcc5776 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1183,7 +1183,11 @@ int svc_addsock(struct svc_serv *serv,
1183 else if (so->state > SS_UNCONNECTED) 1183 else if (so->state > SS_UNCONNECTED)
1184 err = -EISCONN; 1184 err = -EISCONN;
1185 else { 1185 else {
1186 svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS); 1186 if (!try_module_get(THIS_MODULE))
1187 err = -ENOENT;
1188 else
1189 svsk = svc_setup_socket(serv, so, &err,
1190 SVC_SOCK_DEFAULTS);
1187 if (svsk) { 1191 if (svsk) {
1188 struct sockaddr_storage addr; 1192 struct sockaddr_storage addr;
1189 struct sockaddr *sin = (struct sockaddr *)&addr; 1193 struct sockaddr *sin = (struct sockaddr *)&addr;
@@ -1196,7 +1200,8 @@ int svc_addsock(struct svc_serv *serv,
1196 spin_unlock_bh(&serv->sv_lock); 1200 spin_unlock_bh(&serv->sv_lock);
1197 svc_xprt_received(&svsk->sk_xprt); 1201 svc_xprt_received(&svsk->sk_xprt);
1198 err = 0; 1202 err = 0;
1199 } 1203 } else
1204 module_put(THIS_MODULE);
1200 } 1205 }
1201 if (err) { 1206 if (err) {
1202 sockfd_put(so); 1207 sockfd_put(so);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9a288d5eea64..0a50361e3d83 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -249,6 +249,7 @@ struct sock_xprt {
249 void (*old_data_ready)(struct sock *, int); 249 void (*old_data_ready)(struct sock *, int);
250 void (*old_state_change)(struct sock *); 250 void (*old_state_change)(struct sock *);
251 void (*old_write_space)(struct sock *); 251 void (*old_write_space)(struct sock *);
252 void (*old_error_report)(struct sock *);
252}; 253};
253 254
254/* 255/*
@@ -698,8 +699,9 @@ static int xs_tcp_send_request(struct rpc_task *task)
698 case -EAGAIN: 699 case -EAGAIN:
699 xs_nospace(task); 700 xs_nospace(task);
700 break; 701 break;
701 case -ECONNREFUSED:
702 case -ECONNRESET: 702 case -ECONNRESET:
703 xs_tcp_shutdown(xprt);
704 case -ECONNREFUSED:
703 case -ENOTCONN: 705 case -ENOTCONN:
704 case -EPIPE: 706 case -EPIPE:
705 status = -ENOTCONN; 707 status = -ENOTCONN;
@@ -742,6 +744,22 @@ out_release:
742 xprt_release_xprt(xprt, task); 744 xprt_release_xprt(xprt, task);
743} 745}
744 746
747static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
748{
749 transport->old_data_ready = sk->sk_data_ready;
750 transport->old_state_change = sk->sk_state_change;
751 transport->old_write_space = sk->sk_write_space;
752 transport->old_error_report = sk->sk_error_report;
753}
754
755static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
756{
757 sk->sk_data_ready = transport->old_data_ready;
758 sk->sk_state_change = transport->old_state_change;
759 sk->sk_write_space = transport->old_write_space;
760 sk->sk_error_report = transport->old_error_report;
761}
762
745/** 763/**
746 * xs_close - close a socket 764 * xs_close - close a socket
747 * @xprt: transport 765 * @xprt: transport
@@ -765,9 +783,8 @@ static void xs_close(struct rpc_xprt *xprt)
765 transport->sock = NULL; 783 transport->sock = NULL;
766 784
767 sk->sk_user_data = NULL; 785 sk->sk_user_data = NULL;
768 sk->sk_data_ready = transport->old_data_ready; 786
769 sk->sk_state_change = transport->old_state_change; 787 xs_restore_old_callbacks(transport, sk);
770 sk->sk_write_space = transport->old_write_space;
771 write_unlock_bh(&sk->sk_callback_lock); 788 write_unlock_bh(&sk->sk_callback_lock);
772 789
773 sk->sk_no_check = 0; 790 sk->sk_no_check = 0;
@@ -1180,6 +1197,28 @@ static void xs_tcp_state_change(struct sock *sk)
1180} 1197}
1181 1198
1182/** 1199/**
1200 * xs_tcp_error_report - callback mainly for catching RST events
1201 * @sk: socket
1202 */
1203static void xs_tcp_error_report(struct sock *sk)
1204{
1205 struct rpc_xprt *xprt;
1206
1207 read_lock(&sk->sk_callback_lock);
1208 if (sk->sk_err != ECONNRESET || sk->sk_state != TCP_ESTABLISHED)
1209 goto out;
1210 if (!(xprt = xprt_from_sock(sk)))
1211 goto out;
1212 dprintk("RPC: %s client %p...\n"
1213 "RPC: error %d\n",
1214 __func__, xprt, sk->sk_err);
1215
1216 xprt_force_disconnect(xprt);
1217out:
1218 read_unlock(&sk->sk_callback_lock);
1219}
1220
1221/**
1183 * xs_udp_write_space - callback invoked when socket buffer space 1222 * xs_udp_write_space - callback invoked when socket buffer space
1184 * becomes available 1223 * becomes available
1185 * @sk: socket whose state has changed 1224 * @sk: socket whose state has changed
@@ -1454,10 +1493,9 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1454 1493
1455 write_lock_bh(&sk->sk_callback_lock); 1494 write_lock_bh(&sk->sk_callback_lock);
1456 1495
1496 xs_save_old_callbacks(transport, sk);
1497
1457 sk->sk_user_data = xprt; 1498 sk->sk_user_data = xprt;
1458 transport->old_data_ready = sk->sk_data_ready;
1459 transport->old_state_change = sk->sk_state_change;
1460 transport->old_write_space = sk->sk_write_space;
1461 sk->sk_data_ready = xs_udp_data_ready; 1499 sk->sk_data_ready = xs_udp_data_ready;
1462 sk->sk_write_space = xs_udp_write_space; 1500 sk->sk_write_space = xs_udp_write_space;
1463 sk->sk_no_check = UDP_CSUM_NORCV; 1501 sk->sk_no_check = UDP_CSUM_NORCV;
@@ -1589,13 +1627,13 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1589 1627
1590 write_lock_bh(&sk->sk_callback_lock); 1628 write_lock_bh(&sk->sk_callback_lock);
1591 1629
1630 xs_save_old_callbacks(transport, sk);
1631
1592 sk->sk_user_data = xprt; 1632 sk->sk_user_data = xprt;
1593 transport->old_data_ready = sk->sk_data_ready;
1594 transport->old_state_change = sk->sk_state_change;
1595 transport->old_write_space = sk->sk_write_space;
1596 sk->sk_data_ready = xs_tcp_data_ready; 1633 sk->sk_data_ready = xs_tcp_data_ready;
1597 sk->sk_state_change = xs_tcp_state_change; 1634 sk->sk_state_change = xs_tcp_state_change;
1598 sk->sk_write_space = xs_tcp_write_space; 1635 sk->sk_write_space = xs_tcp_write_space;
1636 sk->sk_error_report = xs_tcp_error_report;
1599 sk->sk_allocation = GFP_ATOMIC; 1637 sk->sk_allocation = GFP_ATOMIC;
1600 1638
1601 /* socket options */ 1639 /* socket options */
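
Collecting the callback bookkeeping into xs_save_old_callbacks()/xs_restore_old_callbacks() keeps the save and restore sides in lockstep, which is what made it painless to hook sk_error_report as a fourth callback. A tiny userspace sketch of the same save-override-restore pattern on a struct of function pointers (all hooks and names invented for the demo):

#include <stdio.h>

struct hooks {
    void (*data_ready)(void);
    void (*error_report)(void);
};

static struct hooks saved;

static void default_data_ready(void)   { puts("default data_ready"); }
static void default_error_report(void) { puts("default error_report"); }
static void rpc_data_ready(void)       { puts("rpc data_ready"); }
static void rpc_error_report(void)     { puts("rpc error_report (catches RST)"); }

/* Save every hook we are about to override in one place... */
static void save_old_callbacks(const struct hooks *h)
{
    saved = *h;
}

/* ...and restore them from the same place, so adding a new hook can
 * never be forgotten on one side only. */
static void restore_old_callbacks(struct hooks *h)
{
    *h = saved;
}

int main(void)
{
    struct hooks sk = { default_data_ready, default_error_report };

    save_old_callbacks(&sk);
    sk.data_ready = rpc_data_ready;
    sk.error_report = rpc_error_report;
    sk.data_ready();
    sk.error_report();

    restore_old_callbacks(&sk);
    sk.data_ready();
    sk.error_report();
    return 0;
}
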
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dc504d308ec0..66d5ac4773ab 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1302,14 +1302,23 @@ static void unix_destruct_fds(struct sk_buff *skb)
1302 sock_wfree(skb); 1302 sock_wfree(skb);
1303} 1303}
1304 1304
1305static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) 1305static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1306{ 1306{
1307 int i; 1307 int i;
1308
1309 /*
1310 * Need to duplicate file references for the sake of garbage
1311 * collection. Otherwise a socket in the fps might become a
1312 * candidate for GC while the skb is not yet queued.
1313 */
1314 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1315 if (!UNIXCB(skb).fp)
1316 return -ENOMEM;
1317
1308 for (i=scm->fp->count-1; i>=0; i--) 1318 for (i=scm->fp->count-1; i>=0; i--)
1309 unix_inflight(scm->fp->fp[i]); 1319 unix_inflight(scm->fp->fp[i]);
1310 UNIXCB(skb).fp = scm->fp;
1311 skb->destructor = unix_destruct_fds; 1320 skb->destructor = unix_destruct_fds;
1312 scm->fp = NULL; 1321 return 0;
1313} 1322}
1314 1323
1315/* 1324/*
@@ -1334,6 +1343,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1334 1343
1335 if (NULL == siocb->scm) 1344 if (NULL == siocb->scm)
1336 siocb->scm = &tmp_scm; 1345 siocb->scm = &tmp_scm;
1346 wait_for_unix_gc();
1337 err = scm_send(sock, msg, siocb->scm); 1347 err = scm_send(sock, msg, siocb->scm);
1338 if (err < 0) 1348 if (err < 0)
1339 return err; 1349 return err;
@@ -1368,8 +1378,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1368 goto out; 1378 goto out;
1369 1379
1370 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1380 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1371 if (siocb->scm->fp) 1381 if (siocb->scm->fp) {
1372 unix_attach_fds(siocb->scm, skb); 1382 err = unix_attach_fds(siocb->scm, skb);
1383 if (err)
1384 goto out_free;
1385 }
1373 unix_get_secdata(siocb->scm, skb); 1386 unix_get_secdata(siocb->scm, skb);
1374 1387
1375 skb_reset_transport_header(skb); 1388 skb_reset_transport_header(skb);
@@ -1481,6 +1494,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1481 1494
1482 if (NULL == siocb->scm) 1495 if (NULL == siocb->scm)
1483 siocb->scm = &tmp_scm; 1496 siocb->scm = &tmp_scm;
1497 wait_for_unix_gc();
1484 err = scm_send(sock, msg, siocb->scm); 1498 err = scm_send(sock, msg, siocb->scm);
1485 if (err < 0) 1499 if (err < 0)
1486 return err; 1500 return err;
@@ -1538,8 +1552,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1538 size = min_t(int, size, skb_tailroom(skb)); 1552 size = min_t(int, size, skb_tailroom(skb));
1539 1553
1540 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1554 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1541 if (siocb->scm->fp) 1555 if (siocb->scm->fp) {
1542 unix_attach_fds(siocb->scm, skb); 1556 err = unix_attach_fds(siocb->scm, skb);
1557 if (err) {
1558 kfree_skb(skb);
1559 goto out_err;
1560 }
1561 }
1543 1562
1544 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { 1563 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1545 kfree_skb(skb); 1564 kfree_skb(skb);
@@ -2213,7 +2232,7 @@ static int unix_net_init(struct net *net)
2213#endif 2232#endif
2214 error = 0; 2233 error = 0;
2215out: 2234out:
2216 return 0; 2235 return error;
2217} 2236}
2218 2237
2219static void unix_net_exit(struct net *net) 2238static void unix_net_exit(struct net *net)
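
unix_attach_fds() now duplicates the file-reference list before the skb is queued and reports -ENOMEM instead of silently sharing the caller's list, so both send paths can unwind cleanly. A stripped-down userspace sketch of that duplicate-then-attach shape (the structures are placeholders, not the af_unix ones):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fd_list_demo {
    int count;
    int fds[8];
};

struct msg_demo {
    struct fd_list_demo *fp;    /* owned copy travelling with the message */
};

/* Take a private copy of the descriptor list before the message is
 * queued, and report failure instead of silently sharing the caller's
 * list -- the same shape as the reworked unix_attach_fds(). */
static int attach_fds(struct msg_demo *m, const struct fd_list_demo *src)
{
    m->fp = malloc(sizeof(*m->fp));
    if (!m->fp)
        return -ENOMEM;
    memcpy(m->fp, src, sizeof(*src));
    return 0;
}

int main(void)
{
    struct fd_list_demo scm = { .count = 2, .fds = { 3, 4 } };
    struct msg_demo skb = { 0 };
    int err = attach_fds(&skb, &scm);

    if (err) {                  /* caller frees the message and bails out */
        fprintf(stderr, "attach failed: %d\n", err);
        return 1;
    }
    printf("attached %d fds to the message\n", skb.fp->count);
    free(skb.fp);
    return 0;
}
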
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a27b84f740b..abb3ab34cb1e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -80,6 +80,7 @@
80#include <linux/file.h> 80#include <linux/file.h>
81#include <linux/proc_fs.h> 81#include <linux/proc_fs.h>
82#include <linux/mutex.h> 82#include <linux/mutex.h>
83#include <linux/wait.h>
83 84
84#include <net/sock.h> 85#include <net/sock.h>
85#include <net/af_unix.h> 86#include <net/af_unix.h>
@@ -91,6 +92,7 @@
91static LIST_HEAD(gc_inflight_list); 92static LIST_HEAD(gc_inflight_list);
92static LIST_HEAD(gc_candidates); 93static LIST_HEAD(gc_candidates);
93static DEFINE_SPINLOCK(unix_gc_lock); 94static DEFINE_SPINLOCK(unix_gc_lock);
95static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
94 96
95unsigned int unix_tot_inflight; 97unsigned int unix_tot_inflight;
96 98
@@ -186,8 +188,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
186 */ 188 */
187 struct sock *sk = unix_get_socket(*fp++); 189 struct sock *sk = unix_get_socket(*fp++);
188 if (sk) { 190 if (sk) {
189 hit = true; 191 struct unix_sock *u = unix_sk(sk);
190 func(unix_sk(sk)); 192
193 /*
194 * Ignore non-candidates, they could
195 * have been added to the queues after
196 * starting the garbage collection
197 */
198 if (u->gc_candidate) {
199 hit = true;
200 func(u);
201 }
191 } 202 }
192 } 203 }
193 if (hit && hitlist != NULL) { 204 if (hit && hitlist != NULL) {
@@ -249,24 +260,29 @@ static void inc_inflight_move_tail(struct unix_sock *u)
249{ 260{
250 atomic_long_inc(&u->inflight); 261 atomic_long_inc(&u->inflight);
251 /* 262 /*
252 * If this is still a candidate, move it to the end of the 263 * If this still might be part of a cycle, move it to the end
253 * list, so that it's checked even if it was already passed 264 * of the list, so that it's checked even if it was already
254 * over 265 * passed over
255 */ 266 */
256 if (u->gc_candidate) 267 if (u->gc_maybe_cycle)
257 list_move_tail(&u->link, &gc_candidates); 268 list_move_tail(&u->link, &gc_candidates);
258} 269}
259 270
260/* The external entry point: unix_gc() */ 271static bool gc_in_progress = false;
261 272
262void unix_gc(void) 273void wait_for_unix_gc(void)
263{ 274{
264 static bool gc_in_progress = false; 275 wait_event(unix_gc_wait, gc_in_progress == false);
276}
265 277
278/* The external entry point: unix_gc() */
279void unix_gc(void)
280{
266 struct unix_sock *u; 281 struct unix_sock *u;
267 struct unix_sock *next; 282 struct unix_sock *next;
268 struct sk_buff_head hitlist; 283 struct sk_buff_head hitlist;
269 struct list_head cursor; 284 struct list_head cursor;
285 LIST_HEAD(not_cycle_list);
270 286
271 spin_lock(&unix_gc_lock); 287 spin_lock(&unix_gc_lock);
272 288
@@ -282,10 +298,14 @@ void unix_gc(void)
282 * 298 *
283 * Holding unix_gc_lock will protect these candidates from 299 * Holding unix_gc_lock will protect these candidates from
284 * being detached, and hence from gaining an external 300 * being detached, and hence from gaining an external
285 * reference. This also means, that since there are no 301 * reference. Since there are no possible receivers, all
286 * possible receivers, the receive queues of these sockets are 302 * buffers currently on the candidates' queues stay there
287 * static during the GC, even though the dequeue is done 303 * during the garbage collection.
288 * before the detach without atomicity guarantees. 304 *
305 * We also know that no new candidate can be added onto the
306 * receive queues. Other, non candidate sockets _can_ be
307 * added to queue, so we must make sure only to touch
308 * candidates.
289 */ 309 */
290 list_for_each_entry_safe(u, next, &gc_inflight_list, link) { 310 list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
291 long total_refs; 311 long total_refs;
@@ -299,6 +319,7 @@ void unix_gc(void)
299 if (total_refs == inflight_refs) { 319 if (total_refs == inflight_refs) {
300 list_move_tail(&u->link, &gc_candidates); 320 list_move_tail(&u->link, &gc_candidates);
301 u->gc_candidate = 1; 321 u->gc_candidate = 1;
322 u->gc_maybe_cycle = 1;
302 } 323 }
303 } 324 }
304 325
@@ -325,14 +346,24 @@ void unix_gc(void)
325 list_move(&cursor, &u->link); 346 list_move(&cursor, &u->link);
326 347
327 if (atomic_long_read(&u->inflight) > 0) { 348 if (atomic_long_read(&u->inflight) > 0) {
328 list_move_tail(&u->link, &gc_inflight_list); 349 list_move_tail(&u->link, &not_cycle_list);
329 u->gc_candidate = 0; 350 u->gc_maybe_cycle = 0;
330 scan_children(&u->sk, inc_inflight_move_tail, NULL); 351 scan_children(&u->sk, inc_inflight_move_tail, NULL);
331 } 352 }
332 } 353 }
333 list_del(&cursor); 354 list_del(&cursor);
334 355
335 /* 356 /*
357 * not_cycle_list contains those sockets which do not make up a
358 * cycle. Restore these to the inflight list.
359 */
360 while (!list_empty(&not_cycle_list)) {
361 u = list_entry(not_cycle_list.next, struct unix_sock, link);
362 u->gc_candidate = 0;
363 list_move_tail(&u->link, &gc_inflight_list);
364 }
365
366 /*
336 * Now gc_candidates contains only garbage. Restore original 367 * Now gc_candidates contains only garbage. Restore original
337 * inflight counters for these as well, and remove the skbuffs 368 * inflight counters for these as well, and remove the skbuffs
338 * which are creating the cycle(s). 369 * which are creating the cycle(s).
@@ -351,6 +382,7 @@ void unix_gc(void)
351 /* All candidates should have been detached by now. */ 382 /* All candidates should have been detached by now. */
352 BUG_ON(!list_empty(&gc_candidates)); 383 BUG_ON(!list_empty(&gc_candidates));
353 gc_in_progress = false; 384 gc_in_progress = false;
385 wake_up(&unix_gc_wait);
354 386
355 out: 387 out:
356 spin_unlock(&unix_gc_lock); 388 spin_unlock(&unix_gc_lock);
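
wait_for_unix_gc() makes senders sleep on unix_gc_wait until the collector clears gc_in_progress, rather than letting them pile new in-flight descriptors onto a scan that is already running. A userspace analogue with a pthread mutex and condition variable standing in for the spinlock and wait queue:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gc_wait = PTHREAD_COND_INITIALIZER;
static bool gc_in_progress;

/* Senders call this before queueing descriptors: sleep while a
 * collection cycle is running, mirroring wait_for_unix_gc(). */
static void wait_for_gc(void)
{
    pthread_mutex_lock(&gc_lock);
    while (gc_in_progress)
        pthread_cond_wait(&gc_wait, &gc_lock);
    pthread_mutex_unlock(&gc_lock);
}

static void *collector(void *arg)
{
    (void)arg;

    pthread_mutex_lock(&gc_lock);
    gc_in_progress = true;
    pthread_mutex_unlock(&gc_lock);

    usleep(100 * 1000);             /* pretend to scan the candidates */

    pthread_mutex_lock(&gc_lock);
    gc_in_progress = false;
    pthread_cond_broadcast(&gc_wait);   /* wake_up(&unix_gc_wait) analogue */
    pthread_mutex_unlock(&gc_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, collector, NULL);
    usleep(10 * 1000);              /* let the collector start first */
    wait_for_gc();                  /* blocks until the cycle finishes */
    puts("gc finished, sender may proceed");
    pthread_join(t, NULL);
    return 0;
}
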
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 626dbb688499..eb3b1a9f9b12 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -343,9 +343,9 @@ static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
343 return 0; 343 return 0;
344 return -EALREADY; 344 return -EALREADY;
345 } 345 }
346 if (WARN_ON(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2)), 346 if (WARN(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2),
347 "Invalid Country IE regulatory hint passed " 347 "Invalid Country IE regulatory hint passed "
348 "to the wireless core\n") 348 "to the wireless core\n"))
349 return -EINVAL; 349 return -EINVAL;
350 /* We ignore Country IE hints for now, as we haven't yet 350 /* We ignore Country IE hints for now, as we haven't yet
351 * added the dot11MultiDomainCapabilityEnabled flag 351 * added the dot11MultiDomainCapabilityEnabled flag
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 832b47c1de80..fb216c9adf86 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -315,9 +315,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
315 return; 315 return;
316 } 316 }
317 317
318 spin_lock(&xfrm_policy_gc_lock); 318 spin_lock_bh(&xfrm_policy_gc_lock);
319 hlist_add_head(&policy->bydst, &xfrm_policy_gc_list); 319 hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
320 spin_unlock(&xfrm_policy_gc_lock); 320 spin_unlock_bh(&xfrm_policy_gc_lock);
321 321
322 schedule_work(&xfrm_policy_gc_work); 322 schedule_work(&xfrm_policy_gc_work);
323} 323}
@@ -817,6 +817,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
817 continue; 817 continue;
818 hlist_del(&pol->bydst); 818 hlist_del(&pol->bydst);
819 hlist_del(&pol->byidx); 819 hlist_del(&pol->byidx);
820 list_del(&pol->walk.all);
820 write_unlock_bh(&xfrm_policy_lock); 821 write_unlock_bh(&xfrm_policy_lock);
821 822
822 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 823 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
@@ -1251,6 +1252,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
1251 -EINVAL : -EAGAIN); 1252 -EINVAL : -EAGAIN);
1252 xfrm_state_put(x); 1253 xfrm_state_put(x);
1253 } 1254 }
1255 else if (error == -ESRCH)
1256 error = -EAGAIN;
1254 1257
1255 if (!tmpl->optional) 1258 if (!tmpl->optional)
1256 goto fail; 1259 goto fail;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4a8a1abb59ee..a278a6f3b991 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1816,7 +1816,7 @@ static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
1816 uk.family = k->family; 1816 uk.family = k->family;
1817 uk.reserved = k->reserved; 1817 uk.reserved = k->reserved;
1818 memcpy(&uk.local, &k->local, sizeof(uk.local)); 1818 memcpy(&uk.local, &k->local, sizeof(uk.local));
1819 memcpy(&uk.remote, &k->local, sizeof(uk.remote)); 1819 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
1820 1820
1821 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk); 1821 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
1822} 1822}