author    James Morris <jmorris@namei.org>  2009-02-05 19:01:45 -0500
committer James Morris <jmorris@namei.org>  2009-02-05 19:01:45 -0500
commit    cb5629b10d64a8006622ce3a52bc887d91057d69 (patch)
tree      7c06d8f30783115e3384721046258ce615b129c5 /net
parent    8920d5ad6ba74ae8ab020e90cc4d976980e68701 (diff)
parent    f01d1d546abb2f4028b5299092f529eefb01253a (diff)
Merge branch 'master' into next
Conflicts:
        fs/namei.c

Manually merged per:

diff --cc fs/namei.c
index 734f2b5,bbc15c2..0000000
--- a/fs/namei.c
+++ b/fs/namei.c
@@@ -860,9 -848,8 +849,10 @@@ static int __link_path_walk(const char
                nd->flags |= LOOKUP_CONTINUE;
                err = exec_permission_lite(inode);
                if (err == -EAGAIN)
-                       err = vfs_permission(nd, MAY_EXEC);
+                       err = inode_permission(nd->path.dentry->d_inode,
+                                              MAY_EXEC);
+               if (!err)
+                       err = ima_path_check(&nd->path, MAY_EXEC);
                if (err)
                        break;
@@@ -1525,14 -1506,9 +1509,14 @@@ int may_open(struct path *path, int acc
                flag &= ~O_TRUNC;
        }

-       error = vfs_permission(nd, acc_mode);
+       error = inode_permission(inode, acc_mode);
        if (error)
                return error;
+
-       error = ima_path_check(&nd->path,
++      error = ima_path_check(path,
+                              acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
+       if (error)
+               return error;
        /*
         * An append-only file must be opened in append mode for writing.
         */

Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c                              115
-rw-r--r--  net/8021q/vlan_dev.c                                14
-rw-r--r--  net/9p/Kconfig                                       2
-rw-r--r--  net/9p/client.c                                      2
-rw-r--r--  net/Kconfig                                         10
-rw-r--r--  net/Makefile                                         1
-rw-r--r--  net/appletalk/aarp.c                                 5
-rw-r--r--  net/bluetooth/bnep/bnep.h                            1
-rw-r--r--  net/bluetooth/bnep/core.c                           12
-rw-r--r--  net/bluetooth/bnep/netdev.c                         33
-rw-r--r--  net/bridge/br_netfilter.c                           18
-rw-r--r--  net/bridge/netfilter/ebtables.c                      5
-rw-r--r--  net/can/af_can.c                                    15
-rw-r--r--  net/can/bcm.c                                      201
-rw-r--r--  net/can/raw.c                                       15
-rw-r--r--  net/core/dev.c                                     346
-rw-r--r--  net/core/net_namespace.c                             2
-rw-r--r--  net/core/skbuff.c                                  106
-rw-r--r--  net/dcb/dcbnl.c                                     14
-rw-r--r--  net/dccp/Kconfig                                     4
-rw-r--r--  net/dccp/Makefile                                   15
-rw-r--r--  net/dccp/ackvec.h                                   49
-rw-r--r--  net/dccp/ccid.c                                    254
-rw-r--r--  net/dccp/ccid.h                                     14
-rw-r--r--  net/dccp/ccids/Kconfig                              79
-rw-r--r--  net/dccp/ccids/Makefile                              9
-rw-r--r--  net/dccp/ccids/ccid2.c                              22
-rw-r--r--  net/dccp/ccids/ccid3.c                              23
-rw-r--r--  net/dccp/ccids/lib/Makefile                          3
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c                   3
-rw-r--r--  net/dccp/ccids/lib/packet_history.c                  9
-rw-r--r--  net/dccp/ccids/lib/tfrc.c                           19
-rw-r--r--  net/dccp/ccids/lib/tfrc.h                           11
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c                   4
-rw-r--r--  net/dccp/dccp.h                                      2
-rw-r--r--  net/dccp/feat.c                                      6
-rw-r--r--  net/dccp/input.c                                     2
-rw-r--r--  net/dccp/proto.c                                     7
-rw-r--r--  net/dsa/slave.c                                     51
-rw-r--r--  net/ipv4/ipconfig.c                                  6
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c                  7
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c                  6
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c                     6
-rw-r--r--  net/ipv4/netfilter/iptable_security.c                6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c       5
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c         2
-rw-r--r--  net/ipv4/tcp.c                                      41
-rw-r--r--  net/ipv4/tcp_input.c                                 2
-rw-r--r--  net/ipv4/tcp_ipv4.c                                  2
-rw-r--r--  net/ipv4/udp.c                                      59
-rw-r--r--  net/ipv6/addrconf.c                                  2
-rw-r--r--  net/ipv6/af_inet6.c                                114
-rw-r--r--  net/ipv6/icmp.c                                      4
-rw-r--r--  net/ipv6/ip6_fib.c                                  15
-rw-r--r--  net/ipv6/ip6_input.c                                 2
-rw-r--r--  net/ipv6/ip6mr.c                                    24
-rw-r--r--  net/ipv6/ipv6_sockglue.c                             2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c       2
-rw-r--r--  net/ipv6/route.c                                    56
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c                           2
-rw-r--r--  net/ipv6/tcp_ipv6.c                                 47
-rw-r--r--  net/irda/ircomm/ircomm_tty.c                         5
-rw-r--r--  net/iucv/af_iucv.c                                  28
-rw-r--r--  net/iucv/iucv.c                                     25
-rw-r--r--  net/key/af_key.c                                     1
-rw-r--r--  net/mac80211/ht.c                                    2
-rw-r--r--  net/mac80211/iface.c                                 3
-rw-r--r--  net/mac80211/mesh_plink.c                            1
-rw-r--r--  net/mac80211/mlme.c                                  4
-rw-r--r--  net/mac80211/rc80211_minstrel.c                     10
-rw-r--r--  net/mac80211/sta_info.h                              1
-rw-r--r--  net/mac80211/tx.c                                    6
-rw-r--r--  net/netfilter/nf_conntrack_core.c                    4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c                 5
-rw-r--r--  net/netfilter/x_tables.c                             8
-rw-r--r--  net/netfilter/xt_time.c                             11
-rw-r--r--  net/netlink/genetlink.c                              1
-rw-r--r--  net/packet/af_packet.c                              17
-rw-r--r--  net/phonet/pep-gprs.c                               12
-rw-r--r--  net/rfkill/rfkill.c                                  4
-rw-r--r--  net/sched/cls_u32.c                                  3
-rw-r--r--  net/sched/sch_htb.c                                 15
-rw-r--r--  net/sched/sch_sfq.c                                  2
-rw-r--r--  net/sched/sch_teql.c                                20
-rw-r--r--  net/sctp/auth.c                                      6
-rw-r--r--  net/sctp/input.c                                    13
-rw-r--r--  net/sctp/output.c                                    7
-rw-r--r--  net/sctp/outqueue.c                                  3
-rw-r--r--  net/sctp/sm_statefuns.c                              6
-rw-r--r--  net/sctp/socket.c                                    2
-rw-r--r--  net/sctp/tsnmap.c                                    2
-rw-r--r--  net/socket.c                                        76
-rw-r--r--  net/sunrpc/Kconfig                                  78
-rw-r--r--  net/sunrpc/cache.c                                  20
-rw-r--r--  net/sunrpc/rpc_pipe.c                                2
-rw-r--r--  net/sunrpc/stats.c                                   6
-rw-r--r--  net/sunrpc/svc.c                                    14
-rw-r--r--  net/sunrpc/svc_xprt.c                               58
-rw-r--r--  net/sunrpc/svcauth.c                                14
-rw-r--r--  net/sunrpc/svcauth_unix.c                           12
-rw-r--r--  net/sunrpc/svcsock.c                                30
-rw-r--r--  net/tipc/bcast.h                                     2
-rw-r--r--  net/unix/af_unix.c                                   4
-rw-r--r--  net/wimax/Kconfig                                   52
-rw-r--r--  net/wimax/Makefile                                  13
-rw-r--r--  net/wimax/debug-levels.h                            42
-rw-r--r--  net/wimax/debugfs.c                                 79
-rw-r--r--  net/wimax/id-table.c                               144
-rw-r--r--  net/wimax/op-msg.c                                 421
-rw-r--r--  net/wimax/op-reset.c                               143
-rw-r--r--  net/wimax/op-rfkill.c                              532
-rw-r--r--  net/wimax/stack.c                                  612
-rw-r--r--  net/wimax/wimax-internal.h                          91
-rw-r--r--  net/wireless/reg.c                                 142
-rw-r--r--  net/wireless/wext.c                                  4
-rw-r--r--  net/xfrm/xfrm_ipcomp.c                               1
116 files changed, 3687 insertions, 1047 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..e9db889d6222 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
 #include <linux/if_vlan.h>
 #include "vlan.h"
 
-struct vlan_hwaccel_cb {
-        struct net_device *dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
-        return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
                       u16 vlan_tci, int polling)
 {
-        struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
-        if (skb_bond_should_drop(skb)) {
-                dev_kfree_skb_any(skb);
-                return NET_RX_DROP;
-        }
+        if (skb_bond_should_drop(skb))
+                goto drop;
 
         skb->vlan_tci = vlan_tci;
-        cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+        skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+        if (!skb->dev)
+                goto drop;
 
         return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+        dev_kfree_skb_any(skb);
+        return NET_RX_DROP;
 }
 EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
-        struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-        struct net_device *dev = cb->dev;
+        struct net_device *dev = skb->dev;
         struct net_device_stats *stats;
 
+        skb->dev = vlan_dev_info(dev)->real_dev;
         netif_nit_deliver(skb);
 
-        if (dev == NULL) {
-                kfree_skb(skb);
-                return -1;
-        }
-
         skb->dev = dev;
         skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
         skb->vlan_tci = 0;
@@ -73,10 +62,86 @@ struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
         return vlan_dev_info(dev)->real_dev;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_real_dev);
+EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
         return vlan_dev_info(dev)->vlan_id;
 }
-EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+EXPORT_SYMBOL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+                           unsigned int vlan_tci, struct sk_buff *skb)
+{
+        struct sk_buff *p;
+
+        if (skb_bond_should_drop(skb))
+                goto drop;
+
+        skb->vlan_tci = vlan_tci;
+        skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+        if (!skb->dev)
+                goto drop;
+
+        for (p = napi->gro_list; p; p = p->next) {
+                NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+                NAPI_GRO_CB(p)->flush = 0;
+        }
+
+        return dev_gro_receive(napi, skb);
+
+drop:
+        return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+                     unsigned int vlan_tci, struct sk_buff *skb)
+{
+        int err = NET_RX_SUCCESS;
+
+        switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+        case -1:
+                return netif_receive_skb(skb);
+
+        case 2:
+                err = NET_RX_DROP;
+                /* fall through */
+
+        case 1:
+                kfree_skb(skb);
+                break;
+        }
+
+        return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+                   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+        struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+        int err = NET_RX_DROP;
+
+        if (!skb)
+                goto out;
+
+        err = NET_RX_SUCCESS;
+
+        switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+        case -1:
+                return netif_receive_skb(skb);
+
+        case 2:
+                err = NET_RX_DROP;
+                /* fall through */
+
+        case 1:
+                napi_reuse_skb(napi, skb);
+                break;
+        }
+
+out:
+        return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);
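
[Editor's note, not part of the commit: the two entry points exported above give
VLAN-accelerated drivers a GRO receive path; vlan_gro_receive() consumes the skb
in every case. A minimal sketch of how a driver's NAPI receive routine might
choose between them — the mydrv_* naming is hypothetical.]

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Hypothetical NAPI rx helper; not from this commit. */
static void mydrv_receive(struct napi_struct *napi, struct vlan_group *grp,
                          struct sk_buff *skb, u16 tci, bool vlan_tagged)
{
        if (vlan_tagged && grp)
                /* VLAN GRO path: drops/frees the skb internally on error */
                vlan_gro_receive(napi, grp, tci, skb);
        else
                /* plain GRO path for untagged frames */
                napi_gro_receive(napi, skb);
}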
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
         return err;
 }
 
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+        struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+        const struct net_device_ops *ops = real_dev->netdev_ops;
+        int err = 0;
+
+        if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+                err = ops->ndo_neigh_setup(dev, pa);
+
+        return err;
+}
+
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = {
         .ndo_set_multicast_list = vlan_dev_set_rx_mode,
         .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
         .ndo_do_ioctl           = vlan_dev_ioctl,
+        .ndo_neigh_setup        = vlan_dev_neigh_setup,
 };
 
 static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
         .ndo_set_multicast_list = vlan_dev_set_rx_mode,
         .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
         .ndo_do_ioctl           = vlan_dev_ioctl,
+        .ndo_neigh_setup        = vlan_dev_neigh_setup,
 };
 
 void vlan_setup(struct net_device *dev)
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 0663f99e977a..7ed75c7bd5d1 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -23,7 +23,7 @@ config NET_9P_VIRTIO
           guest partitions and a host partition.
 
 config NET_9P_RDMA
-        depends on INET && INFINIBAND && EXPERIMENTAL
+        depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
         tristate "9P RDMA Transport (Experimental)"
         help
           This builds support for an RDMA transport.
diff --git a/net/9p/client.c b/net/9p/client.c
index 821f1ec0b2c3..1eb580c38fbb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -618,7 +618,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
                 return ERR_PTR(-ENOMEM);
 
         ret = p9_idpool_get(clnt->fidpool);
-        if (fid->fid < 0) {
+        if (ret < 0) {
                 ret = -ENOSPC;
                 goto error;
         }
diff --git a/net/Kconfig b/net/Kconfig
index 6ec2cce7c167..cdb8fdef6c4a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -24,14 +24,6 @@ if NET
 
 menu "Networking options"
 
-config NET_NS
-        bool "Network namespace support"
-        default n
-        depends on EXPERIMENTAL && NAMESPACES
-        help
-          Allow user space to create what appear to be multiple instances
-          of the network stack.
-
 config COMPAT_NET_DEV_OPS
         def_bool y
 
@@ -254,6 +246,8 @@ source "net/mac80211/Kconfig"
 
 endif # WIRELESS
 
+source "net/wimax/Kconfig"
+
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
 
diff --git a/net/Makefile b/net/Makefile
index ba4460432b7c..0fcce89d7169 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -63,3 +63,4 @@ endif
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL) += sysctl_net.o
 endif
+obj-$(CONFIG_WIMAX) += wimax/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface)
 {
         struct ifreq atreq;
         struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
+        const struct net_device_ops *ops = iface->dev->netdev_ops;
 
         sa->sat_addr.s_node = iface->address.s_node;
         sa->sat_addr.s_net = ntohs(iface->address.s_net);
 
         /* We pass the Net:Node to the drivers/cards by a Device ioctl. */
-        if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
-                (void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
+        if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
+                ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
                 if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
                     iface->address.s_node != sa->sat_addr.s_node)
                         iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session {
 
         struct socket    *sock;
         struct net_device *dev;
-        struct net_device_stats stats;
 };
 
 void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
         struct sk_buff *nskb;
         u8 type;
 
-        s->stats.rx_bytes += skb->len;
+        dev->stats.rx_bytes += skb->len;
 
         type = *(u8 *) skb->data; skb_pull(skb, 1);
 
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
          * may not be modified and because of the alignment requirements. */
         nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
         if (!nskb) {
-                s->stats.rx_dropped++;
+                dev->stats.rx_dropped++;
                 kfree_skb(skb);
                 return -ENOMEM;
         }
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
         skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
         kfree_skb(skb);
 
-        s->stats.rx_packets++;
+        dev->stats.rx_packets++;
         nskb->ip_summed = CHECKSUM_NONE;
         nskb->protocol  = eth_type_trans(nskb, dev);
         netif_rx_ni(nskb);
         return 0;
 
 badframe:
-        s->stats.rx_errors++;
+        dev->stats.rx_errors++;
         kfree_skb(skb);
         return 0;
 }
@@ -448,8 +448,8 @@ send:
         kfree_skb(skb);
 
         if (len > 0) {
-                s->stats.tx_bytes += len;
-                s->stats.tx_packets++;
+                s->dev->stats.tx_bytes += len;
+                s->dev->stats.tx_packets++;
                 return 0;
         }
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev)
         return 0;
 }
 
-static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
-{
-        struct bnep_session *s = netdev_priv(dev);
-        return &s->stats;
-}
-
 static void bnep_net_set_mc_list(struct net_device *dev)
 {
 #ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev)
         netif_wake_queue(dev);
 }
 
-static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-        return -EINVAL;
-}
-
 #ifdef CONFIG_BT_BNEP_MC_FILTER
 static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
         return 0;
 }
 
+static const struct net_device_ops bnep_netdev_ops = {
+        .ndo_open            = bnep_net_open,
+        .ndo_stop            = bnep_net_close,
+        .ndo_start_xmit      = bnep_net_xmit,
+        .ndo_validate_addr   = eth_validate_addr,
+        .ndo_set_multicast_list = bnep_net_set_mc_list,
+        .ndo_set_mac_address = bnep_net_set_mac_addr,
+        .ndo_tx_timeout      = bnep_net_timeout,
+        .ndo_change_mtu      = eth_change_mtu,
+
+};
+
 void bnep_net_setup(struct net_device *dev)
 {
 
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev)
         dev->addr_len = ETH_ALEN;
 
         ether_setup(dev);
-
-        dev->open            = bnep_net_open;
-        dev->stop            = bnep_net_close;
-        dev->hard_start_xmit = bnep_net_xmit;
-        dev->get_stats       = bnep_net_get_stats;
-        dev->do_ioctl        = bnep_net_ioctl;
-        dev->set_mac_address = bnep_net_set_mac_addr;
-        dev->set_multicast_list = bnep_net_set_mc_list;
+        dev->netdev_ops = &bnep_netdev_ops;
 
         dev->watchdog_timeo  = HZ * 2;
-        dev->tx_timeout      = bnep_net_timeout;
 }
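
[Editor's note, not part of the commit: the bnep change above follows the
generic net_device_ops conversion of this merge window — callbacks move off
struct net_device into one const ops table, and private stats counters move to
the built-in dev->stats. A reduced sketch of the pattern; the mydev_* names are
invented.]

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int mydev_open(struct net_device *dev) { return 0; }
static int mydev_stop(struct net_device *dev) { return 0; }

static int mydev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev->stats.tx_packets++;        /* built-in stats, as bnep uses now */
        dev->stats.tx_bytes += skb->len;
        dev_kfree_skb(skb);
        return 0;
}

static const struct net_device_ops mydev_netdev_ops = {
        .ndo_open       = mydev_open,
        .ndo_stop       = mydev_stop,
        .ndo_start_xmit = mydev_xmit,
};

static void mydev_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->netdev_ops = &mydev_netdev_ops;    /* replaces dev->open etc. */
}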
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a65e43a17fbb..cf754ace0b75 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -58,11 +58,11 @@ static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 1;
-static int brnf_filter_pppoe_tagged __read_mostly = 1;
+static int brnf_filter_vlan_tagged __read_mostly = 0;
+static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #else
-#define brnf_filter_vlan_tagged 1
-#define brnf_filter_pppoe_tagged 1
+#define brnf_filter_vlan_tagged 0
+#define brnf_filter_pppoe_tagged 0
 #endif
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -686,8 +686,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
         if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
             IS_PPPOE_IP(skb))
                 pf = PF_INET;
-        else
+        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+                 IS_PPPOE_IPV6(skb))
                 pf = PF_INET6;
+        else
+                return NF_ACCEPT;
 
         nf_bridge_pull_encap_header(skb);
 
@@ -828,8 +831,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
         if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
             IS_PPPOE_IP(skb))
                 pf = PF_INET;
-        else
+        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+                 IS_PPPOE_IPV6(skb))
                 pf = PF_INET6;
+        else
+                return NF_ACCEPT;
 
 #ifdef CONFIG_NETFILTER_DEBUG
         if (skb->dst == NULL) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index fa108c46e851..820252aee81f 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -79,18 +79,19 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
 {
         par->match     = m->u.match;
         par->matchinfo = m->data;
-        return m->u.match->match(skb, par);
+        return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 }
 
 static inline int ebt_dev_check(char *entry, const struct net_device *device)
 {
         int i = 0;
-        const char *devname = device->name;
+        const char *devname;
 
         if (*entry == '\0')
                 return 0;
         if (!device)
                 return 1;
+        devname = device->name;
         /* 1 is the wildcard token */
         while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
                 i++;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
  * filter for error frames (CAN_ERR_FLAG bit set in mask).
  *
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing it's task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
  * Return:
  *  0 on success
  *  -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
 
 static inline void deliver(struct sk_buff *skb, struct receiver *r)
 {
-        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
-        if (clone) {
-                clone->sk = skb->sk;
-                r->func(clone, r->data);
-                r->matches++;
-        }
+        r->func(skb, r->data);
+        r->matches++;
 }
 
 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
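
[Editor's note, not part of the commit: the comment added above changes the
receive-callback contract — the CAN core no longer clones per receiver, so a
callback registered via can_rx_register() that wants to keep the frame must
clone it itself. A hypothetical callback honouring that rule; my_* names are
invented.]

#include <linux/can/core.h>
#include <linux/skbuff.h>

static void my_can_rcv(struct sk_buff *skb, void *data)
{
        struct sk_buff *copy;

        /* 'skb' is only valid for the duration of this call and must
         * not be freed here; clone it to keep the frame around. */
        copy = skb_clone(skb, GFP_ATOMIC);
        if (!copy)
                return;

        /* ... hand 'copy' to a queue for later processing; whoever
         * consumes the clone calls kfree_skb() on it ... */
        kfree_skb(copy);        /* placeholder for real consumption */
}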
diff --git a/net/can/bcm.c b/net/can/bcm.c
index da0d426c0ce4..b7c7d4651136 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -70,7 +70,7 @@
 
 #define CAN_BCM_VERSION CAN_VERSION
 static __initdata const char banner[] = KERN_INFO
-        "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
+        "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
 
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -90,6 +90,7 @@ struct bcm_op {
         unsigned long frames_abs, frames_filtered;
         struct timeval ival1, ival2;
         struct hrtimer timer, thrtimer;
+        struct tasklet_struct tsklet, thrtsklet;
         ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
         int rx_ifindex;
         int count;
@@ -341,19 +342,15 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
         }
 }
 
-/*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
- */
-static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_tx_timeout_tsklet(unsigned long data)
 {
-        struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
-        enum hrtimer_restart ret = HRTIMER_NORESTART;
+        struct bcm_op *op = (struct bcm_op *)data;
+        struct bcm_msg_head msg_head;
 
         if (op->kt_ival1.tv64 && (op->count > 0)) {
 
                 op->count--;
                 if (!op->count && (op->flags & TX_COUNTEVT)) {
-                        struct bcm_msg_head msg_head;
 
                         /* create notification to user */
                         msg_head.opcode  = TX_EXPIRED;
@@ -372,20 +369,32 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
 
                 /* send (next) frame */
                 bcm_can_tx(op);
-                hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
-                ret = HRTIMER_RESTART;
+                hrtimer_start(&op->timer,
+                              ktime_add(ktime_get(), op->kt_ival1),
+                              HRTIMER_MODE_ABS);
 
         } else {
                 if (op->kt_ival2.tv64) {
 
                         /* send (next) frame */
                         bcm_can_tx(op);
-                        hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
-                        ret = HRTIMER_RESTART;
+                        hrtimer_start(&op->timer,
+                                      ktime_add(ktime_get(), op->kt_ival2),
+                                      HRTIMER_MODE_ABS);
                 }
         }
+}
 
-        return ret;
+/*
+ * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
+ */
+static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+{
+        struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+        tasklet_schedule(&op->tsklet);
+
+        return HRTIMER_NORESTART;
 }
 
 /*
@@ -402,6 +411,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
         if (op->frames_filtered > ULONG_MAX/100)
                 op->frames_filtered = op->frames_abs = 0;
 
+        /* this element is not throttled anymore */
+        data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
+
         head.opcode  = RX_CHANGED;
         head.flags   = op->flags;
         head.count   = op->count;
@@ -420,37 +432,32 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
  */
 static void bcm_rx_update_and_send(struct bcm_op *op,
                                    struct can_frame *lastdata,
-                                   struct can_frame *rxdata)
+                                   const struct can_frame *rxdata)
 {
         memcpy(lastdata, rxdata, CFSIZ);
 
-        /* mark as used */
-        lastdata->can_dlc |= RX_RECV;
+        /* mark as used and throttled by default */
+        lastdata->can_dlc |= (RX_RECV|RX_THR);
 
-        /* throtteling mode inactive OR data update already on the run ? */
-        if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
+        /* throtteling mode inactive ? */
+        if (!op->kt_ival2.tv64) {
                 /* send RX_CHANGED to the user immediately */
-                bcm_rx_changed(op, rxdata);
+                bcm_rx_changed(op, lastdata);
                 return;
         }
 
-        if (hrtimer_active(&op->thrtimer)) {
-                /* mark as 'throttled' */
-                lastdata->can_dlc |= RX_THR;
+        /* with active throttling timer we are just done here */
+        if (hrtimer_active(&op->thrtimer))
                 return;
-        }
 
-        if (!op->kt_lastmsg.tv64) {
-                /* send first RX_CHANGED to the user immediately */
-                bcm_rx_changed(op, rxdata);
-                op->kt_lastmsg = ktime_get();
-                return;
-        }
+        /* first receiption with enabled throttling mode */
+        if (!op->kt_lastmsg.tv64)
+                goto rx_changed_settime;
 
+        /* got a second frame inside a potential throttle period? */
         if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
             ktime_to_us(op->kt_ival2)) {
-                /* mark as 'throttled' and start timer */
-                lastdata->can_dlc |= RX_THR;
+                /* do not send the saved data - only start throttle timer */
                 hrtimer_start(&op->thrtimer,
                               ktime_add(op->kt_lastmsg, op->kt_ival2),
                               HRTIMER_MODE_ABS);
@@ -458,7 +465,8 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
         }
 
         /* the gap was that big, that throttling was not needed here */
-        bcm_rx_changed(op, rxdata);
+rx_changed_settime:
+        bcm_rx_changed(op, lastdata);
         op->kt_lastmsg = ktime_get();
 }
 
@@ -467,7 +475,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
  * received data stored in op->last_frames[]
  */
 static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
-                                struct can_frame *rxdata)
+                                const struct can_frame *rxdata)
 {
         /*
          * no one uses the MSBs of can_dlc for comparation,
@@ -511,14 +519,12 @@ static void bcm_rx_starttimer(struct bcm_op *op)
                 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
 }
 
-/*
- * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
- */
-static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_rx_timeout_tsklet(unsigned long data)
 {
-        struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+        struct bcm_op *op = (struct bcm_op *)data;
         struct bcm_msg_head msg_head;
 
+        /* create notification to user */
         msg_head.opcode  = RX_TIMEOUT;
         msg_head.flags   = op->flags;
         msg_head.count   = op->count;
@@ -528,6 +534,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
         msg_head.nframes = 0;
 
         bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
+/*
+ * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
+ */
+static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+{
+        struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+        /* schedule before NET_RX_SOFTIRQ */
+        tasklet_hi_schedule(&op->tsklet);
 
         /* no restart of the timer is done here! */
 
@@ -541,9 +558,25 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 }
 
 /*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+{
+        if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
+                if (update)
+                        bcm_rx_changed(op, &op->last_frames[index]);
+                return 1;
+        }
+        return 0;
+}
+
+/*
  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
+ *
+ * update == 0 : just check if throttled data is available  (any irq context)
+ * update == 1 : check and send throttled data to userspace (soft_irq context)
  */
-static int bcm_rx_thr_flush(struct bcm_op *op)
+static int bcm_rx_thr_flush(struct bcm_op *op, int update)
 {
         int updated = 0;
 
@@ -551,27 +584,25 @@ static int bcm_rx_thr_flush(struct bcm_op *op)
                 int i;
 
                 /* for MUX filter we start at index 1 */
-                for (i = 1; i < op->nframes; i++) {
-                        if ((op->last_frames) &&
-                            (op->last_frames[i].can_dlc & RX_THR)) {
-                                op->last_frames[i].can_dlc &= ~RX_THR;
-                                bcm_rx_changed(op, &op->last_frames[i]);
-                                updated++;
-                        }
-                }
+                for (i = 1; i < op->nframes; i++)
+                        updated += bcm_rx_do_flush(op, update, i);
 
         } else {
                 /* for RX_FILTER_ID and simple filter */
-                if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
-                        op->last_frames[0].can_dlc &= ~RX_THR;
-                        bcm_rx_changed(op, &op->last_frames[0]);
-                        updated++;
-                }
+                updated += bcm_rx_do_flush(op, update, 0);
         }
 
         return updated;
 }
 
+static void bcm_rx_thr_tsklet(unsigned long data)
+{
+        struct bcm_op *op = (struct bcm_op *)data;
+
+        /* push the changed data to the userspace */
+        bcm_rx_thr_flush(op, 1);
+}
+
 /*
  * bcm_rx_thr_handler - the time for blocked content updates is over now:
  *                      Check for throttled data and send it to the userspace
@@ -580,7 +611,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 {
         struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
 
-        if (bcm_rx_thr_flush(op)) {
+        tasklet_schedule(&op->thrtsklet);
+
+        if (bcm_rx_thr_flush(op, 0)) {
                 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
                 return HRTIMER_RESTART;
         } else {
@@ -596,29 +629,21 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
         struct bcm_op *op = (struct bcm_op *)data;
-        struct can_frame rxframe;
+        const struct can_frame *rxframe = (struct can_frame *)skb->data;
         int i;
 
         /* disable timeout */
         hrtimer_cancel(&op->timer);
 
-        if (skb->len == sizeof(rxframe)) {
-                memcpy(&rxframe, skb->data, sizeof(rxframe));
-                /* save rx timestamp */
-                op->rx_stamp = skb->tstamp;
-                /* save originator for recvfrom() */
-                op->rx_ifindex = skb->dev->ifindex;
-                /* update statistics */
-                op->frames_abs++;
-                kfree_skb(skb);
-
-        } else {
-                kfree_skb(skb);
+        if (op->can_id != rxframe->can_id)
                 return;
-        }
 
-        if (op->can_id != rxframe.can_id)
-                return;
+        /* save rx timestamp */
+        op->rx_stamp = skb->tstamp;
+        /* save originator for recvfrom() */
+        op->rx_ifindex = skb->dev->ifindex;
+        /* update statistics */
+        op->frames_abs++;
 
         if (op->flags & RX_RTR_FRAME) {
                 /* send reply for RTR-request (placed in op->frames[0]) */
@@ -628,16 +653,14 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 
         if (op->flags & RX_FILTER_ID) {
                 /* the easiest case */
-                bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
-                bcm_rx_starttimer(op);
-                return;
+                bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+                goto rx_starttimer;
         }
 
         if (op->nframes == 1) {
                 /* simple compare with index 0 */
-                bcm_rx_cmp_to_index(op, 0, &rxframe);
-                bcm_rx_starttimer(op);
-                return;
+                bcm_rx_cmp_to_index(op, 0, rxframe);
+                goto rx_starttimer;
         }
 
         if (op->nframes > 1) {
@@ -649,15 +672,17 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
                  */
 
                 for (i = 1; i < op->nframes; i++) {
-                        if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
+                        if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
                             (GET_U64(&op->frames[0]) &
                              GET_U64(&op->frames[i]))) {
-                                bcm_rx_cmp_to_index(op, i, &rxframe);
+                                bcm_rx_cmp_to_index(op, i, rxframe);
                                 break;
                         }
                 }
-                bcm_rx_starttimer(op);
         }
+
+rx_starttimer:
+        bcm_rx_starttimer(op);
 }
 
 /*
@@ -681,6 +706,12 @@ static void bcm_remove_op(struct bcm_op *op)
         hrtimer_cancel(&op->timer);
         hrtimer_cancel(&op->thrtimer);
 
+        if (op->tsklet.func)
+                tasklet_kill(&op->tsklet);
+
+        if (op->thrtsklet.func)
+                tasklet_kill(&op->thrtsklet);
+
         if ((op->frames) && (op->frames != &op->sframe))
                 kfree(op->frames);
 
@@ -891,6 +922,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
         hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         op->timer.function = bcm_tx_timeout_handler;
 
+        /* initialize tasklet for tx countevent notification */
+        tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
+                     (unsigned long) op);
+
         /* currently unused in tx_ops */
         hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 
@@ -1054,9 +1089,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
         hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         op->timer.function = bcm_rx_timeout_handler;
 
+        /* initialize tasklet for rx timeout notification */
+        tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
+                     (unsigned long) op);
+
         hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         op->thrtimer.function = bcm_rx_thr_handler;
 
+        /* initialize tasklet for rx throttle handling */
+        tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
+                     (unsigned long) op);
+
         /* add this bcm_op to the list of the rx_ops */
         list_add(&op->list, &bo->rx_ops);
 
@@ -1102,7 +1145,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
          */
         op->kt_lastmsg = ktime_set(0, 0);
         hrtimer_cancel(&op->thrtimer);
-        bcm_rx_thr_flush(op);
+        bcm_rx_thr_flush(op, 1);
         }
 
         if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
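
[Editor's note, not part of the commit: the bcm changes above all follow one
pattern — the hrtimer callback, which runs in hard-irq context, now only
schedules a tasklet, and the tasklet does the heavier work of building and
sending notifications. The skeleton, reduced to its parts; my_op/my_* names are
invented.]

#include <linux/hrtimer.h>
#include <linux/interrupt.h>

struct my_op {
        struct hrtimer timer;
        struct tasklet_struct tsklet;
};

static void my_op_tsklet(unsigned long data)
{
        struct my_op *op = (struct my_op *)data;

        /* heavier work, e.g. composing and sending a notification */
        (void)op;
}

static enum hrtimer_restart my_op_timer(struct hrtimer *hrtimer)
{
        struct my_op *op = container_of(hrtimer, struct my_op, timer);

        tasklet_schedule(&op->tsklet); /* defer out of hard-irq context */
        return HRTIMER_NORESTART;      /* tasklet re-arms the timer if needed */
}

static void my_op_init(struct my_op *op)
{
        hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        op->timer.function = my_op_timer;
        tasklet_init(&op->tsklet, my_op_tsklet, (unsigned long)op);
}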
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
         struct raw_sock *ro = raw_sk(sk);
         struct sockaddr_can *addr;
 
-        if (!ro->recv_own_msgs) {
-                /* check the received tx sock reference */
-                if (skb->sk == sk) {
-                        kfree_skb(skb);
-                        return;
-                }
-        }
+        /* check the received tx sock reference */
+        if (!ro->recv_own_msgs && skb->sk == sk)
+                return;
+
+        /* clone the given skb to be able to enqueue it into the rcv queue */
+        skb = skb_clone(skb, GFP_ATOMIC);
+        if (!skb)
+                return;
 
         /*
          *  Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 09c66a449da6..5379b0c1190a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,9 @@
132/* Instead of increasing this, you should create a hash table. */ 132/* Instead of increasing this, you should create a hash table. */
133#define MAX_GRO_SKBS 8 133#define MAX_GRO_SKBS 8
134 134
135/* This should be increased if a protocol with a bigger head is added. */
136#define GRO_MAX_HEAD (MAX_HEADER + 128)
137
135/* 138/*
136 * The list of packet types we will receive (as opposed to discard) 139 * The list of packet types we will receive (as opposed to discard)
137 * and the routines to invoke. 140 * and the routines to invoke.
@@ -167,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
167static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; 170static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
168static struct list_head ptype_all __read_mostly; /* Taps */ 171static struct list_head ptype_all __read_mostly; /* Taps */
169 172
170#ifdef CONFIG_NET_DMA
171struct net_dma {
172 struct dma_client client;
173 spinlock_t lock;
174 cpumask_t channel_mask;
175 struct dma_chan **channels;
176};
177
178static enum dma_state_client
179netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
180 enum dma_state state);
181
182static struct net_dma net_dma = {
183 .client = {
184 .event_callback = netdev_dma_event,
185 },
186};
187#endif
188
189/* 173/*
190 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 174 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
191 * semaphore. 175 * semaphore.
@@ -1104,6 +1088,11 @@ int dev_open(struct net_device *dev)
1104 dev->flags |= IFF_UP; 1088 dev->flags |= IFF_UP;
1105 1089
1106 /* 1090 /*
1091 * Enable NET_DMA
1092 */
1093 dmaengine_get();
1094
1095 /*
1107 * Initialize multicasting status 1096 * Initialize multicasting status
1108 */ 1097 */
1109 dev_set_rx_mode(dev); 1098 dev_set_rx_mode(dev);
@@ -1180,6 +1169,11 @@ int dev_close(struct net_device *dev)
1180 */ 1169 */
1181 call_netdevice_notifiers(NETDEV_DOWN, dev); 1170 call_netdevice_notifiers(NETDEV_DOWN, dev);
1182 1171
1172 /*
1173 * Shutdown NET_DMA
1174 */
1175 dmaengine_put();
1176
1183 return 0; 1177 return 0;
1184} 1178}
1185 1179
@@ -1540,7 +1534,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1540 skb->mac_len = skb->network_header - skb->mac_header; 1534 skb->mac_len = skb->network_header - skb->mac_header;
1541 __skb_pull(skb, skb->mac_len); 1535 __skb_pull(skb, skb->mac_len);
1542 1536
1543 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) { 1537 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1538 struct net_device *dev = skb->dev;
1539 struct ethtool_drvinfo info = {};
1540
1541 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1542 dev->ethtool_ops->get_drvinfo(dev, &info);
1543
1544 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1545 "ip_summed=%d",
1546 info.driver, dev ? dev->features : 0L,
1547 skb->sk ? skb->sk->sk_route_caps : 0L,
1548 skb->len, skb->data_len, skb->ip_summed);
1549
1544 if (skb_header_cloned(skb) && 1550 if (skb_header_cloned(skb) &&
1545 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1551 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1546 return ERR_PTR(err); 1552 return ERR_PTR(err);
@@ -2345,7 +2351,7 @@ static int napi_gro_complete(struct sk_buff *skb)
2345 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 2351 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2346 int err = -ENOENT; 2352 int err = -ENOENT;
2347 2353
2348 if (!skb_shinfo(skb)->frag_list) 2354 if (NAPI_GRO_CB(skb)->count == 1)
2349 goto out; 2355 goto out;
2350 2356
2351 rcu_read_lock(); 2357 rcu_read_lock();
@@ -2365,6 +2371,7 @@ static int napi_gro_complete(struct sk_buff *skb)
2365 } 2371 }
2366 2372
2367out: 2373out:
2374 skb_shinfo(skb)->gso_size = 0;
2368 __skb_push(skb, -skb_network_offset(skb)); 2375 __skb_push(skb, -skb_network_offset(skb));
2369 return netif_receive_skb(skb); 2376 return netif_receive_skb(skb);
2370} 2377}
@@ -2383,7 +2390,7 @@ void napi_gro_flush(struct napi_struct *napi)
2383} 2390}
2384EXPORT_SYMBOL(napi_gro_flush); 2391EXPORT_SYMBOL(napi_gro_flush);
2385 2392
2386int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2393int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2387{ 2394{
2388 struct sk_buff **pp = NULL; 2395 struct sk_buff **pp = NULL;
2389 struct packet_type *ptype; 2396 struct packet_type *ptype;
@@ -2392,10 +2399,14 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2392 int count = 0; 2399 int count = 0;
2393 int same_flow; 2400 int same_flow;
2394 int mac_len; 2401 int mac_len;
2402 int free;
2395 2403
2396 if (!(skb->dev->features & NETIF_F_GRO)) 2404 if (!(skb->dev->features & NETIF_F_GRO))
2397 goto normal; 2405 goto normal;
2398 2406
2407 if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
2408 goto normal;
2409
2399 rcu_read_lock(); 2410 rcu_read_lock();
2400 list_for_each_entry_rcu(ptype, head, list) { 2411 list_for_each_entry_rcu(ptype, head, list) {
2401 struct sk_buff *p; 2412 struct sk_buff *p;
@@ -2408,14 +2419,18 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2408 skb->mac_len = mac_len; 2419 skb->mac_len = mac_len;
2409 NAPI_GRO_CB(skb)->same_flow = 0; 2420 NAPI_GRO_CB(skb)->same_flow = 0;
2410 NAPI_GRO_CB(skb)->flush = 0; 2421 NAPI_GRO_CB(skb)->flush = 0;
2422 NAPI_GRO_CB(skb)->free = 0;
2411 2423
2412 for (p = napi->gro_list; p; p = p->next) { 2424 for (p = napi->gro_list; p; p = p->next) {
2413 count++; 2425 count++;
2414 NAPI_GRO_CB(p)->same_flow = 2426
2415 p->mac_len == mac_len && 2427 if (!NAPI_GRO_CB(p)->same_flow)
2416 !memcmp(skb_mac_header(p), skb_mac_header(skb), 2428 continue;
2417 mac_len); 2429
2418 NAPI_GRO_CB(p)->flush = 0; 2430 if (p->mac_len != mac_len ||
2431 memcmp(skb_mac_header(p), skb_mac_header(skb),
2432 mac_len))
2433 NAPI_GRO_CB(p)->same_flow = 0;
2419 } 2434 }
2420 2435
2421 pp = ptype->gro_receive(&napi->gro_list, skb); 2436 pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2427,6 +2442,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2427 goto normal; 2442 goto normal;
2428 2443
2429 same_flow = NAPI_GRO_CB(skb)->same_flow; 2444 same_flow = NAPI_GRO_CB(skb)->same_flow;
2445 free = NAPI_GRO_CB(skb)->free;
2430 2446
2431 if (pp) { 2447 if (pp) {
2432 struct sk_buff *nskb = *pp; 2448 struct sk_buff *nskb = *pp;
@@ -2446,17 +2462,119 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2446 } 2462 }
2447 2463
2448 NAPI_GRO_CB(skb)->count = 1; 2464 NAPI_GRO_CB(skb)->count = 1;
2465 skb_shinfo(skb)->gso_size = skb->len;
2449 skb->next = napi->gro_list; 2466 skb->next = napi->gro_list;
2450 napi->gro_list = skb; 2467 napi->gro_list = skb;
2451 2468
2452ok: 2469ok:
2453 return NET_RX_SUCCESS; 2470 return free;
2454 2471
2455normal: 2472normal:
2456 return netif_receive_skb(skb); 2473 return -1;
2474}
2475EXPORT_SYMBOL(dev_gro_receive);
2476
2477static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2478{
2479 struct sk_buff *p;
2480
2481 for (p = napi->gro_list; p; p = p->next) {
2482 NAPI_GRO_CB(p)->same_flow = 1;
2483 NAPI_GRO_CB(p)->flush = 0;
2484 }
2485
2486 return dev_gro_receive(napi, skb);
2487}
2488
2489int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2490{
2491 switch (__napi_gro_receive(napi, skb)) {
2492 case -1:
2493 return netif_receive_skb(skb);
2494
2495 case 1:
2496 kfree_skb(skb);
2497 break;
2498 }
2499
2500 return NET_RX_SUCCESS;
2457} 2501}
2458EXPORT_SYMBOL(napi_gro_receive); 2502EXPORT_SYMBOL(napi_gro_receive);
2459 2503
2504void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2505{
2506 __skb_pull(skb, skb_headlen(skb));
2507 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2508
2509 napi->skb = skb;
2510}
2511EXPORT_SYMBOL(napi_reuse_skb);
2512
2513struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2514 struct napi_gro_fraginfo *info)
2515{
2516 struct net_device *dev = napi->dev;
2517 struct sk_buff *skb = napi->skb;
2518
2519 napi->skb = NULL;
2520
2521 if (!skb) {
2522 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2523 if (!skb)
2524 goto out;
2525
2526 skb_reserve(skb, NET_IP_ALIGN);
2527 }
2528
2529 BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
2530 skb_shinfo(skb)->nr_frags = info->nr_frags;
2531 memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
2532
2533 skb->data_len = info->len;
2534 skb->len += info->len;
2535 skb->truesize += info->len;
2536
2537 if (!pskb_may_pull(skb, ETH_HLEN)) {
2538 napi_reuse_skb(napi, skb);
2539 skb = NULL;
2540 goto out;
2541 }
2542
2543 skb->protocol = eth_type_trans(skb, dev);
2544
2545 skb->ip_summed = info->ip_summed;
2546 skb->csum = info->csum;
2547
2548out:
2549 return skb;
2550}
2551EXPORT_SYMBOL(napi_fraginfo_skb);
2552
2553int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2554{
2555 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
2556 int err = NET_RX_DROP;
2557
2558 if (!skb)
2559 goto out;
2560
2561 err = NET_RX_SUCCESS;
2562
2563 switch (__napi_gro_receive(napi, skb)) {
2564 case -1:
2565 return netif_receive_skb(skb);
2566
2567 case 0:
2568 goto out;
2569 }
2570
2571 napi_reuse_skb(napi, skb);
2572
2573out:
2574 return err;
2575}
2576EXPORT_SYMBOL(napi_gro_frags);
2577
2460static int process_backlog(struct napi_struct *napi, int quota) 2578static int process_backlog(struct napi_struct *napi, int quota)
2461{ 2579{
2462 int work = 0; 2580 int work = 0;
@@ -2535,11 +2653,12 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2535{ 2653{
2536 INIT_LIST_HEAD(&napi->poll_list); 2654 INIT_LIST_HEAD(&napi->poll_list);
2537 napi->gro_list = NULL; 2655 napi->gro_list = NULL;
2656 napi->skb = NULL;
2538 napi->poll = poll; 2657 napi->poll = poll;
2539 napi->weight = weight; 2658 napi->weight = weight;
2540 list_add(&napi->dev_list, &dev->napi_list); 2659 list_add(&napi->dev_list, &dev->napi_list);
2541#ifdef CONFIG_NETPOLL
2542 napi->dev = dev; 2660 napi->dev = dev;
2661#ifdef CONFIG_NETPOLL
2543 spin_lock_init(&napi->poll_lock); 2662 spin_lock_init(&napi->poll_lock);
2544 napi->poll_owner = -1; 2663 napi->poll_owner = -1;
2545#endif 2664#endif
@@ -2552,6 +2671,7 @@ void netif_napi_del(struct napi_struct *napi)
2552 struct sk_buff *skb, *next; 2671 struct sk_buff *skb, *next;
2553 2672
2554 list_del_init(&napi->dev_list); 2673 list_del_init(&napi->dev_list);
2674 kfree(napi->skb);
2555 2675
2556 for (skb = napi->gro_list; skb; skb = next) { 2676 for (skb = napi->gro_list; skb; skb = next) {
2557 next = skb->next; 2677 next = skb->next;
@@ -2635,14 +2755,7 @@ out:
2635 * There may not be any more sk_buffs coming right now, so push 2755 * There may not be any more sk_buffs coming right now, so push
2636 * any pending DMA copies to hardware 2756 * any pending DMA copies to hardware
2637 */ 2757 */
2638 if (!cpus_empty(net_dma.channel_mask)) { 2758 dma_issue_pending_all();
2639 int chan_idx;
2640 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
2641 struct dma_chan *chan = net_dma.channels[chan_idx];
2642 if (chan)
2643 dma_async_memcpy_issue_pending(chan);
2644 }
2645 }
2646#endif 2759#endif
2647 2760
2648 return; 2761 return;
@@ -4331,6 +4444,45 @@ err_uninit:
4331} 4444}
4332 4445
4333/** 4446/**
4447 * init_dummy_netdev - init a dummy network device for NAPI
4448 * @dev: device to init
4449 *
4450 * This takes a network device structure and initialize the minimum
4451 * amount of fields so it can be used to schedule NAPI polls without
4452 * registering a full blown interface. This is to be used by drivers
4453 * that need to tie several hardware interfaces to a single NAPI
4454 * poll scheduler due to HW limitations.
4455 */
4456int init_dummy_netdev(struct net_device *dev)
4457{
4458 /* Clear everything. Note we don't initialize spinlocks
4459 * are they aren't supposed to be taken by any of the
4460 * NAPI code and this dummy netdev is supposed to be
4461 * only ever used for NAPI polls
4462 */
4463 memset(dev, 0, sizeof(struct net_device));
4464
4465 /* make sure we BUG if trying to hit standard
4466 * register/unregister code path
4467 */
4468 dev->reg_state = NETREG_DUMMY;
4469
4470 /* initialize the ref count */
4471 atomic_set(&dev->refcnt, 1);
4472
4473 /* NAPI wants this */
4474 INIT_LIST_HEAD(&dev->napi_list);
4475
4476 /* a dummy interface is started by default */
4477 set_bit(__LINK_STATE_PRESENT, &dev->state);
4478 set_bit(__LINK_STATE_START, &dev->state);
4479
4480 return 0;
4481}
4482EXPORT_SYMBOL_GPL(init_dummy_netdev);
4483
4484
4485/**
4334 * register_netdev - register a network device 4486 * register_netdev - register a network device
4335 * @dev: device to register 4487 * @dev: device to register
4336 * 4488 *
@@ -4833,122 +4985,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
4833 return NOTIFY_OK; 4985 return NOTIFY_OK;
4834} 4986}
4835 4987
4836#ifdef CONFIG_NET_DMA
4837/**
4838 * net_dma_rebalance - try to maintain one DMA channel per CPU
4839 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4840 *
4841 * This is called when the number of channels allocated to the net_dma client
4842 * changes. The net_dma client tries to have one DMA channel per CPU.
4843 */
4844
4845static void net_dma_rebalance(struct net_dma *net_dma)
4846{
4847 unsigned int cpu, i, n, chan_idx;
4848 struct dma_chan *chan;
4849
4850 if (cpus_empty(net_dma->channel_mask)) {
4851 for_each_online_cpu(cpu)
4852 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4853 return;
4854 }
4855
4856 i = 0;
4857 cpu = first_cpu(cpu_online_map);
4858
4859 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
4860 chan = net_dma->channels[chan_idx];
4861
4862 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4863 + (i < (num_online_cpus() %
4864 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4865
4866 while(n) {
4867 per_cpu(softnet_data, cpu).net_dma = chan;
4868 cpu = next_cpu(cpu, cpu_online_map);
4869 n--;
4870 }
4871 i++;
4872 }
4873}
4874
4875/**
4876 * netdev_dma_event - event callback for the net_dma_client
4877 * @client: should always be net_dma_client
4878 * @chan: DMA channel for the event
4879 * @state: DMA state to be handled
4880 */
4881static enum dma_state_client
4882netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4883 enum dma_state state)
4884{
4885 int i, found = 0, pos = -1;
4886 struct net_dma *net_dma =
4887 container_of(client, struct net_dma, client);
4888 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4889
4890 spin_lock(&net_dma->lock);
4891 switch (state) {
4892 case DMA_RESOURCE_AVAILABLE:
4893 for (i = 0; i < nr_cpu_ids; i++)
4894 if (net_dma->channels[i] == chan) {
4895 found = 1;
4896 break;
4897 } else if (net_dma->channels[i] == NULL && pos < 0)
4898 pos = i;
4899
4900 if (!found && pos >= 0) {
4901 ack = DMA_ACK;
4902 net_dma->channels[pos] = chan;
4903 cpu_set(pos, net_dma->channel_mask);
4904 net_dma_rebalance(net_dma);
4905 }
4906 break;
4907 case DMA_RESOURCE_REMOVED:
4908 for (i = 0; i < nr_cpu_ids; i++)
4909 if (net_dma->channels[i] == chan) {
4910 found = 1;
4911 pos = i;
4912 break;
4913 }
4914
4915 if (found) {
4916 ack = DMA_ACK;
4917 cpu_clear(pos, net_dma->channel_mask);
4918 net_dma->channels[i] = NULL;
4919 net_dma_rebalance(net_dma);
4920 }
4921 break;
4922 default:
4923 break;
4924 }
4925 spin_unlock(&net_dma->lock);
4926
4927 return ack;
4928}
4929
4930/**
4931 * netdev_dma_register - register the networking subsystem as a DMA client
4932 */
4933static int __init netdev_dma_register(void)
4934{
4935 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
4936 GFP_KERNEL);
4937 if (unlikely(!net_dma.channels)) {
4938 printk(KERN_NOTICE
4939 "netdev_dma: no memory for net_dma.channels\n");
4940 return -ENOMEM;
4941 }
4942 spin_lock_init(&net_dma.lock);
4943 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4944 dma_async_client_register(&net_dma.client);
4945 dma_async_client_chan_request(&net_dma.client);
4946 return 0;
4947}
4948
4949#else
4950static int __init netdev_dma_register(void) { return -ENODEV; }
4951#endif /* CONFIG_NET_DMA */
4952 4988
4953/** 4989/**
4954 * netdev_increment_features - increment feature set by one 4990 * netdev_increment_features - increment feature set by one
@@ -5168,8 +5204,6 @@ static int __init net_dev_init(void)
5168 if (register_pernet_device(&default_device_ops)) 5204 if (register_pernet_device(&default_device_ops))
5169 goto out; 5205 goto out;
5170 5206
5171 netdev_dma_register();
5172
5173 open_softirq(NET_TX_SOFTIRQ, net_tx_action); 5207 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5174 open_softirq(NET_RX_SOFTIRQ, net_rx_action); 5208 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
5175 5209
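The dev.c hunks above retire the net_dma receive-offload machinery: the per-channel flush loop in net_rx_action() collapses into the generic dmaengine call dma_issue_pending_all(), and the hand-rolled channel registry, including net_dma_rebalance() with its round-robin CPU assignment, goes away along with the netdev_dma_register() call in net_dev_init(). The share each channel used to receive is easy to misread from the removed expression; a standalone sketch of the same arithmetic (illustrative counts, plain userspace C):

#include <stdio.h>

int main(void)
{
        int ncpu = 6, nch = 4;          /* illustrative counts */
        int i;

        /* Each of nch channels serves floor(ncpu/nch) CPUs, and the
         * first ncpu % nch channels serve one extra. */
        for (i = 0; i < nch; i++) {
                int n = ncpu / nch + (i < ncpu % nch ? 1 : 0);
                printf("channel %d serves %d cpu(s)\n", i, n);
        }
        return 0;                       /* prints 2, 2, 1, 1 */
}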
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 55cffad2f328..55151faaf90c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -341,8 +341,8 @@ again:
341 rv = register_pernet_operations(first_device, ops); 341 rv = register_pernet_operations(first_device, ops);
342 if (rv < 0) 342 if (rv < 0)
343 ida_remove(&net_generic_ids, *id); 343 ida_remove(&net_generic_ids, *id);
344 mutex_unlock(&net_mutex);
345out: 344out:
345 mutex_unlock(&net_mutex);
346 return rv; 346 return rv;
347} 347}
348EXPORT_SYMBOL_GPL(register_pernet_gen_subsys); 348EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
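The net_namespace.c change is a genuine locking fix, not churn: mutex_unlock() used to sit before the out: label, so any error path reaching the function's earlier "goto out" returned with net_mutex still held. Moving the unlock below the label makes every exit release the mutex exactly once. The corrected shape, reduced to a sketch (do_register() stands in for the real registration work):

        int rv;

        mutex_lock(&net_mutex);
        rv = do_register();             /* illustrative helper */
        if (rv < 0)
                goto out;
        /* ... further setup that may also goto out ... */
out:
        mutex_unlock(&net_mutex);       /* reached on every path */
        return rv;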
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8d0abb26433..da74b844f4ea 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -73,17 +73,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
73static void sock_pipe_buf_release(struct pipe_inode_info *pipe, 73static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
74 struct pipe_buffer *buf) 74 struct pipe_buffer *buf)
75{ 75{
76 struct sk_buff *skb = (struct sk_buff *) buf->private; 76 put_page(buf->page);
77
78 kfree_skb(skb);
79} 77}
80 78
81static void sock_pipe_buf_get(struct pipe_inode_info *pipe, 79static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
82 struct pipe_buffer *buf) 80 struct pipe_buffer *buf)
83{ 81{
84 struct sk_buff *skb = (struct sk_buff *) buf->private; 82 get_page(buf->page);
85
86 skb_get(skb);
87} 83}
88 84
89static int sock_pipe_buf_steal(struct pipe_inode_info *pipe, 85static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -1334,9 +1330,19 @@ fault:
1334 */ 1330 */
1335static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 1331static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1336{ 1332{
1337 struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private; 1333 put_page(spd->pages[i]);
1334}
1338 1335
1339 kfree_skb(skb); 1336static inline struct page *linear_to_page(struct page *page, unsigned int len,
1337 unsigned int offset)
1338{
1339 struct page *p = alloc_pages(GFP_KERNEL, 0);
1340
1341 if (!p)
1342 return NULL;
1343 memcpy(page_address(p) + offset, page_address(page) + offset, len);
1344
1345 return p;
1340} 1346}
1341 1347
1342/* 1348/*
@@ -1344,16 +1350,23 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1344 */ 1350 */
1345static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1351static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1346 unsigned int len, unsigned int offset, 1352 unsigned int len, unsigned int offset,
1347 struct sk_buff *skb) 1353 struct sk_buff *skb, int linear)
1348{ 1354{
1349 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1355 if (unlikely(spd->nr_pages == PIPE_BUFFERS))
1350 return 1; 1356 return 1;
1351 1357
1358 if (linear) {
1359 page = linear_to_page(page, len, offset);
1360 if (!page)
1361 return 1;
1362 } else
1363 get_page(page);
1364
1352 spd->pages[spd->nr_pages] = page; 1365 spd->pages[spd->nr_pages] = page;
1353 spd->partial[spd->nr_pages].len = len; 1366 spd->partial[spd->nr_pages].len = len;
1354 spd->partial[spd->nr_pages].offset = offset; 1367 spd->partial[spd->nr_pages].offset = offset;
1355 spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
1356 spd->nr_pages++; 1368 spd->nr_pages++;
1369
1357 return 0; 1370 return 0;
1358} 1371}
1359 1372
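The two skbuff.c hunks above change how the skb's linear area reaches a pipe. tcp_read_sock() frees the skb as soon as the actor returns, so memory backing skb->data cannot be handed to the pipe by reference; the old workaround was to clone the whole skb (removed further down). The new linear_to_page() instead copies the linear bytes into a freshly allocated page at the same in-page offset, so the (page, offset, len) triple stays valid and the pipe holds a reference only to its own page. A userspace sketch of that copy (malloc stands in for alloc_pages; the caller guarantees offset + len <= PAGE_SIZE):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Duplicate len bytes at the same offset in a fresh page so the
 * (page, offset, len) descriptor outlives the source buffer. */
void *linear_to_page(const void *src, size_t len, size_t offset)
{
        void *p = malloc(PAGE_SIZE);    /* stands in for alloc_pages() */

        if (!p)
                return NULL;
        memcpy((char *)p + offset, (const char *)src + offset, len);
        return p;
}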
@@ -1369,7 +1382,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
1369static inline int __splice_segment(struct page *page, unsigned int poff, 1382static inline int __splice_segment(struct page *page, unsigned int poff,
1370 unsigned int plen, unsigned int *off, 1383 unsigned int plen, unsigned int *off,
1371 unsigned int *len, struct sk_buff *skb, 1384 unsigned int *len, struct sk_buff *skb,
1372 struct splice_pipe_desc *spd) 1385 struct splice_pipe_desc *spd, int linear)
1373{ 1386{
1374 if (!*len) 1387 if (!*len)
1375 return 1; 1388 return 1;
@@ -1392,7 +1405,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
1392 /* the linear region may spread across several pages */ 1405 /* the linear region may spread across several pages */
1393 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1406 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1394 1407
1395 if (spd_fill_page(spd, page, flen, poff, skb)) 1408 if (spd_fill_page(spd, page, flen, poff, skb, linear))
1396 return 1; 1409 return 1;
1397 1410
1398 __segment_seek(&page, &poff, &plen, flen); 1411 __segment_seek(&page, &poff, &plen, flen);
@@ -1419,7 +1432,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1419 if (__splice_segment(virt_to_page(skb->data), 1432 if (__splice_segment(virt_to_page(skb->data),
1420 (unsigned long) skb->data & (PAGE_SIZE - 1), 1433 (unsigned long) skb->data & (PAGE_SIZE - 1),
1421 skb_headlen(skb), 1434 skb_headlen(skb),
1422 offset, len, skb, spd)) 1435 offset, len, skb, spd, 1))
1423 return 1; 1436 return 1;
1424 1437
1425 /* 1438 /*
@@ -1429,7 +1442,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1429 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1442 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1430 1443
1431 if (__splice_segment(f->page, f->page_offset, f->size, 1444 if (__splice_segment(f->page, f->page_offset, f->size,
1432 offset, len, skb, spd)) 1445 offset, len, skb, spd, 0))
1433 return 1; 1446 return 1;
1434 } 1447 }
1435 1448
@@ -1442,7 +1455,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1442 * the frag list, if such a thing exists. We'd probably need to recurse to 1455 * the frag list, if such a thing exists. We'd probably need to recurse to
1443 * handle that cleanly. 1456 * handle that cleanly.
1444 */ 1457 */
1445int skb_splice_bits(struct sk_buff *__skb, unsigned int offset, 1458int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1446 struct pipe_inode_info *pipe, unsigned int tlen, 1459 struct pipe_inode_info *pipe, unsigned int tlen,
1447 unsigned int flags) 1460 unsigned int flags)
1448{ 1461{
@@ -1455,16 +1468,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
1455 .ops = &sock_pipe_buf_ops, 1468 .ops = &sock_pipe_buf_ops,
1456 .spd_release = sock_spd_release, 1469 .spd_release = sock_spd_release,
1457 }; 1470 };
1458 struct sk_buff *skb;
1459
1460 /*
1461 * I'd love to avoid the clone here, but tcp_read_sock()
 1462 * ignores reference counts and unconditionally kills the sk_buff
1463 * on return from the actor.
1464 */
1465 skb = skb_clone(__skb, GFP_KERNEL);
1466 if (unlikely(!skb))
1467 return -ENOMEM;
1468 1471
1469 /* 1472 /*
1470 * __skb_splice_bits() only fails if the output has no room left, 1473 * __skb_splice_bits() only fails if the output has no room left,
@@ -1488,15 +1491,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
1488 } 1491 }
1489 1492
1490done: 1493done:
1491 /*
1492 * drop our reference to the clone, the pipe consumption will
1493 * drop the rest.
1494 */
1495 kfree_skb(skb);
1496
1497 if (spd.nr_pages) { 1494 if (spd.nr_pages) {
1495 struct sock *sk = skb->sk;
1498 int ret; 1496 int ret;
1499 struct sock *sk = __skb->sk;
1500 1497
1501 /* 1498 /*
1502 * Drop the socket lock, otherwise we have reverse 1499 * Drop the socket lock, otherwise we have reverse
@@ -2215,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2215 return 0; 2212 return 0;
2216 2213
2217next_skb: 2214next_skb:
2218 block_limit = skb_headlen(st->cur_skb); 2215 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2219 2216
2220 if (abs_offset < block_limit) { 2217 if (abs_offset < block_limit) {
2221 *data = st->cur_skb->data + abs_offset; 2218 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2222 return block_limit - abs_offset; 2219 return block_limit - abs_offset;
2223 } 2220 }
2224 2221
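The skb_seq_read() fix above matters once the iterator has descended past the first buffer: abs_offset counts bytes from the start of the whole chain, while skb_headlen() is local to the current skb, so both the limit test and the data pointer must be shifted by st->stepped_offset, the bytes already consumed in earlier buffers. A worked example of the corrected arithmetic:

#include <assert.h>

int main(void)
{
        unsigned int stepped_offset = 100;      /* consumed in earlier skbs */
        unsigned int headlen = 60;              /* current skb's linear head */
        unsigned int abs_offset = 120;          /* caller's absolute position */
        unsigned int block_limit = headlen + stepped_offset;    /* 160 */

        assert(abs_offset < block_limit);               /* still in the head */
        assert(abs_offset - stepped_offset == 20);      /* index into head */
        return 0;
}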
@@ -2253,13 +2250,14 @@ next_skb:
2253 st->frag_data = NULL; 2250 st->frag_data = NULL;
2254 } 2251 }
2255 2252
2256 if (st->cur_skb->next) { 2253 if (st->root_skb == st->cur_skb &&
2257 st->cur_skb = st->cur_skb->next; 2254 skb_shinfo(st->root_skb)->frag_list) {
2255 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2258 st->frag_idx = 0; 2256 st->frag_idx = 0;
2259 goto next_skb; 2257 goto next_skb;
2260 } else if (st->root_skb == st->cur_skb && 2258 } else if (st->cur_skb->next) {
2261 skb_shinfo(st->root_skb)->frag_list) { 2259 st->cur_skb = st->cur_skb->next;
2262 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2260 st->frag_idx = 0;
2263 goto next_skb; 2261 goto next_skb;
2264 } 2262 }
2265 2263
@@ -2588,12 +2586,30 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2588 struct sk_buff *nskb; 2586 struct sk_buff *nskb;
2589 unsigned int headroom; 2587 unsigned int headroom;
2590 unsigned int hlen = p->data - skb_mac_header(p); 2588 unsigned int hlen = p->data - skb_mac_header(p);
2589 unsigned int len = skb->len;
2591 2590
2592 if (hlen + p->len + skb->len >= 65536) 2591 if (hlen + p->len + len >= 65536)
2593 return -E2BIG; 2592 return -E2BIG;
2594 2593
2595 if (skb_shinfo(p)->frag_list) 2594 if (skb_shinfo(p)->frag_list)
2596 goto merge; 2595 goto merge;
2596 else if (!skb_headlen(p) && !skb_headlen(skb) &&
2597 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
2598 MAX_SKB_FRAGS) {
2599 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
2600 skb_shinfo(skb)->frags,
2601 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2602
2603 skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
2604 skb_shinfo(skb)->nr_frags = 0;
2605
2606 skb->truesize -= skb->data_len;
2607 skb->len -= skb->data_len;
2608 skb->data_len = 0;
2609
2610 NAPI_GRO_CB(skb)->free = 1;
2611 goto done;
2612 }
2597 2613
2598 headroom = skb_headroom(p); 2614 headroom = skb_headroom(p);
2599 nskb = netdev_alloc_skb(p->dev, headroom); 2615 nskb = netdev_alloc_skb(p->dev, headroom);
@@ -2613,6 +2629,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2613 2629
2614 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2630 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2615 skb_shinfo(nskb)->frag_list = p; 2631 skb_shinfo(nskb)->frag_list = p;
2632 skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
2616 skb_header_release(p); 2633 skb_header_release(p);
2617 nskb->prev = p; 2634 nskb->prev = p;
2618 2635
@@ -2627,14 +2644,15 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2627 p = nskb; 2644 p = nskb;
2628 2645
2629merge: 2646merge:
2630 NAPI_GRO_CB(p)->count++;
2631 p->prev->next = skb; 2647 p->prev->next = skb;
2632 p->prev = skb; 2648 p->prev = skb;
2633 skb_header_release(skb); 2649 skb_header_release(skb);
2634 2650
2635 p->data_len += skb->len; 2651done:
2636 p->truesize += skb->len; 2652 NAPI_GRO_CB(p)->count++;
2637 p->len += skb->len; 2653 p->data_len += len;
2654 p->truesize += len;
2655 p->len += len;
2638 2656
2639 NAPI_GRO_CB(skb)->same_flow = 1; 2657 NAPI_GRO_CB(skb)->same_flow = 1;
2640 return 0; 2658 return 0;
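The skb_gro_receive() hunks add a fast path: when neither packet carries linear data and the combined fragment counts fit, merging is pure descriptor bookkeeping with no data copy and no extra skb allocation (latching len up front also fixes the accounting, since skb->len shrinks once data_len is stripped). A userspace sketch of the descriptor append, with illustrative types:

#include <string.h>

#define MAX_FRAGS 18                    /* stands in for MAX_SKB_FRAGS */

struct frag { void *page; unsigned int off, size; };
struct pkt  { struct frag frags[MAX_FRAGS]; unsigned int nr_frags; };

/* Append q's (page, offset, size) descriptors to p; q gives up its
 * claim on the pages, mirroring nr_frags = 0 in the patch. */
int merge_paged(struct pkt *p, struct pkt *q)
{
        if (p->nr_frags + q->nr_frags >= MAX_FRAGS)
                return -1;              /* would overflow: take slow path */
        memcpy(p->frags + p->nr_frags, q->frags,
               q->nr_frags * sizeof(struct frag));
        p->nr_frags += q->nr_frags;
        q->nr_frags = 0;
        return 0;
}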
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5dbfe5fdc0d6..8379496de82b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -191,7 +191,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
191 return 0; 191 return 0;
192nlmsg_failure: 192nlmsg_failure:
193err: 193err:
194 kfree(dcbnl_skb); 194 kfree_skb(dcbnl_skb);
195 return ret; 195 return ret;
196} 196}
197 197
@@ -272,7 +272,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
272 return 0; 272 return 0;
273nlmsg_failure: 273nlmsg_failure:
274err: 274err:
275 kfree(dcbnl_skb); 275 kfree_skb(dcbnl_skb);
276err_out: 276err_out:
277 return -EINVAL; 277 return -EINVAL;
278} 278}
@@ -314,7 +314,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
314 314
315nlmsg_failure: 315nlmsg_failure:
316err: 316err:
317 kfree(dcbnl_skb); 317 kfree_skb(dcbnl_skb);
318err_out: 318err_out:
319 return -EINVAL; 319 return -EINVAL;
320} 320}
@@ -380,7 +380,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
380 return 0; 380 return 0;
381nlmsg_failure: 381nlmsg_failure:
382err: 382err:
383 kfree(dcbnl_skb); 383 kfree_skb(dcbnl_skb);
384err_out: 384err_out:
385 return -EINVAL; 385 return -EINVAL;
386} 386}
@@ -458,7 +458,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
458 return 0; 458 return 0;
459nlmsg_failure: 459nlmsg_failure:
460err: 460err:
461 kfree(dcbnl_skb); 461 kfree_skb(dcbnl_skb);
462err_out: 462err_out:
463 return ret; 463 return ret;
464} 464}
@@ -687,7 +687,7 @@ err_pg:
687 nla_nest_cancel(dcbnl_skb, pg_nest); 687 nla_nest_cancel(dcbnl_skb, pg_nest);
688nlmsg_failure: 688nlmsg_failure:
689err: 689err:
690 kfree(dcbnl_skb); 690 kfree_skb(dcbnl_skb);
691err_out: 691err_out:
692 ret = -EINVAL; 692 ret = -EINVAL;
693 return ret; 693 return ret;
@@ -949,7 +949,7 @@ err_bcn:
949 nla_nest_cancel(dcbnl_skb, bcn_nest); 949 nla_nest_cancel(dcbnl_skb, bcn_nest);
950nlmsg_failure: 950nlmsg_failure:
951err: 951err:
952 kfree(dcbnl_skb); 952 kfree_skb(dcbnl_skb);
953err_out: 953err_out:
954 ret = -EINVAL; 954 ret = -EINVAL;
955 return ret; 955 return ret;
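All seven dcbnl.c hunks are the same class of fix: the error paths were releasing an sk_buff with plain kfree(). An skb must go through kfree_skb(), which drops the reference count, runs any destructor, and frees the separately allocated data buffer; kfree() on the struct leaks skb->head and skips that teardown. The pairing, as a fragment sketch:

        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_KERNEL);       /* len: message size */
        if (skb == NULL)
                return -ENOMEM;
        /* ... build the message; on any failure path: */
        kfree_skb(skb);                         /* correct */
        /* kfree(skb);                             wrong: leaks skb->head */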
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7aa2a7acc7ec..ad6dffd9070e 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -1,7 +1,6 @@
1menuconfig IP_DCCP 1menuconfig IP_DCCP
2 tristate "The DCCP Protocol (EXPERIMENTAL)" 2 tristate "The DCCP Protocol (EXPERIMENTAL)"
3 depends on INET && EXPERIMENTAL 3 depends on INET && EXPERIMENTAL
4 select IP_DCCP_CCID2
5 ---help--- 4 ---help---
6 Datagram Congestion Control Protocol (RFC 4340) 5 Datagram Congestion Control Protocol (RFC 4340)
7 6
@@ -25,9 +24,6 @@ config INET_DCCP_DIAG
25 def_tristate y if (IP_DCCP = y && INET_DIAG = y) 24 def_tristate y if (IP_DCCP = y && INET_DIAG = y)
26 def_tristate m 25 def_tristate m
27 26
28config IP_DCCP_ACKVEC
29 bool
30
31source "net/dccp/ccids/Kconfig" 27source "net/dccp/ccids/Kconfig"
32 28
33menu "DCCP Kernel Hacking" 29menu "DCCP Kernel Hacking"
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index f4f8793aafff..2991efcc8dea 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -2,14 +2,23 @@ obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
2 2
3dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o 3dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
4 4
5#
6# CCID algorithms to be used by dccp.ko
7#
8# CCID-2 is default (RFC 4340, p. 77) and has Ack Vectors as dependency
9dccp-y += ccids/ccid2.o ackvec.o
10dccp-$(CONFIG_IP_DCCP_CCID3) += ccids/ccid3.o
11dccp-$(CONFIG_IP_DCCP_TFRC_LIB) += ccids/lib/tfrc.o \
12 ccids/lib/tfrc_equation.o \
13 ccids/lib/packet_history.o \
14 ccids/lib/loss_interval.o
15
5dccp_ipv4-y := ipv4.o 16dccp_ipv4-y := ipv4.o
6 17
7# build dccp_ipv6 as module whenever either IPv6 or DCCP is a module 18# build dccp_ipv6 as module whenever either IPv6 or DCCP is a module
8obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o 19obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
9dccp_ipv6-y := ipv6.o 20dccp_ipv6-y := ipv6.o
10 21
11dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o
12
13obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o 22obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
14obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o 23obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
15 24
@@ -17,5 +26,3 @@ dccp-$(CONFIG_SYSCTL) += sysctl.o
17 26
18dccp_diag-y := diag.o 27dccp_diag-y := diag.o
19dccp_probe-y := probe.o 28dccp_probe-y := probe.o
20
21obj-y += ccids/
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 4ccee030524e..45f95e55f873 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -84,7 +84,6 @@ struct dccp_ackvec_record {
84struct sock; 84struct sock;
85struct sk_buff; 85struct sk_buff;
86 86
87#ifdef CONFIG_IP_DCCP_ACKVEC
88extern int dccp_ackvec_init(void); 87extern int dccp_ackvec_init(void);
89extern void dccp_ackvec_exit(void); 88extern void dccp_ackvec_exit(void);
90 89
@@ -106,52 +105,4 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
106{ 105{
107 return av->av_vec_len; 106 return av->av_vec_len;
108} 107}
109#else /* CONFIG_IP_DCCP_ACKVEC */
110static inline int dccp_ackvec_init(void)
111{
112 return 0;
113}
114
115static inline void dccp_ackvec_exit(void)
116{
117}
118
119static inline struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
120{
121 return NULL;
122}
123
124static inline void dccp_ackvec_free(struct dccp_ackvec *av)
125{
126}
127
128static inline int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
129 const u64 ackno, const u8 state)
130{
131 return -1;
132}
133
134static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av,
135 struct sock *sk, const u64 ackno)
136{
137}
138
139static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
140 const u64 *ackno, const u8 opt,
141 const u8 *value, const u8 len)
142{
143 return -1;
144}
145
146static inline int dccp_insert_option_ackvec(const struct sock *sk,
147 const struct sk_buff *skb)
148{
149 return -1;
150}
151
152static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
153{
154 return 0;
155}
156#endif /* CONFIG_IP_DCCP_ACKVEC */
157#endif /* _ACKVEC_H */ 108#endif /* _ACKVEC_H */
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index bcc643f992ae..f3e9ba1cfd01 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -12,56 +12,70 @@
12 */ 12 */
13 13
14#include "ccid.h" 14#include "ccid.h"
15#include "ccids/lib/tfrc.h"
15 16
16static u8 builtin_ccids[] = { 17static struct ccid_operations *ccids[] = {
17 DCCPC_CCID2, /* CCID2 is supported by default */ 18 &ccid2_ops,
18#if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE) 19#ifdef CONFIG_IP_DCCP_CCID3
19 DCCPC_CCID3, 20 &ccid3_ops,
20#endif 21#endif
21}; 22};
22 23
23static struct ccid_operations *ccids[CCID_MAX]; 24static struct ccid_operations *ccid_by_number(const u8 id)
24#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
25static atomic_t ccids_lockct = ATOMIC_INIT(0);
26static DEFINE_SPINLOCK(ccids_lock);
27
28/*
29 * The strategy is: modifications ccids vector are short, do not sleep and
30 * veeery rare, but read access should be free of any exclusive locks.
31 */
32static void ccids_write_lock(void)
33{ 25{
34 spin_lock(&ccids_lock); 26 int i;
35 while (atomic_read(&ccids_lockct) != 0) { 27
36 spin_unlock(&ccids_lock); 28 for (i = 0; i < ARRAY_SIZE(ccids); i++)
37 yield(); 29 if (ccids[i]->ccid_id == id)
38 spin_lock(&ccids_lock); 30 return ccids[i];
39 } 31 return NULL;
40} 32}
41 33
42static inline void ccids_write_unlock(void) 34/* check that up to @array_len members in @ccid_array are supported */
35bool ccid_support_check(u8 const *ccid_array, u8 array_len)
43{ 36{
44 spin_unlock(&ccids_lock); 37 while (array_len > 0)
38 if (ccid_by_number(ccid_array[--array_len]) == NULL)
39 return false;
40 return true;
45} 41}
46 42
47static inline void ccids_read_lock(void) 43/**
44 * ccid_get_builtin_ccids - Populate a list of built-in CCIDs
45 * @ccid_array: pointer to copy into
46 * @array_len: value to return length into
47 * This function allocates memory - caller must see that it is freed after use.
48 */
49int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
48{ 50{
49 atomic_inc(&ccids_lockct); 51 *ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any());
50 smp_mb__after_atomic_inc(); 52 if (*ccid_array == NULL)
51 spin_unlock_wait(&ccids_lock); 53 return -ENOBUFS;
54
55 for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1)
56 (*ccid_array)[*array_len] = ccids[*array_len]->ccid_id;
57 return 0;
52} 58}
53 59
54static inline void ccids_read_unlock(void) 60int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
61 char __user *optval, int __user *optlen)
55{ 62{
56 atomic_dec(&ccids_lockct); 63 u8 *ccid_array, array_len;
57} 64 int err = 0;
58 65
59#else 66 if (len < ARRAY_SIZE(ccids))
60#define ccids_write_lock() do { } while(0) 67 return -EINVAL;
61#define ccids_write_unlock() do { } while(0) 68
62#define ccids_read_lock() do { } while(0) 69 if (ccid_get_builtin_ccids(&ccid_array, &array_len))
63#define ccids_read_unlock() do { } while(0) 70 return -ENOBUFS;
64#endif 71
72 if (put_user(array_len, optlen) ||
73 copy_to_user(optval, ccid_array, array_len))
74 err = -EFAULT;
75
76 kfree(ccid_array);
77 return err;
78}
65 79
66static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 80static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
67{ 81{
@@ -93,48 +107,7 @@ static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
93 } 107 }
94} 108}
95 109
96/* check that up to @array_len members in @ccid_array are supported */ 110static int ccid_activate(struct ccid_operations *ccid_ops)
97bool ccid_support_check(u8 const *ccid_array, u8 array_len)
98{
99 u8 i, j, found;
100
101 for (i = 0, found = 0; i < array_len; i++, found = 0) {
102 for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++)
103 found = (ccid_array[i] == builtin_ccids[j]);
104 if (!found)
105 return false;
106 }
107 return true;
108}
109
110/**
111 * ccid_get_builtin_ccids - Provide copy of `builtin' CCID array
112 * @ccid_array: pointer to copy into
113 * @array_len: value to return length into
114 * This function allocates memory - caller must see that it is freed after use.
115 */
116int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
117{
118 *ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any());
119 if (*ccid_array == NULL)
120 return -ENOBUFS;
121 *array_len = ARRAY_SIZE(builtin_ccids);
122 return 0;
123}
124
125int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
126 char __user *optval, int __user *optlen)
127{
128 if (len < sizeof(builtin_ccids))
129 return -EINVAL;
130
131 if (put_user(sizeof(builtin_ccids), optlen) ||
132 copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids)))
133 return -EFAULT;
134 return 0;
135}
136
137int ccid_register(struct ccid_operations *ccid_ops)
138{ 111{
139 int err = -ENOBUFS; 112 int err = -ENOBUFS;
140 113
@@ -152,79 +125,40 @@ int ccid_register(struct ccid_operations *ccid_ops)
152 if (ccid_ops->ccid_hc_tx_slab == NULL) 125 if (ccid_ops->ccid_hc_tx_slab == NULL)
153 goto out_free_rx_slab; 126 goto out_free_rx_slab;
154 127
155 ccids_write_lock(); 128 pr_info("CCID: Activated CCID %d (%s)\n",
156 err = -EEXIST;
157 if (ccids[ccid_ops->ccid_id] == NULL) {
158 ccids[ccid_ops->ccid_id] = ccid_ops;
159 err = 0;
160 }
161 ccids_write_unlock();
162 if (err != 0)
163 goto out_free_tx_slab;
164
165 pr_info("CCID: Registered CCID %d (%s)\n",
166 ccid_ops->ccid_id, ccid_ops->ccid_name); 129 ccid_ops->ccid_id, ccid_ops->ccid_name);
130 err = 0;
167out: 131out:
168 return err; 132 return err;
169out_free_tx_slab:
170 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
171 ccid_ops->ccid_hc_tx_slab = NULL;
172 goto out;
173out_free_rx_slab: 133out_free_rx_slab:
174 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); 134 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
175 ccid_ops->ccid_hc_rx_slab = NULL; 135 ccid_ops->ccid_hc_rx_slab = NULL;
176 goto out; 136 goto out;
177} 137}
178 138
179EXPORT_SYMBOL_GPL(ccid_register); 139static void ccid_deactivate(struct ccid_operations *ccid_ops)
180
181int ccid_unregister(struct ccid_operations *ccid_ops)
182{ 140{
183 ccids_write_lock();
184 ccids[ccid_ops->ccid_id] = NULL;
185 ccids_write_unlock();
186
187 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); 141 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
188 ccid_ops->ccid_hc_tx_slab = NULL; 142 ccid_ops->ccid_hc_tx_slab = NULL;
189 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); 143 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
190 ccid_ops->ccid_hc_rx_slab = NULL; 144 ccid_ops->ccid_hc_rx_slab = NULL;
191 145
192 pr_info("CCID: Unregistered CCID %d (%s)\n", 146 pr_info("CCID: Deactivated CCID %d (%s)\n",
193 ccid_ops->ccid_id, ccid_ops->ccid_name); 147 ccid_ops->ccid_id, ccid_ops->ccid_name);
194 return 0;
195} 148}
196 149
197EXPORT_SYMBOL_GPL(ccid_unregister); 150struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
198
199struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
200{ 151{
201 struct ccid_operations *ccid_ops; 152 struct ccid_operations *ccid_ops = ccid_by_number(id);
202 struct ccid *ccid = NULL; 153 struct ccid *ccid = NULL;
203 154
204 ccids_read_lock();
205#ifdef CONFIG_MODULES
206 if (ccids[id] == NULL) {
207 /* We only try to load if in process context */
208 ccids_read_unlock();
209 if (gfp & GFP_ATOMIC)
210 goto out;
211 request_module("net-dccp-ccid-%d", id);
212 ccids_read_lock();
213 }
214#endif
215 ccid_ops = ccids[id];
216 if (ccid_ops == NULL) 155 if (ccid_ops == NULL)
217 goto out_unlock; 156 goto out;
218
219 if (!try_module_get(ccid_ops->ccid_owner))
220 goto out_unlock;
221
222 ccids_read_unlock();
223 157
224 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : 158 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
225 ccid_ops->ccid_hc_tx_slab, gfp); 159 ccid_ops->ccid_hc_tx_slab, gfp_any());
226 if (ccid == NULL) 160 if (ccid == NULL)
227 goto out_module_put; 161 goto out;
228 ccid->ccid_ops = ccid_ops; 162 ccid->ccid_ops = ccid_ops;
229 if (rx) { 163 if (rx) {
230 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); 164 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
@@ -239,53 +173,57 @@ struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
239 } 173 }
240out: 174out:
241 return ccid; 175 return ccid;
242out_unlock:
243 ccids_read_unlock();
244 goto out;
245out_free_ccid: 176out_free_ccid:
246 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : 177 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
247 ccid_ops->ccid_hc_tx_slab, ccid); 178 ccid_ops->ccid_hc_tx_slab, ccid);
248 ccid = NULL; 179 ccid = NULL;
249out_module_put:
250 module_put(ccid_ops->ccid_owner);
251 goto out; 180 goto out;
252} 181}
253 182
254EXPORT_SYMBOL_GPL(ccid_new); 183void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
255
256static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
257{ 184{
258 struct ccid_operations *ccid_ops; 185 if (ccid != NULL) {
259 186 if (ccid->ccid_ops->ccid_hc_rx_exit != NULL)
260 if (ccid == NULL) 187 ccid->ccid_ops->ccid_hc_rx_exit(sk);
261 return; 188 kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid);
262
263 ccid_ops = ccid->ccid_ops;
264 if (rx) {
265 if (ccid_ops->ccid_hc_rx_exit != NULL)
266 ccid_ops->ccid_hc_rx_exit(sk);
267 kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
268 } else {
269 if (ccid_ops->ccid_hc_tx_exit != NULL)
270 ccid_ops->ccid_hc_tx_exit(sk);
271 kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
272 } 189 }
273 ccids_read_lock();
274 if (ccids[ccid_ops->ccid_id] != NULL)
275 module_put(ccid_ops->ccid_owner);
276 ccids_read_unlock();
277} 190}
278 191
279void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) 192void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
280{ 193{
281 ccid_delete(ccid, sk, 1); 194 if (ccid != NULL) {
195 if (ccid->ccid_ops->ccid_hc_tx_exit != NULL)
196 ccid->ccid_ops->ccid_hc_tx_exit(sk);
197 kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid);
198 }
282} 199}
283 200
284EXPORT_SYMBOL_GPL(ccid_hc_rx_delete); 201int __init ccid_initialize_builtins(void)
285
286void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
287{ 202{
288 ccid_delete(ccid, sk, 0); 203 int i, err = tfrc_lib_init();
204
205 if (err)
206 return err;
207
208 for (i = 0; i < ARRAY_SIZE(ccids); i++) {
209 err = ccid_activate(ccids[i]);
210 if (err)
211 goto unwind_registrations;
212 }
213 return 0;
214
215unwind_registrations:
 216 while (--i >= 0)
217 ccid_deactivate(ccids[i]);
218 tfrc_lib_exit();
219 return err;
289} 220}
290 221
291EXPORT_SYMBOL_GPL(ccid_hc_tx_delete); 222void ccid_cleanup_builtins(void)
223{
224 int i;
225
226 for (i = 0; i < ARRAY_SIZE(ccids); i++)
227 ccid_deactivate(ccids[i]);
228 tfrc_lib_exit();
229}
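With the CCIDs built in, ccid_getsockopt_builtin_ccids() above simply reports the static table. A hedged userspace sketch of querying it through the DCCP_SOCKOPT_AVAILABLE_CCIDS socket option; the numeric constants are copied from the era's linux/dccp.h and linux/socket.h and should be checked against your headers:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif
#define SOL_DCCP 269
#define DCCP_SOCKOPT_AVAILABLE_CCIDS 12

int main(void)
{
        unsigned char ids[16];
        socklen_t len = sizeof(ids);
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

        if (fd < 0)
                return 1;
        if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_AVAILABLE_CCIDS,
                       ids, &len) == 0) {
                socklen_t i;

                for (i = 0; i < len; i++)
                        printf("built-in CCID %u\n", ids[i]);
        }
        return 0;
}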
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 18f69423a708..facedd20b531 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -29,7 +29,6 @@ struct tcp_info;
29 * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.) 29 * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.)
30 * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled) 30 * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled)
31 * @ccid_name: alphabetical identifier string for @ccid_id 31 * @ccid_name: alphabetical identifier string for @ccid_id
32 * @ccid_owner: module which implements/owns this CCID
33 * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection 32 * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection
34 * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket 33 * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket
35 * 34 *
@@ -48,7 +47,6 @@ struct ccid_operations {
48 unsigned char ccid_id; 47 unsigned char ccid_id;
49 __u32 ccid_ccmps; 48 __u32 ccid_ccmps;
50 const char *ccid_name; 49 const char *ccid_name;
51 struct module *ccid_owner;
52 struct kmem_cache *ccid_hc_rx_slab, 50 struct kmem_cache *ccid_hc_rx_slab,
53 *ccid_hc_tx_slab; 51 *ccid_hc_tx_slab;
54 __u32 ccid_hc_rx_obj_size, 52 __u32 ccid_hc_rx_obj_size,
@@ -90,8 +88,13 @@ struct ccid_operations {
90 int __user *optlen); 88 int __user *optlen);
91}; 89};
92 90
93extern int ccid_register(struct ccid_operations *ccid_ops); 91extern struct ccid_operations ccid2_ops;
94extern int ccid_unregister(struct ccid_operations *ccid_ops); 92#ifdef CONFIG_IP_DCCP_CCID3
93extern struct ccid_operations ccid3_ops;
94#endif
95
96extern int ccid_initialize_builtins(void);
97extern void ccid_cleanup_builtins(void);
95 98
96struct ccid { 99struct ccid {
97 struct ccid_operations *ccid_ops; 100 struct ccid_operations *ccid_ops;
@@ -108,8 +111,7 @@ extern int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
108extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, 111extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
109 char __user *, int __user *); 112 char __user *, int __user *);
110 113
111extern struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, 114extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
112 gfp_t gfp);
113 115
114static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp) 116static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp)
115{ 117{
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 12275943eab8..4b5db44970aa 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -1,80 +1,51 @@
1menu "DCCP CCIDs Configuration (EXPERIMENTAL)" 1menu "DCCP CCIDs Configuration (EXPERIMENTAL)"
2 depends on EXPERIMENTAL 2 depends on EXPERIMENTAL
3 3
4config IP_DCCP_CCID2
5 tristate "CCID2 (TCP-Like) (EXPERIMENTAL)"
6 def_tristate IP_DCCP
7 select IP_DCCP_ACKVEC
8 ---help---
9 CCID 2, TCP-like Congestion Control, denotes Additive Increase,
10 Multiplicative Decrease (AIMD) congestion control with behavior
11 modelled directly on TCP, including congestion window, slow start,
12 timeouts, and so forth [RFC 2581]. CCID 2 achieves maximum
13 bandwidth over the long term, consistent with the use of end-to-end
14 congestion control, but halves its congestion window in response to
15 each congestion event. This leads to the abrupt rate changes
16 typical of TCP. Applications should use CCID 2 if they prefer
17 maximum bandwidth utilization to steadiness of rate. This is often
18 the case for applications that are not playing their data directly
19 to the user. For example, a hypothetical application that
20 transferred files over DCCP, using application-level retransmissions
21 for lost packets, would prefer CCID 2 to CCID 3. On-line games may
22 also prefer CCID 2. See RFC 4341 for further details.
23
24 CCID2 is the default CCID used by DCCP.
25
26config IP_DCCP_CCID2_DEBUG 4config IP_DCCP_CCID2_DEBUG
27 bool "CCID2 debugging messages" 5 bool "CCID-2 debugging messages"
28 depends on IP_DCCP_CCID2 6 ---help---
29 ---help--- 7 Enable CCID-2 specific debugging messages.
30 Enable CCID2-specific debugging messages.
31 8
32 When compiling CCID2 as a module, this debugging output can 9 The debugging output can additionally be toggled by setting the
33 additionally be toggled by setting the ccid2_debug module 10 ccid2_debug parameter to 0 or 1.
34 parameter to 0 or 1.
35 11
36 If in doubt, say N. 12 If in doubt, say N.
37 13
38config IP_DCCP_CCID3 14config IP_DCCP_CCID3
39 tristate "CCID3 (TCP-Friendly) (EXPERIMENTAL)" 15 bool "CCID-3 (TCP-Friendly) (EXPERIMENTAL)"
40 def_tristate IP_DCCP 16 def_bool y if (IP_DCCP = y || IP_DCCP = m)
41 select IP_DCCP_TFRC_LIB
42 ---help--- 17 ---help---
43 CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based 18 CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based
44 rate-controlled congestion control mechanism. TFRC is designed to 19 rate-controlled congestion control mechanism. TFRC is designed to
45 be reasonably fair when competing for bandwidth with TCP-like flows, 20 be reasonably fair when competing for bandwidth with TCP-like flows,
46 where a flow is "reasonably fair" if its sending rate is generally 21 where a flow is "reasonably fair" if its sending rate is generally
47 within a factor of two of the sending rate of a TCP flow under the 22 within a factor of two of the sending rate of a TCP flow under the
48 same conditions. However, TFRC has a much lower variation of 23 same conditions. However, TFRC has a much lower variation of
49 throughput over time compared with TCP, which makes CCID 3 more 24 throughput over time compared with TCP, which makes CCID-3 more
 50 suitable than CCID 2 for applications such as streaming media where a 25 suitable than CCID-2 for applications such as streaming media where a
51 relatively smooth sending rate is of importance. 26 relatively smooth sending rate is of importance.
52 27
53 CCID 3 is further described in RFC 4342, 28 CCID-3 is further described in RFC 4342,
54 http://www.ietf.org/rfc/rfc4342.txt 29 http://www.ietf.org/rfc/rfc4342.txt
55 30
56 The TFRC congestion control algorithms were initially described in 31 The TFRC congestion control algorithms were initially described in
57 RFC 3448. 32 RFC 5348.
58 33
59 This text was extracted from RFC 4340 (sec. 10.2), 34 This text was extracted from RFC 4340 (sec. 10.2),
60 http://www.ietf.org/rfc/rfc4340.txt 35 http://www.ietf.org/rfc/rfc4340.txt
61
62 To compile this CCID as a module, choose M here: the module will be
63 called dccp_ccid3.
64 36
65 If in doubt, say M. 37 If in doubt, say N.
66 38
67config IP_DCCP_CCID3_DEBUG 39config IP_DCCP_CCID3_DEBUG
68 bool "CCID3 debugging messages" 40 bool "CCID-3 debugging messages"
69 depends on IP_DCCP_CCID3 41 depends on IP_DCCP_CCID3
70 ---help--- 42 ---help---
71 Enable CCID3-specific debugging messages. 43 Enable CCID-3 specific debugging messages.
72 44
73 When compiling CCID3 as a module, this debugging output can 45 The debugging output can additionally be toggled by setting the
74 additionally be toggled by setting the ccid3_debug module 46 ccid3_debug parameter to 0 or 1.
75 parameter to 0 or 1.
76 47
77 If in doubt, say N. 48 If in doubt, say N.
78 49
79config IP_DCCP_CCID3_RTO 50config IP_DCCP_CCID3_RTO
80 int "Use higher bound for nofeedback timer" 51 int "Use higher bound for nofeedback timer"
@@ -108,12 +79,8 @@ config IP_DCCP_CCID3_RTO
108 therefore not be performed on WANs. 79 therefore not be performed on WANs.
109 80
110config IP_DCCP_TFRC_LIB 81config IP_DCCP_TFRC_LIB
111 tristate 82 def_bool y if IP_DCCP_CCID3
112 default n
113 83
114config IP_DCCP_TFRC_DEBUG 84config IP_DCCP_TFRC_DEBUG
115 bool 85 def_bool y if IP_DCCP_CCID3_DEBUG
116 depends on IP_DCCP_TFRC_LIB
117 default y if IP_DCCP_CCID3_DEBUG
118
119endmenu 86endmenu
diff --git a/net/dccp/ccids/Makefile b/net/dccp/ccids/Makefile
deleted file mode 100644
index 438f20bccff7..000000000000
--- a/net/dccp/ccids/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_IP_DCCP_CCID3) += dccp_ccid3.o
2
3dccp_ccid3-y := ccid3.o
4
5obj-$(CONFIG_IP_DCCP_CCID2) += dccp_ccid2.o
6
7dccp_ccid2-y := ccid2.o
8
9obj-y += lib/
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index c9ea19a4d85e..d235294ace23 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -768,10 +768,9 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
768 } 768 }
769} 769}
770 770
771static struct ccid_operations ccid2 = { 771struct ccid_operations ccid2_ops = {
772 .ccid_id = DCCPC_CCID2, 772 .ccid_id = DCCPC_CCID2,
773 .ccid_name = "TCP-like", 773 .ccid_name = "TCP-like",
774 .ccid_owner = THIS_MODULE,
775 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), 774 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock),
776 .ccid_hc_tx_init = ccid2_hc_tx_init, 775 .ccid_hc_tx_init = ccid2_hc_tx_init,
777 .ccid_hc_tx_exit = ccid2_hc_tx_exit, 776 .ccid_hc_tx_exit = ccid2_hc_tx_exit,
@@ -784,22 +783,5 @@ static struct ccid_operations ccid2 = {
784 783
785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 784#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
786module_param(ccid2_debug, bool, 0644); 785module_param(ccid2_debug, bool, 0644);
787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); 786MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
788#endif 787#endif
789
790static __init int ccid2_module_init(void)
791{
792 return ccid_register(&ccid2);
793}
794module_init(ccid2_module_init);
795
796static __exit void ccid2_module_exit(void)
797{
798 ccid_unregister(&ccid2);
799}
800module_exit(ccid2_module_exit);
801
802MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>");
803MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID");
804MODULE_LICENSE("GPL");
805MODULE_ALIAS("net-dccp-ccid-2");
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3b8bd7ca6761..a27b7f4c19c5 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -940,10 +940,9 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
940 return 0; 940 return 0;
941} 941}
942 942
943static struct ccid_operations ccid3 = { 943struct ccid_operations ccid3_ops = {
944 .ccid_id = DCCPC_CCID3, 944 .ccid_id = DCCPC_CCID3,
945 .ccid_name = "TCP-Friendly Rate Control", 945 .ccid_name = "TCP-Friendly Rate Control",
946 .ccid_owner = THIS_MODULE,
947 .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), 946 .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock),
948 .ccid_hc_tx_init = ccid3_hc_tx_init, 947 .ccid_hc_tx_init = ccid3_hc_tx_init,
949 .ccid_hc_tx_exit = ccid3_hc_tx_exit, 948 .ccid_hc_tx_exit = ccid3_hc_tx_exit,
@@ -964,23 +963,5 @@ static struct ccid_operations ccid3 = {
964 963
965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 964#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
966module_param(ccid3_debug, bool, 0644); 965module_param(ccid3_debug, bool, 0644);
967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 966MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages");
968#endif 967#endif
969
970static __init int ccid3_module_init(void)
971{
972 return ccid_register(&ccid3);
973}
974module_init(ccid3_module_init);
975
976static __exit void ccid3_module_exit(void)
977{
978 ccid_unregister(&ccid3);
979}
980module_exit(ccid3_module_exit);
981
982MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
983 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
984MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
985MODULE_LICENSE("GPL");
986MODULE_ALIAS("net-dccp-ccid-3");
diff --git a/net/dccp/ccids/lib/Makefile b/net/dccp/ccids/lib/Makefile
deleted file mode 100644
index 68c93e3d89dc..000000000000
--- a/net/dccp/ccids/lib/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_IP_DCCP_TFRC_LIB) += dccp_tfrc_lib.o
2
3dccp_tfrc_lib-y := tfrc.o tfrc_equation.o packet_history.o loss_interval.o
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 5b3ce0688c5c..4d1e40127264 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -60,7 +60,6 @@ void tfrc_lh_cleanup(struct tfrc_loss_hist *lh)
60 lh->ring[LIH_INDEX(lh->counter)] = NULL; 60 lh->ring[LIH_INDEX(lh->counter)] = NULL;
61 } 61 }
62} 62}
63EXPORT_SYMBOL_GPL(tfrc_lh_cleanup);
64 63
65static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) 64static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
66{ 65{
@@ -121,7 +120,6 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
121 120
122 return (lh->i_mean < old_i_mean); 121 return (lh->i_mean < old_i_mean);
123} 122}
124EXPORT_SYMBOL_GPL(tfrc_lh_update_i_mean);
125 123
126/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ 124/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */
127static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, 125static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
@@ -169,7 +167,6 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
169 } 167 }
170 return 1; 168 return 1;
171} 169}
172EXPORT_SYMBOL_GPL(tfrc_lh_interval_add);
173 170
174int __init tfrc_li_init(void) 171int __init tfrc_li_init(void)
175{ 172{
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index 6cc108afdc3b..b7785b3581ec 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -94,7 +94,6 @@ int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
94 *headp = entry; 94 *headp = entry;
95 return 0; 95 return 0;
96} 96}
97EXPORT_SYMBOL_GPL(tfrc_tx_hist_add);
98 97
99void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) 98void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
100{ 99{
@@ -109,7 +108,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
109 108
110 *headp = NULL; 109 *headp = NULL;
111} 110}
112EXPORT_SYMBOL_GPL(tfrc_tx_hist_purge);
113 111
114u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, 112u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
115 const ktime_t now) 113 const ktime_t now)
@@ -127,7 +125,6 @@ u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
127 125
128 return rtt; 126 return rtt;
129} 127}
130EXPORT_SYMBOL_GPL(tfrc_tx_hist_rtt);
131 128
132 129
133/* 130/*
@@ -172,7 +169,6 @@ void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
172 169
173 tfrc_rx_hist_entry_from_skb(entry, skb, ndp); 170 tfrc_rx_hist_entry_from_skb(entry, skb, ndp);
174} 171}
175EXPORT_SYMBOL_GPL(tfrc_rx_hist_add_packet);
176 172
177/* has the packet contained in skb been seen before? */ 173/* has the packet contained in skb been seen before? */
178int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) 174int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
@@ -189,7 +185,6 @@ int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
189 185
190 return 0; 186 return 0;
191} 187}
192EXPORT_SYMBOL_GPL(tfrc_rx_hist_duplicate);
193 188
194static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) 189static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
195{ 190{
@@ -390,7 +385,6 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
390 } 385 }
391 return is_new_loss; 386 return is_new_loss;
392} 387}
393EXPORT_SYMBOL_GPL(tfrc_rx_handle_loss);
394 388
395int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) 389int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h)
396{ 390{
@@ -412,7 +406,6 @@ out_free:
412 } 406 }
413 return -ENOBUFS; 407 return -ENOBUFS;
414} 408}
415EXPORT_SYMBOL_GPL(tfrc_rx_hist_alloc);
416 409
417void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) 410void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
418{ 411{
@@ -424,7 +417,6 @@ void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
424 h->ring[i] = NULL; 417 h->ring[i] = NULL;
425 } 418 }
426} 419}
427EXPORT_SYMBOL_GPL(tfrc_rx_hist_purge);
428 420
429/** 421/**
430 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against 422 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against
@@ -495,4 +487,3 @@ keep_ref_for_next_time:
495 487
496 return sample; 488 return sample;
497} 489}
498EXPORT_SYMBOL_GPL(tfrc_rx_hist_sample_rtt);
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 185916218e07..4902029854d8 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -1,20 +1,18 @@
1/* 1/*
2 * TFRC: main module holding the pieces of the TFRC library together 2 * TFRC library initialisation
3 * 3 *
4 * Copyright (c) 2007 The University of Aberdeen, Scotland, UK 4 * Copyright (c) 2007 The University of Aberdeen, Scotland, UK
5 * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> 5 * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
6 */ 6 */
7#include <linux/module.h>
8#include <linux/moduleparam.h>
9#include "tfrc.h" 7#include "tfrc.h"
10 8
11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 9#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
12int tfrc_debug; 10int tfrc_debug;
13module_param(tfrc_debug, bool, 0644); 11module_param(tfrc_debug, bool, 0644);
14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); 12MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
15#endif 13#endif
16 14
17static int __init tfrc_module_init(void) 15int __init tfrc_lib_init(void)
18{ 16{
19 int rc = tfrc_li_init(); 17 int rc = tfrc_li_init();
20 18
@@ -38,18 +36,9 @@ out:
38 return rc; 36 return rc;
39} 37}
40 38
41static void __exit tfrc_module_exit(void) 39void tfrc_lib_exit(void)
42{ 40{
43 tfrc_rx_packet_history_exit(); 41 tfrc_rx_packet_history_exit();
44 tfrc_tx_packet_history_exit(); 42 tfrc_tx_packet_history_exit();
45 tfrc_li_exit(); 43 tfrc_li_exit();
46} 44}
47
48module_init(tfrc_module_init);
49module_exit(tfrc_module_exit);
50
51MODULE_AUTHOR("Gerrit Renker <gerrit@erg.abdn.ac.uk>, "
52 "Ian McDonald <ian.mcdonald@jandi.co.nz>, "
53 "Arnaldo Carvalho de Melo <acme@redhat.com>");
54MODULE_DESCRIPTION("DCCP TFRC library");
55MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index ed9857527acf..e9720b143275 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -17,7 +17,8 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/math64.h> 18#include <linux/math64.h>
19#include "../../dccp.h" 19#include "../../dccp.h"
20/* internal includes that this module exports: */ 20
21/* internal includes that this library exports: */
21#include "loss_interval.h" 22#include "loss_interval.h"
22#include "packet_history.h" 23#include "packet_history.h"
23 24
@@ -66,4 +67,12 @@ extern void tfrc_rx_packet_history_exit(void);
66 67
67extern int tfrc_li_init(void); 68extern int tfrc_li_init(void);
68extern void tfrc_li_exit(void); 69extern void tfrc_li_exit(void);
70
71#ifdef CONFIG_IP_DCCP_TFRC_LIB
72extern int tfrc_lib_init(void);
73extern void tfrc_lib_exit(void);
74#else
75#define tfrc_lib_init() (0)
76#define tfrc_lib_exit()
77#endif
69#endif /* _TFRC_H_ */ 78#endif /* _TFRC_H_ */
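The tfrc.h hunk ends with the compile-out stub pattern: callers invoke tfrc_lib_init()/tfrc_lib_exit() unconditionally and the preprocessor makes them vanish when the library is not built. The general shape, with illustrative names (the do-while form for the empty stub is the more defensive variant; the patch itself uses a bare empty macro):

#ifdef CONFIG_FOO
extern int foo_init(void);
extern void foo_exit(void);
#else
#define foo_init()      (0)
#define foo_exit()      do { } while (0)
#endif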
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 2f20a29cffe4..c5d3a9e5a5a4 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -659,8 +659,6 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
659 return scaled_div32(result, f); 659 return scaled_div32(result, f);
660} 660}
661 661
662EXPORT_SYMBOL_GPL(tfrc_calc_x);
663
664/** 662/**
665 * tfrc_calc_x_reverse_lookup - try to find p given f(p) 663 * tfrc_calc_x_reverse_lookup - try to find p given f(p)
666 * 664 *
@@ -693,5 +691,3 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
693 index = tfrc_binsearch(fvalue, 0); 691 index = tfrc_binsearch(fvalue, 0);
694 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; 692 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
695} 693}
696
697EXPORT_SYMBOL_GPL(tfrc_calc_x_reverse_lookup);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 0bc4c9a02e19..f2230fc168e1 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -432,10 +432,8 @@ static inline int dccp_ack_pending(const struct sock *sk)
432{ 432{
433 const struct dccp_sock *dp = dccp_sk(sk); 433 const struct dccp_sock *dp = dccp_sk(sk);
434 return dp->dccps_timestamp_echo != 0 || 434 return dp->dccps_timestamp_echo != 0 ||
435#ifdef CONFIG_IP_DCCP_ACKVEC
436 (dp->dccps_hc_rx_ackvec != NULL && 435 (dp->dccps_hc_rx_ackvec != NULL &&
437 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || 436 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
438#endif
439 inet_csk_ack_scheduled(sk); 437 inet_csk_ack_scheduled(sk);
440} 438}
441 439
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 30f9fb76b921..4152308958ab 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -34,7 +34,7 @@
34static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) 34static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx)
35{ 35{
36 struct dccp_sock *dp = dccp_sk(sk); 36 struct dccp_sock *dp = dccp_sk(sk);
37 struct ccid *new_ccid = ccid_new(ccid, sk, rx, gfp_any()); 37 struct ccid *new_ccid = ccid_new(ccid, sk, rx);
38 38
39 if (new_ccid == NULL) 39 if (new_ccid == NULL)
40 return -ENOMEM; 40 return -ENOMEM;
@@ -1214,8 +1214,6 @@ const char *dccp_feat_typename(const u8 type)
1214 return NULL; 1214 return NULL;
1215} 1215}
1216 1216
1217EXPORT_SYMBOL_GPL(dccp_feat_typename);
1218
1219const char *dccp_feat_name(const u8 feat) 1217const char *dccp_feat_name(const u8 feat)
1220{ 1218{
1221 static const char *feature_names[] = { 1219 static const char *feature_names[] = {
@@ -1240,6 +1238,4 @@ const char *dccp_feat_name(const u8 feat)
1240 1238
1241 return feature_names[feat]; 1239 return feature_names[feat];
1242} 1240}
1243
1244EXPORT_SYMBOL_GPL(dccp_feat_name);
1245#endif /* CONFIG_IP_DCCP_DEBUG */ 1241#endif /* CONFIG_IP_DCCP_DEBUG */
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 5eb443f656c1..7648f316310f 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -741,5 +741,3 @@ u32 dccp_sample_rtt(struct sock *sk, long delta)
741 741
742 return delta; 742 return delta;
743} 743}
744
745EXPORT_SYMBOL_GPL(dccp_sample_rtt);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1747ccae8e8d..945b4d5d23b3 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1118,9 +1118,15 @@ static int __init dccp_init(void)
1118 if (rc) 1118 if (rc)
1119 goto out_ackvec_exit; 1119 goto out_ackvec_exit;
1120 1120
1121 rc = ccid_initialize_builtins();
1122 if (rc)
1123 goto out_sysctl_exit;
1124
1121 dccp_timestamping_init(); 1125 dccp_timestamping_init();
1122out: 1126out:
1123 return rc; 1127 return rc;
1128out_sysctl_exit:
1129 dccp_sysctl_exit();
1124out_ackvec_exit: 1130out_ackvec_exit:
1125 dccp_ackvec_exit(); 1131 dccp_ackvec_exit();
1126out_free_dccp_mib: 1132out_free_dccp_mib:
@@ -1143,6 +1149,7 @@ out_free_percpu:
1143 1149
1144static void __exit dccp_fini(void) 1150static void __exit dccp_fini(void)
1145{ 1151{
1152 ccid_cleanup_builtins();
1146 dccp_mib_exit(); 1153 dccp_mib_exit();
1147 free_pages((unsigned long)dccp_hashinfo.bhash, 1154 free_pages((unsigned long)dccp_hashinfo.bhash,
1148 get_order(dccp_hashinfo.bhash_size * 1155 get_order(dccp_hashinfo.bhash_size *
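The proto.c hunk threads ccid_initialize_builtins() into dccp_init()'s setup ladder, with a matching out_sysctl_exit label so a CCID failure unwinds only the steps that already succeeded, in reverse order. The shape, reduced to a sketch with illustrative helpers:

static int __init example_init(void)
{
        int rc;

        rc = step_a();                  /* each step has a matching undo */
        if (rc)
                goto out;
        rc = step_b();
        if (rc)
                goto out_a;
        rc = step_c();
        if (rc)
                goto out_b;
        return 0;
out_b:
        undo_b();
out_a:
        undo_a();
out:
        return rc;
}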
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
286 .get_sset_count = dsa_slave_get_sset_count, 286 .get_sset_count = dsa_slave_get_sset_count,
287}; 287};
288 288
289#ifdef CONFIG_NET_DSA_TAG_DSA
290static const struct net_device_ops dsa_netdev_ops = {
291 .ndo_open = dsa_slave_open,
292 .ndo_stop = dsa_slave_close,
293 .ndo_start_xmit = dsa_xmit,
294 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
295 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
296 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
297 .ndo_set_mac_address = dsa_slave_set_mac_address,
298 .ndo_do_ioctl = dsa_slave_ioctl,
299};
300#endif
301#ifdef CONFIG_NET_DSA_TAG_EDSA
302static const struct net_device_ops edsa_netdev_ops = {
303 .ndo_open = dsa_slave_open,
304 .ndo_stop = dsa_slave_close,
305 .ndo_start_xmit = edsa_xmit,
306 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
307 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
308 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
309 .ndo_set_mac_address = dsa_slave_set_mac_address,
310 .ndo_do_ioctl = dsa_slave_ioctl,
311};
312#endif
313#ifdef CONFIG_NET_DSA_TAG_TRAILER
314static const struct net_device_ops trailer_netdev_ops = {
315 .ndo_open = dsa_slave_open,
316 .ndo_stop = dsa_slave_close,
317 .ndo_start_xmit = trailer_xmit,
318 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
319 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
320 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
321 .ndo_set_mac_address = dsa_slave_set_mac_address,
322 .ndo_do_ioctl = dsa_slave_ioctl,
323};
324#endif
289 325
290/* slave device setup *******************************************************/ 326/* slave device setup *******************************************************/
291struct net_device * 327struct net_device *
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
 	SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
 	memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
 	slave_dev->tx_queue_len = 0;
+
 	switch (ds->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
 	case htons(ETH_P_DSA):
-		slave_dev->hard_start_xmit = dsa_xmit;
+		slave_dev->netdev_ops = &dsa_netdev_ops;
 		break;
 #endif
 #ifdef CONFIG_NET_DSA_TAG_EDSA
 	case htons(ETH_P_EDSA):
-		slave_dev->hard_start_xmit = edsa_xmit;
+		slave_dev->netdev_ops = &edsa_netdev_ops;
 		break;
 #endif
 #ifdef CONFIG_NET_DSA_TAG_TRAILER
 	case htons(ETH_P_TRAILER):
-		slave_dev->hard_start_xmit = trailer_xmit;
+		slave_dev->netdev_ops = &trailer_netdev_ops;
 		break;
 #endif
 	default:
 		BUG();
 	}
-	slave_dev->open = dsa_slave_open;
-	slave_dev->stop = dsa_slave_close;
-	slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
-	slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
-	slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
-	slave_dev->set_mac_address = dsa_slave_set_mac_address;
-	slave_dev->do_ioctl = dsa_slave_ioctl;
+
 	SET_NETDEV_DEV(slave_dev, parent);
 	slave_dev->vlan_features = master->vlan_features;
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 42a0f3dd3fd6..d722013c1cae 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
 static int __init ip_auto_config(void)
 {
 	__be32 addr;
+#ifdef IPCONFIG_DYNAMIC
+	int retries = CONF_OPEN_RETRIES;
+#endif
 
 #ifdef CONFIG_PROC_FS
 	proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
 #endif
 	       ic_first_dev->next) {
 #ifdef IPCONFIG_DYNAMIC
-
-		int retries = CONF_OPEN_RETRIES;
-
 		if (ic_dynamic() < 0) {
 			ic_close_devs();
 
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c9224310ebae..52cb6939d093 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -93,13 +93,8 @@ ipt_local_out_hook(unsigned int hook,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_filter: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
-
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_filter);
 }
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 69f2c4287146..3929d20b9e45 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -132,12 +132,8 @@ ipt_local_hook(unsigned int hook,
 
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_mangle: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 
 	/* Save things which could affect route */
 	mark = skb->mark;
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 8faebfe638f1..7f65d18333e3 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -65,12 +65,8 @@ ipt_local_hook(unsigned int hook,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_raw: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_raw);
 }
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 36f3be3cc428..a52a35f4a584 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -96,12 +96,8 @@ ipt_local_out_hook(unsigned int hook,
 {
 	/* Somebody is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "iptable_security: ignoring short "
-			       "SOCK_RAW packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_security);
 }
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b2141e11575e..4beb04fac588 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -145,11 +145,8 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("ipt_hook: happy cracking.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1fd3ef7718b6..2a8bee26f43d 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -20,7 +20,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
 
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
 			      struct nf_conntrack_tuple *tuple)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f28acf11fc67..76b148bcb0dc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -522,8 +522,13 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 				unsigned int offset, size_t len)
 {
 	struct tcp_splice_state *tss = rd_desc->arg.data;
+	int ret;
 
-	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+			      tss->flags);
+	if (ret > 0)
+		rd_desc->count -= ret;
+	return ret;
 }
 
 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
@@ -531,6 +536,7 @@ static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 	/* Store TCP splice context information in read_descriptor_t. */
 	read_descriptor_t rd_desc = {
 		.arg.data = tss,
+		.count	  = tss->len,
 	};
 
 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -580,10 +586,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 		else if (!ret) {
 			if (spliced)
 				break;
-			if (flags & SPLICE_F_NONBLOCK) {
-				ret = -EAGAIN;
-				break;
-			}
 			if (sock_flag(sk, SOCK_DONE))
 				break;
 			if (sk->sk_err) {
@@ -615,11 +617,13 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 		tss.len -= ret;
 		spliced += ret;
 
+		if (!timeo)
+			break;
 		release_sock(sk);
 		lock_sock(sk);
 
 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
-		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current))
 			break;
 	}
@@ -1317,7 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if ((available < target) &&
 	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency &&
-	    __get_cpu_var(softnet_data).net_dma) {
+	    dma_find_channel(DMA_MEMCPY)) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list =
 			dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1527,7 +1531,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1632,7 +1636,6 @@ skip_copy:
 
 		/* Safe to free early-copied skbs now */
 		__skb_queue_purge(&sk->sk_async_wait_queue);
-		dma_chan_put(tp->ucopy.dma_chan);
 		tp->ucopy.dma_chan = NULL;
 	}
 	if (tp->ucopy.pinned_list) {
@@ -2387,7 +2390,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	unsigned int seq;
 	__be32 delta;
 	unsigned int oldlen;
-	unsigned int len;
+	unsigned int mss;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
@@ -2403,10 +2406,13 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);
 
+	mss = skb_shinfo(skb)->gso_size;
+	if (unlikely(skb->len <= mss))
+		goto out;
+
 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
 		int type = skb_shinfo(skb)->gso_type;
-		int mss;
 
 		if (unlikely(type &
 			     ~(SKB_GSO_TCPV4 |
@@ -2417,7 +2423,6 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 			goto out;
 
-		mss = skb_shinfo(skb)->gso_size;
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
 		segs = NULL;
@@ -2428,8 +2433,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	if (IS_ERR(segs))
 		goto out;
 
-	len = skb_shinfo(skb)->gso_size;
-	delta = htonl(oldlen + (thlen + len));
+	delta = htonl(oldlen + (thlen + mss));
 
 	skb = segs;
 	th = tcp_hdr(skb);
@@ -2445,7 +2449,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 			 csum_fold(csum_partial(skb_transport_header(skb),
 						thlen, skb->csum));
 
-		seq += len;
+		seq += mss;
 		skb = skb->next;
 		th = tcp_hdr(skb);
 
@@ -2519,9 +2523,7 @@ found:
 	flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
 
 	total = p->len;
-	mss = total;
-	if (skb_shinfo(p)->frag_list)
-		mss = skb_shinfo(p)->frag_list->len;
+	mss = skb_shinfo(p)->gso_size;
 
 	flush |= skb->len > mss || skb->len <= 0;
 	flush |= ntohl(th2->seq) + total != ntohl(th->seq);
@@ -2548,6 +2550,7 @@ out:
 
 	return pp;
 }
+EXPORT_SYMBOL(tcp_gro_receive);
 
 int tcp_gro_complete(struct sk_buff *skb)
 {
@@ -2557,7 +2560,6 @@ int tcp_gro_complete(struct sk_buff *skb)
 	skb->csum_offset = offsetof(struct tcphdr, check);
 	skb->ip_summed = CHECKSUM_PARTIAL;
 
-	skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len;
 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
 
 	if (th->cwr)
@@ -2565,6 +2567,7 @@ int tcp_gro_complete(struct sk_buff *skb)
 
 	return 0;
 }
+EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cf5ab0581eba..1ab180bad72a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
 atomic_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);
 
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
+
 static int udp_lib_lport_inuse(struct net *net, __u16 num,
 			       const struct udp_hslot *hslot,
+			       unsigned long *bitmap,
 			       struct sock *sk,
 			       int (*saddr_comp)(const struct sock *sk1,
 						 const struct sock *sk2))
@@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
 	sk_nulls_for_each(sk2, node, &hslot->head)
 		if (net_eq(sock_net(sk2), net) &&
 		    sk2 != sk &&
-		    sk2->sk_hash == num &&
+		    (bitmap || sk2->sk_hash == num) &&
 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
 			|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-		    (*saddr_comp)(sk, sk2))
-			return 1;
+		    (*saddr_comp)(sk, sk2)) {
+			if (bitmap)
+				__set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+					  bitmap);
+			else
+				return 1;
+		}
 	return 0;
 }
 
@@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 	if (!snum) {
 		int low, high, remaining;
 		unsigned rand;
-		unsigned short first;
+		unsigned short first, last;
+		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
 		inet_get_local_port_range(&low, &high);
 		remaining = (high - low) + 1;
 
 		rand = net_random();
-		snum = first = rand % remaining + low;
-		rand |= 1;
-		for (;;) {
-			hslot = &udptable->hash[udp_hashfn(net, snum)];
+		first = (((u64)rand * remaining) >> 32) + low;
+		/*
+		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
+		 */
+		rand = (rand | 1) * UDP_HTABLE_SIZE;
+		for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+			hslot = &udptable->hash[udp_hashfn(net, first)];
+			bitmap_zero(bitmap, PORTS_PER_CHAIN);
 			spin_lock_bh(&hslot->lock);
-			if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
-				break;
-			spin_unlock_bh(&hslot->lock);
+			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+					    saddr_comp);
+
+			snum = first;
+			/*
+			 * Iterate on all possible values of snum for this hash.
+			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
+			 * give us randomization and full range coverage.
+			 */
 			do {
-				snum = snum + rand;
-			} while (snum < low || snum > high);
-			if (snum == first)
-				goto fail;
+				if (low <= snum && snum <= high &&
+				    !test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+					goto found;
+				snum += rand;
+			} while (snum != first);
+			spin_unlock_bh(&hslot->lock);
 		}
+		goto fail;
 	} else {
 		hslot = &udptable->hash[udp_hashfn(net, snum)];
 		spin_lock_bh(&hslot->lock);
-		if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
 			goto fail_unlock;
 	}
+found:
 	inet_sk(sk)->num = snum;
 	sk->sk_hash = snum;
 	if (sk_unhashed(sk)) {
@@ -992,9 +1015,11 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM)
+		if (rc == -ENOMEM) {
 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					 is_udplite);
+			atomic_inc(&sk->sk_drops);
+		}
 		goto drop;
 	}
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e92ad8455c63..f9afb452249c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
 		.procname	= "mc_forwarding",
 		.data		= &ipv6_devconf.mc_forwarding,
 		.maxlen		= sizeof(int),
-		.mode		= 0644,
+		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
 	},
 #endif
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..c802bc1658a8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
 
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
-static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
-						    int proto)
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
 {
 	struct inet6_protocol *ops = NULL;
 
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
 		__skb_pull(skb, len);
 	}
 
-	return ops;
+	return proto;
 }
 
 static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_send_check)) {
 		skb_reset_transport_header(skb);
 		err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_segment)) {
 		skb_reset_transport_header(skb);
 		segs = ops->gso_segment(skb, features);
@@ -777,11 +780,112 @@ out:
 	return segs;
 }
 
+struct ipv6_gro_cb {
+	struct napi_gro_cb	napi;
+	int			proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct sk_buff **pp = NULL;
+	struct sk_buff *p;
+	struct ipv6hdr *iph;
+	unsigned int nlen;
+	int flush = 1;
+	int proto;
+	__wsum csum;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*iph));
+
+	flush += ntohs(iph->payload_len) != skb->len;
+
+	rcu_read_lock();
+	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+	iph = ipv6_hdr(skb);
+	IPV6_GRO_CB(skb)->proto = proto;
+	ops = rcu_dereference(inet6_protos[proto]);
+	if (!ops || !ops->gro_receive)
+		goto out_unlock;
+
+	flush--;
+	skb_reset_transport_header(skb);
+	nlen = skb_network_header_len(skb);
+
+	for (p = *head; p; p = p->next) {
+		struct ipv6hdr *iph2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		iph2 = ipv6_hdr(p);
+
+		/* All fields must match except length. */
+		if (nlen != skb_network_header_len(p) ||
+		    memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		NAPI_GRO_CB(p)->flush |= flush;
+	}
+
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	csum = skb->csum;
+	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+
+	pp = ops->gro_receive(head, skb);
+
+	skb->csum = csum;
+
+out_unlock:
+	rcu_read_unlock();
+
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	int err = -ENOSYS;
+
+	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+				 sizeof(*iph));
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+	if (WARN_ON(!ops || !ops->gro_complete))
+		goto out_unlock;
+
+	err = ops->gro_complete(skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
 	.func = ipv6_rcv,
 	.gso_send_check = ipv6_gso_send_check,
 	.gso_segment = ipv6_gso_segment,
+	.gro_receive = ipv6_gro_receive,
+	.gro_complete = ipv6_gro_complete,
 };
 
 static int __init ipv6_packet_init(void)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4f433847d95f..36dff8807183 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 		if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
 			goto relookup_failed;
 
-		if (ip6_dst_lookup(sk, &dst2, &fl))
+		if (ip6_dst_lookup(sk, &dst2, &fl2))
 			goto relookup_failed;
 
-		err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
+		err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
 		switch (err) {
 		case 0:
 			dst_release(dst);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99e69f7..52ee1dced2ff 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
 	struct fib6_walker_t *w = (void*)cb->args[2];
 
 	if (w) {
+		if (cb->args[4]) {
+			cb->args[4] = 0;
+			fib6_walker_unlink(w);
+		}
 		cb->args[2] = 0;
 		kfree(w);
 	}
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 		read_lock_bh(&table->tb6_lock);
 		res = fib6_walk_continue(w);
 		read_unlock_bh(&table->tb6_lock);
-		if (res != 0) {
-			if (res < 0)
-				fib6_walker_unlink(w);
-			goto end;
+		if (res <= 0) {
+			fib6_walker_unlink(w);
+			cb->args[4] = 0;
 		}
-		fib6_walker_unlink(w);
-		cb->args[4] = 0;
 	}
-end:
+
 	return res;
 }
 
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 936f48946e20..f171e8dbac91 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
 	 *	IPv6 multicast router mode is now supported ;)
 	 */
 	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+	    !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
 	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
 		/*
 		 *	Okay, we try to forward - split and duplicate
@@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
 		}
 
 		if (skb2) {
-			skb2->dev = skb2->dst->dev;
 			ip6_mr_input(skb2);
 		}
 	}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 3c51b2d827f4..228be551e9c1 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -48,6 +48,7 @@
 #include <linux/pim.h>
 #include <net/addrconf.h>
 #include <linux/netfilter_ipv6.h>
+#include <net/ip6_checksum.h>
 
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
    Note that the changes are semaphored via rtnl_lock.
@@ -365,7 +366,9 @@ static int pim6_rcv(struct sk_buff *skb)
 	pim = (struct pimreghdr *)skb_transport_header(skb);
 	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
 	    (pim->flags & PIM_NULL_REGISTER) ||
-	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+			     sizeof(*pim), IPPROTO_PIM,
+			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
 
@@ -392,7 +395,7 @@ static int pim6_rcv(struct sk_buff *skb)
 	skb_pull(skb, (u8 *)encap - skb->data);
 	skb_reset_network_header(skb);
 	skb->dev = reg_dev;
-	skb->protocol = htons(ETH_P_IP);
+	skb->protocol = htons(ETH_P_IPV6);
 	skb->ip_summed = 0;
 	skb->pkt_type = PACKET_HOST;
 	dst_release(skb->dst);
@@ -481,6 +484,7 @@ static int mif6_delete(struct net *net, int vifi)
 {
 	struct mif_device *v;
 	struct net_device *dev;
+	struct inet6_dev *in6_dev;
 	if (vifi < 0 || vifi >= net->ipv6.maxvif)
 		return -EADDRNOTAVAIL;
 
@@ -513,6 +517,10 @@ static int mif6_delete(struct net *net, int vifi)
 
 	dev_set_allmulti(dev, -1);
 
+	in6_dev = __in6_dev_get(dev);
+	if (in6_dev)
+		in6_dev->cnf.mc_forwarding--;
+
 	if (v->flags & MIFF_REGISTER)
 		unregister_netdevice(dev);
 
@@ -622,6 +630,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
 	int vifi = vifc->mif6c_mifi;
 	struct mif_device *v = &net->ipv6.vif6_table[vifi];
 	struct net_device *dev;
+	struct inet6_dev *in6_dev;
 	int err;
 
 	/* Is vif busy ? */
@@ -662,6 +671,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
 		return -EINVAL;
 	}
 
+	in6_dev = __in6_dev_get(dev);
+	if (in6_dev)
+		in6_dev->cnf.mc_forwarding++;
+
 	/*
 	 *	Fill in the VIF structures
 	 */
@@ -838,8 +851,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
 
 		skb->dst = dst_clone(pkt->dst);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-		skb_pull(skb, sizeof(struct ipv6hdr));
 	}
 
 	if (net->ipv6.mroute6_sk == NULL) {
@@ -1222,8 +1233,10 @@ static int ip6mr_sk_init(struct sock *sk)
 
 	rtnl_lock();
 	write_lock_bh(&mrt_lock);
-	if (likely(net->ipv6.mroute6_sk == NULL))
+	if (likely(net->ipv6.mroute6_sk == NULL)) {
 		net->ipv6.mroute6_sk = sk;
+		net->ipv6.devconf_all->mc_forwarding++;
+	}
 	else
 		err = -EADDRINUSE;
 	write_unlock_bh(&mrt_lock);
@@ -1242,6 +1255,7 @@ int ip6mr_sk_done(struct sock *sk)
 	if (sk == net->ipv6.mroute6_sk) {
 		write_lock_bh(&mrt_lock);
 		net->ipv6.mroute6_sk = NULL;
+		net->ipv6.devconf_all->mc_forwarding--;
 		write_unlock_bh(&mrt_lock);
 
 		mroute_clean_tables(net);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index eeeaad2e8b5c..40f324655e24 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -404,7 +404,7 @@ sticky_done:
 		else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
 			goto e_inval;
 
-		if (copy_from_user(&pkt, optval, optlen)) {
+		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
 			retv = -EFAULT;
 			break;
 		}
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index bd52151d31e9..c455cf4ee756 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -26,7 +26,7 @@
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
 
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 				unsigned int dataoff,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 18c486cf4987..9c574235c905 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -627,6 +627,9 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 	rt = ip6_rt_copy(ort);
 
 	if (rt) {
+		struct neighbour *neigh;
+		int attempts = !in_softirq();
+
 		if (!(rt->rt6i_flags&RTF_GATEWAY)) {
 			if (rt->rt6i_dst.plen != 128 &&
 			    ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
@@ -646,7 +649,35 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 		}
 #endif
 
-		rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+	retry:
+		neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+		if (IS_ERR(neigh)) {
+			struct net *net = dev_net(rt->rt6i_dev);
+			int saved_rt_min_interval =
+				net->ipv6.sysctl.ip6_rt_gc_min_interval;
+			int saved_rt_elasticity =
+				net->ipv6.sysctl.ip6_rt_gc_elasticity;
+
+			if (attempts-- > 0) {
+				net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
+				net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
+
+				ip6_dst_gc(net->ipv6.ip6_dst_ops);
+
+				net->ipv6.sysctl.ip6_rt_gc_elasticity =
+					saved_rt_elasticity;
+				net->ipv6.sysctl.ip6_rt_gc_min_interval =
+					saved_rt_min_interval;
+				goto retry;
+			}
+
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "Neighbour table overflow.\n");
+			dst_free(&rt->u.dst);
+			return NULL;
+		}
+		rt->rt6i_nexthop = neigh;
 
 	}
 
@@ -763,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
 		.proto = iph->nexthdr,
 	};
 
-	if (rt6_need_strict(&iph->daddr))
+	if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
 		flags |= RT6_LOOKUP_F_IFACE;
 
 	skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
@@ -945,8 +976,11 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	dev_hold(dev);
 	if (neigh)
 		neigh_hold(neigh);
-	else
+	else {
 		neigh = ndisc_get_neigh(dev, addr);
+		if (IS_ERR(neigh))
+			neigh = NULL;
+	}
 
 	rt->rt6i_dev = dev;
 	rt->rt6i_idev = idev;
@@ -1887,6 +1921,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
+	struct neighbour *neigh;
 
 	if (rt == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1909,11 +1944,18 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 		rt->rt6i_flags |= RTF_ANYCAST;
 	else
 		rt->rt6i_flags |= RTF_LOCAL;
-	rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
-	if (rt->rt6i_nexthop == NULL) {
+	neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+	if (IS_ERR(neigh)) {
 		dst_free(&rt->u.dst);
-		return ERR_PTR(-ENOMEM);
+
+		/* We are casting this because that is the return
+		 * value type.  But an errno encoded pointer is the
+		 * same regardless of the underlying pointer type,
+		 * and that's what we are returning.  So this is OK.
+		 */
+		return (struct rt6_info *) neigh;
 	}
+	rt->rt6i_nexthop = neigh;
 
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
@@ -2710,7 +2752,7 @@ int __init ip6_route_init(void)
 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!ip6_dst_ops_template.kmem_cachep)
-		goto out;;
+		goto out;
 
 	ret = register_pernet_subsys(&ip6_route_net_ops);
 	if (ret)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7e7ea7..a031034720b4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header;
 
 int ipv6_sysctl_register(void)
 {
-	int err = -ENOMEM;;
+	int err = -ENOMEM;
 
 	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
 	if (ip6_header == NULL)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337a8310..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
 				       struct in6_addr *saddr,
 				       struct in6_addr *daddr,
 				       __wsum base)
@@ -501,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
 	if (skb) {
 		struct tcphdr *th = tcp_hdr(skb);
 
-		th->check = tcp_v6_check(th, skb->len,
+		th->check = tcp_v6_check(skb->len,
 					 &treq->loc_addr, &treq->rmt_addr,
 					 csum_partial(th, skb->len, skb->csum));
 
@@ -942,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	return 0;
 }
 
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+
+		/* fall through */
+	case CHECKSUM_NONE:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+	return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 ts, struct tcp_md5sig_key *key, int rst)
 {
@@ -1429,14 +1464,14 @@ out:
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
 		}
 	}
 
-	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
 					      &ipv6_hdr(skb)->saddr,
 					      &ipv6_hdr(skb)->daddr, 0));
 
@@ -1640,7 +1675,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
@@ -2062,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = {
 	.err_handler	= tcp_v6_err,
 	.gso_send_check	= tcp_v6_gso_send_check,
 	.gso_segment	= tcp_tso_segment,
+	.gro_receive	= tcp6_gro_receive,
+	.gro_complete	= tcp6_gro_complete,
 	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index e4e2caeb9d82..086d5ef098fd 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -371,9 +371,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
 	line = tty->index;
-	if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) {
+	if (line >= IRCOMM_TTY_PORTS)
 		return -ENODEV;
-	}
 
 	/* Check if instance already exists */
 	self = hashbin_lock_find(ircomm_tty, line, NULL);
@@ -405,6 +404,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
 	 * Force TTY into raw mode by default which is usually what
 	 * we want for IrCOMM and IrLPT. This way applications will
 	 * not have to twiddle with printcap etc.
+	 *
+	 * Note this is completely unsafe and doesn't work properly
 	 */
 	tty->termios->c_iflag = 0;
 	tty->termios->c_oflag = 0;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index af3192d2a5a3..eb8a2a0b6eb7 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -494,7 +494,21 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 	if (err) {
 		iucv_path_free(iucv->path);
 		iucv->path = NULL;
-		err = -ECONNREFUSED;
+		switch (err) {
+		case 0x0b:	/* Target communicator is not logged on */
+			err = -ENETUNREACH;
+			break;
+		case 0x0d:	/* Max connections for this guest exceeded */
+		case 0x0e:	/* Max connections for target guest exceeded */
+			err = -EAGAIN;
+			break;
+		case 0x0f:	/* Missing IUCV authorization */
+			err = -EACCES;
+			break;
+		default:
+			err = -ECONNREFUSED;
+			break;
+		}
 		goto done;
 	}
 
@@ -507,6 +521,13 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 		release_sock(sk);
 		return -ECONNREFUSED;
 	}
+
+	if (err) {
+		iucv_path_sever(iucv->path, NULL);
+		iucv_path_free(iucv->path);
+		iucv->path = NULL;
+	}
+
 done:
 	release_sock(sk);
 	return err;
@@ -1021,12 +1042,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	ASCEBC(user_data, sizeof(user_data));
 	if (sk->sk_state != IUCV_LISTEN) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
 		goto fail;
 	}
 
 	/* Check for backlog size */
 	if (sk_acceptq_is_full(sk)) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
 		goto fail;
 	}
 
@@ -1034,6 +1057,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
 	if (!nsk) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
 		goto fail;
 	}
 
@@ -1057,6 +1081,8 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
 	if (err) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
+		iucv_sock_kill(nsk);
 		goto fail;
 	}
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f57d4f4328a..a35240f61ec3 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -50,7 +50,6 @@
 #include <asm/ebcdic.h>
 #include <asm/io.h>
 #include <asm/s390_ext.h>
-#include <asm/s390_rdev.h>
 #include <asm/smp.h>
 
 /*
@@ -517,6 +516,7 @@ static int iucv_enable(void)
 	size_t alloc_size;
 	int cpu, rc;
 
+	get_online_cpus();
 	rc = -ENOMEM;
 	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
 	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
@@ -524,19 +524,17 @@ static int iucv_enable(void)
 		goto out;
 	/* Declare per cpu buffers. */
 	rc = -EIO;
-	get_online_cpus();
 	for_each_online_cpu(cpu)
 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
-		goto out_path;
+		goto out;
 	put_online_cpus();
 	return 0;
-
-out_path:
-	put_online_cpus();
-	kfree(iucv_path_table);
 out:
+	kfree(iucv_path_table);
+	iucv_path_table = NULL;
+	put_online_cpus();
 	return rc;
 }
 
@@ -551,8 +549,9 @@ static void iucv_disable(void)
 {
 	get_online_cpus();
 	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
-	put_online_cpus();
 	kfree(iucv_path_table);
+	iucv_path_table = NULL;
+	put_online_cpus();
 }
 
 static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
@@ -589,10 +588,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
+		if (!iucv_path_table)
+			break;
 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
+		if (!iucv_path_table)
+			break;
 		cpumask = iucv_buffer_cpumask;
 		cpu_clear(cpu, cpumask);
 		if (cpus_empty(cpumask))
@@ -1692,7 +1695,7 @@ static int __init iucv_init(void)
 	rc = register_external_interrupt(0x4000, iucv_external_interrupt);
 	if (rc)
 		goto out;
-	iucv_root = s390_root_dev_register("iucv");
+	iucv_root = root_device_register("iucv");
 	if (IS_ERR(iucv_root)) {
 		rc = PTR_ERR(iucv_root);
 		goto out_int;
@@ -1736,7 +1739,7 @@ out_free:
 		kfree(iucv_irq_data[cpu]);
 		iucv_irq_data[cpu] = NULL;
 	}
-	s390_root_dev_unregister(iucv_root);
+	root_device_unregister(iucv_root);
 out_int:
 	unregister_external_interrupt(0x4000, iucv_external_interrupt);
 out:
@@ -1766,7 +1769,7 @@ static void __exit iucv_exit(void)
 		kfree(iucv_irq_data[cpu]);
 		iucv_irq_data[cpu] = NULL;
 	}
-	s390_root_dev_unregister(iucv_root);
+	root_device_unregister(iucv_root);
 	bus_unregister(&iucv_bus);
 	unregister_external_interrupt(0x4000, iucv_external_interrupt);
 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f8bd8df5e257..7dcbde3ea7d9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1285,6 +1285,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 				ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1];
 			natt->encap_dport = n_port->sadb_x_nat_t_port_port;
 		}
+		memset(&natt->encap_oa, 0, sizeof(natt->encap_oa));
 	}
 
 	err = xfrm_init_state(x);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 5f510a13b9f0..c5c0c5271096 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -469,7 +469,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	struct ieee80211_sub_if_data *sdata;
 	u16 start_seq_num;
 	u8 *state;
-	int ret;
+	int ret = 0;
 
 	if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
 		return -EINVAL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5abbc3f07dd6..b9074824862a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -699,7 +699,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 		return 0;
 
 	/* Setting ad-hoc mode on non-IBSS channel is not supported. */
-	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)
+	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
+	    type == NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
 	/*
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 929ba542fd72..1159bdb4119c 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -107,6 +107,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
 
 	sta->flags = WLAN_STA_AUTHORIZED;
 	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	rate_control_rate_init(sta);
 
 	return sta;
 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5ba721b6a399..2b890af01ba4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -620,8 +620,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
 	if (use_short_slot != bss_conf->use_short_slot) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: switched to %s slot"
-			       " (BSSID=%s)\n",
+			printk(KERN_DEBUG "%s: switched to %s slot time"
+			       " (BSSID=%pM)\n",
 			       sdata->dev->name,
 			       use_short_slot ? "short" : "long",
 			       ifsta->bssid);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 2b3b490a6073..3824990d340b 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -395,13 +395,15 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
395{ 395{
396 struct minstrel_sta_info *mi = priv_sta; 396 struct minstrel_sta_info *mi = priv_sta;
397 struct minstrel_priv *mp = priv; 397 struct minstrel_priv *mp = priv;
398 struct minstrel_rate *mr_ctl; 398 struct ieee80211_local *local = hw_to_local(mp->hw);
399 struct ieee80211_rate *ctl_rate;
399 unsigned int i, n = 0; 400 unsigned int i, n = 0;
400 unsigned int t_slot = 9; /* FIXME: get real slot time */ 401 unsigned int t_slot = 9; /* FIXME: get real slot time */
401 402
402 mi->lowest_rix = rate_lowest_index(sband, sta); 403 mi->lowest_rix = rate_lowest_index(sband, sta);
403 mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)]; 404 ctl_rate = &sband->bitrates[mi->lowest_rix];
404 mi->sp_ack_dur = mr_ctl->ack_time; 405 mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
406 !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
405 407
406 for (i = 0; i < sband->n_bitrates; i++) { 408 for (i = 0; i < sband->n_bitrates; i++) {
407 struct minstrel_rate *mr = &mi->r[n]; 409 struct minstrel_rate *mr = &mi->r[n];
@@ -416,7 +418,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
416 418
417 mr->rix = i; 419 mr->rix = i;
418 mr->bitrate = sband->bitrates[i].bitrate / 5; 420 mr->bitrate = sband->bitrates[i].bitrate / 5;
419 calc_rate_durations(mi, hw_to_local(mp->hw), mr, 421 calc_rate_durations(mi, local, mr,
420 &sband->bitrates[i]); 422 &sband->bitrates[i]);
421 423
422 /* calculate maximum number of retransmissions before 424 /* calculate maximum number of retransmissions before
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index dc2606d0ae77..e49a5b99cf10 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -195,7 +195,6 @@ struct sta_ampdu_mlme {
195 * @tx_packets: number of RX/TX MSDUs 195 * @tx_packets: number of RX/TX MSDUs
196 * @tx_bytes: number of bytes transmitted to this STA 196 * @tx_bytes: number of bytes transmitted to this STA
197 * @tx_fragments: number of transmitted MPDUs 197 * @tx_fragments: number of transmitted MPDUs
198 * @last_txrate: description of the last used transmit rate
199 * @tid_seq: per-TID sequence numbers for sending to this STA 198 * @tid_seq: per-TID sequence numbers for sending to this STA
200 * @ampdu_mlme: A-MPDU state machine state 199 * @ampdu_mlme: A-MPDU state machine state
201 * @timer_to_tid: identity mapping to ID timers 200 * @timer_to_tid: identity mapping to ID timers
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a4af3a124cce..4278e545638f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1307,8 +1307,10 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1307 if (is_multicast_ether_addr(hdr->addr3)) 1307 if (is_multicast_ether_addr(hdr->addr3))
1308 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1308 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1309 else 1309 else
1310 if (mesh_nexthop_lookup(skb, osdata)) 1310 if (mesh_nexthop_lookup(skb, osdata)) {
1311 return 0; 1311 dev_put(odev);
1312 return 0;
1313 }
1312 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1314 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
1313 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, 1315 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
1314 fwded_frames); 1316 fwded_frames);
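
Note on the mac80211 tx hunk: the outgoing device odev is looked up earlier in ieee80211_master_start_xmit() with a reference held, and the mesh_nexthop_lookup() failure path previously returned without the matching dev_put(), leaking a device reference on every failed lookup. A toy sketch of the invariant the added put restores (a plain int stands in for the kernel's atomic refcount):

    #include <stdio.h>

    struct obj { int refcnt; };

    static void obj_hold(struct obj *o) { o->refcnt++; }
    static void obj_put(struct obj *o)  { o->refcnt--; }

    /* Every exit path from a function that acquired a reference must
     * release it; the early return models the failed nexthop lookup. */
    static int xmit(struct obj *dev, int lookup_fails)
    {
        obj_hold(dev);                  /* models the device lookup */
        if (lookup_fails) {
            obj_put(dev);               /* the put this patch adds */
            return 0;
        }
        /* ... normal transmit path ... */
        obj_put(dev);
        return 1;
    }

    int main(void)
    {
        struct obj dev = { 0 };
        xmit(&dev, 1);
        printf("refcnt = %d\n", dev.refcnt);    /* 0: balanced */
        return 0;
    }
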
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7e83f74cd5de..90ce9ddb9451 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -469,7 +469,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
469 const struct nf_conntrack_tuple *repl, 469 const struct nf_conntrack_tuple *repl,
470 gfp_t gfp) 470 gfp_t gfp)
471{ 471{
472 struct nf_conn *ct = NULL; 472 struct nf_conn *ct;
473 473
474 if (unlikely(!nf_conntrack_hash_rnd_initted)) { 474 if (unlikely(!nf_conntrack_hash_rnd_initted)) {
475 get_random_bytes(&nf_conntrack_hash_rnd, 4); 475 get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -551,7 +551,7 @@ init_conntrack(struct net *net,
551 } 551 }
552 552
553 ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC); 553 ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
554 if (ct == NULL || IS_ERR(ct)) { 554 if (IS_ERR(ct)) {
555 pr_debug("Can't allocate conntrack.\n"); 555 pr_debug("Can't allocate conntrack.\n");
556 return (struct nf_conntrack_tuple_hash *)ct; 556 return (struct nf_conntrack_tuple_hash *)ct;
557 } 557 }
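
Both conntrack hunks follow from one contract change: nf_conntrack_alloc() now reports failure only through ERR_PTR() and never returns NULL, so the combined "ct == NULL || IS_ERR(ct)" test collapses to IS_ERR() alone. A userspace sketch of that convention; MAX_ERRNO and the helpers mirror the kernel's include/linux/err.h:

    #include <errno.h>
    #include <stdio.h>

    /* Small negative errno values are encoded in the pointer itself. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int the_object;

    /* Fails only via ERR_PTR(), never via NULL, so callers need a
     * single IS_ERR() check. */
    static void *alloc_ct(int oom)
    {
        return oom ? ERR_PTR(-ENOMEM) : &the_object;
    }

    int main(void)
    {
        void *ct = alloc_ct(1);
        if (IS_ERR(ct))
            printf("alloc failed: %ld\n", PTR_ERR(ct));
        return 0;
    }
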
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 00e8c27130ff..c32a7e8e3a1b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -831,13 +831,16 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
831 if (!parse_nat_setup) { 831 if (!parse_nat_setup) {
832#ifdef CONFIG_MODULES 832#ifdef CONFIG_MODULES
833 rcu_read_unlock(); 833 rcu_read_unlock();
834 spin_unlock_bh(&nf_conntrack_lock);
834 nfnl_unlock(); 835 nfnl_unlock();
835 if (request_module("nf-nat-ipv4") < 0) { 836 if (request_module("nf-nat-ipv4") < 0) {
836 nfnl_lock(); 837 nfnl_lock();
838 spin_lock_bh(&nf_conntrack_lock);
837 rcu_read_lock(); 839 rcu_read_lock();
838 return -EOPNOTSUPP; 840 return -EOPNOTSUPP;
839 } 841 }
840 nfnl_lock(); 842 nfnl_lock();
843 spin_lock_bh(&nf_conntrack_lock);
841 rcu_read_lock(); 844 rcu_read_lock();
842 if (nfnetlink_parse_nat_setup_hook) 845 if (nfnetlink_parse_nat_setup_hook)
843 return -EAGAIN; 846 return -EAGAIN;
@@ -1134,7 +1137,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1134 struct nf_conntrack_helper *helper; 1137 struct nf_conntrack_helper *helper;
1135 1138
1136 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); 1139 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
1137 if (ct == NULL || IS_ERR(ct)) 1140 if (IS_ERR(ct))
1138 return -ENOMEM; 1141 return -ENOMEM;
1139 1142
1140 if (!cda[CTA_TIMEOUT]) 1143 if (!cda[CTA_TIMEOUT])
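
The spin_unlock_bh()/spin_lock_bh() pair added around request_module() matters because nf_conntrack_lock is a spinlock and request_module() can sleep; sleeping with a spinlock held is a bug. The pattern, sketched standalone with a pthread mutex standing in for the BH-safe spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void slow_autoload(void)
    {
        usleep(1000);   /* stands in for request_module(), which sleeps */
    }

    static void parse_with_autoload(void)
    {
        pthread_mutex_lock(&table_lock);
        /* ... fast path: the hook is usually already present ... */
        pthread_mutex_unlock(&table_lock);  /* drop before sleeping */
        slow_autoload();
        pthread_mutex_lock(&table_lock);    /* re-take in the same order */
        /* ... state may have changed while unlocked, which is why the
         * kernel code re-checks the hook and returns -EAGAIN here ... */
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        parse_with_autoload();
        puts("done");
        return 0;
    }
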
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 89837a4eef76..bfbf521f6ea5 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -273,6 +273,10 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
273 have_rev = 1; 273 have_rev = 1;
274 } 274 }
275 } 275 }
276
277 if (af != NFPROTO_UNSPEC && !have_rev)
278 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
279
276 return have_rev; 280 return have_rev;
277} 281}
278 282
@@ -289,6 +293,10 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
289 have_rev = 1; 293 have_rev = 1;
290 } 294 }
291 } 295 }
296
297 if (af != NFPROTO_UNSPEC && !have_rev)
298 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
299
292 return have_rev; 300 return have_rev;
293} 301}
294 302
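
Both match_revfn() and target_revfn() gain the same fallback: when the requested family has no matching revision, the lookup is retried once under NFPROTO_UNSPEC, where family-independent extensions are registered. A standalone sketch of the recursion; the registration check is a stand-in:

    #include <stdio.h>

    #define FAM_UNSPEC 0    /* models NFPROTO_UNSPEC */

    /* Pretend only a family-independent revision 1 is registered. */
    static int revision_exists(int family, int revision)
    {
        return family == FAM_UNSPEC && revision == 1;
    }

    static int revfn(int family, int revision)
    {
        int have_rev = revision_exists(family, revision);

        /* Fall back exactly once to the family-independent table. */
        if (family != FAM_UNSPEC && !have_rev)
            return revfn(FAM_UNSPEC, revision);

        return have_rev;
    }

    int main(void)
    {
        printf("family 2, rev 1 -> %d\n", revfn(2, 1));   /* 1 via fallback */
        printf("family 2, rev 9 -> %d\n", revfn(2, 9));   /* 0 */
        return 0;
    }
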
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 29375ba8db73..93acaa59d108 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -243,6 +243,17 @@ static struct xt_match xt_time_mt_reg __read_mostly = {
243 243
244static int __init time_mt_init(void) 244static int __init time_mt_init(void)
245{ 245{
246 int minutes = sys_tz.tz_minuteswest;
247
248 if (minutes < 0) /* east of Greenwich */
249 printk(KERN_INFO KBUILD_MODNAME
250 ": kernel timezone is +%02d%02d\n",
251 -minutes / 60, -minutes % 60);
252 else /* west of Greenwich */
253 printk(KERN_INFO KBUILD_MODNAME
254 ": kernel timezone is -%02d%02d\n",
255 minutes / 60, minutes % 60);
256
246 return xt_register_match(&xt_time_mt_reg); 257 return xt_register_match(&xt_time_mt_reg);
247} 258}
248 259
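
time_mt_init() now logs the kernel's timezone at module load, which helps debug xt_time rules that appear to fire at the wrong hour. The sign is deliberately inverted: sys_tz.tz_minuteswest counts minutes west of Greenwich, so a negative value means east of Greenwich and prints as a positive HHMM offset. The same arithmetic as a standalone program:

    #include <stdio.h>

    static void print_tz(int minuteswest)
    {
        if (minuteswest < 0)    /* east of Greenwich */
            printf("kernel timezone is +%02d%02d\n",
                   -minuteswest / 60, -minuteswest % 60);
        else                    /* west of Greenwich */
            printf("kernel timezone is -%02d%02d\n",
                   minuteswest / 60, minuteswest % 60);
    }

    int main(void)
    {
        print_tz(-60);    /* CET: prints +0100 */
        print_tz(300);    /* EST: prints -0500 */
        return 0;
    }
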
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 3e1191cecaf0..1d3dd30099df 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -225,6 +225,7 @@ void genl_unregister_mc_group(struct genl_family *family,
225 __genl_unregister_mc_group(family, grp); 225 __genl_unregister_mc_group(family, grp);
226 genl_unlock(); 226 genl_unlock();
227} 227}
228EXPORT_SYMBOL(genl_unregister_mc_group);
228 229
229static void genl_unregister_mc_groups(struct genl_family *family) 230static void genl_unregister_mc_groups(struct genl_family *family)
230{ 231{
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5f94db2f3e9e..1fc4a7885c41 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
77#include <linux/poll.h> 77#include <linux/poll.h>
78#include <linux/module.h> 78#include <linux/module.h>
79#include <linux/init.h> 79#include <linux/init.h>
80#include <linux/mutex.h>
80 81
81#ifdef CONFIG_INET 82#ifdef CONFIG_INET
82#include <net/inet_common.h> 83#include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
175#endif 176#endif
176 struct packet_type prot_hook; 177 struct packet_type prot_hook;
177 spinlock_t bind_lock; 178 spinlock_t bind_lock;
179 struct mutex pg_vec_lock;
178 unsigned int running:1, /* prot_hook is attached*/ 180 unsigned int running:1, /* prot_hook is attached*/
179 auxdata:1, 181 auxdata:1,
180 origdev:1; 182 origdev:1;
@@ -220,13 +222,13 @@ static void *packet_lookup_frame(struct packet_sock *po, unsigned int position,
220 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size); 222 h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
221 switch (po->tp_version) { 223 switch (po->tp_version) {
222 case TPACKET_V1: 224 case TPACKET_V1:
223 if (status != h.h1->tp_status ? TP_STATUS_USER : 225 if (status != (h.h1->tp_status ? TP_STATUS_USER :
224 TP_STATUS_KERNEL) 226 TP_STATUS_KERNEL))
225 return NULL; 227 return NULL;
226 break; 228 break;
227 case TPACKET_V2: 229 case TPACKET_V2:
228 if (status != h.h2->tp_status ? TP_STATUS_USER : 230 if (status != (h.h2->tp_status ? TP_STATUS_USER :
229 TP_STATUS_KERNEL) 231 TP_STATUS_KERNEL))
230 return NULL; 232 return NULL;
231 break; 233 break;
232 } 234 }
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
1069 */ 1071 */
1070 1072
1071 spin_lock_init(&po->bind_lock); 1073 spin_lock_init(&po->bind_lock);
1074 mutex_init(&po->pg_vec_lock);
1072 po->prot_hook.func = packet_rcv; 1075 po->prot_hook.func = packet_rcv;
1073 1076
1074 if (sock->type == SOCK_PACKET) 1077 if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1865 synchronize_net(); 1868 synchronize_net();
1866 1869
1867 err = -EBUSY; 1870 err = -EBUSY;
1871 mutex_lock(&po->pg_vec_lock);
1868 if (closing || atomic_read(&po->mapped) == 0) { 1872 if (closing || atomic_read(&po->mapped) == 0) {
1869 err = 0; 1873 err = 0;
1870#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) 1874#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
1886 if (atomic_read(&po->mapped)) 1890 if (atomic_read(&po->mapped))
1887 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); 1891 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
1888 } 1892 }
1893 mutex_unlock(&po->pg_vec_lock);
1889 1894
1890 spin_lock(&po->bind_lock); 1895 spin_lock(&po->bind_lock);
1891 if (was_running && !po->running) { 1896 if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
1918 1923
1919 size = vma->vm_end - vma->vm_start; 1924 size = vma->vm_end - vma->vm_start;
1920 1925
1921 lock_sock(sk); 1926 mutex_lock(&po->pg_vec_lock);
1922 if (po->pg_vec == NULL) 1927 if (po->pg_vec == NULL)
1923 goto out; 1928 goto out;
1924 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) 1929 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
1941 err = 0; 1946 err = 0;
1942 1947
1943out: 1948out:
1944 release_sock(sk); 1949 mutex_unlock(&po->pg_vec_lock);
1945 return err; 1950 return err;
1946} 1951}
1947#endif 1952#endif
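
The af_packet hunks all serve one change: packet_mmap() stops taking the socket lock and instead shares the new pg_vec_lock mutex with packet_set_ring(), so ring-buffer setup and mapping serialize against each other without entangling the socket lock with mm locking in the mmap path. A toy model of the arrangement; the names mirror the patch, but the locking is a plain pthread mutex, not the kernel's:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pkt_sock {
        pthread_mutex_t pg_vec_lock;    /* serializes ring setup vs. mmap */
        char **pg_vec;                  /* ring pages, NULL when no ring */
        size_t pg_vec_len;
    };

    static int ring_mmap(struct pkt_sock *po)
    {
        int err = -1;
        pthread_mutex_lock(&po->pg_vec_lock);
        if (po->pg_vec)                 /* a ring must exist to be mapped */
            err = 0;
        pthread_mutex_unlock(&po->pg_vec_lock);
        return err;
    }

    static void set_ring(struct pkt_sock *po, char **vec, size_t len)
    {
        pthread_mutex_lock(&po->pg_vec_lock);
        po->pg_vec = vec;               /* swap the ring in atomically */
        po->pg_vec_len = len;
        pthread_mutex_unlock(&po->pg_vec_lock);
    }

    static char *pages[4];

    int main(void)
    {
        struct pkt_sock po = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        printf("mmap before ring: %d\n", ring_mmap(&po));
        set_ring(&po, pages, 4);
        printf("mmap after ring:  %d\n", ring_mmap(&po));
        return 0;
    }
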
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu)
227 return 0; 227 return 0;
228} 228}
229 229
230static const struct net_device_ops gprs_netdev_ops = {
231 .ndo_open = gprs_open,
232 .ndo_stop = gprs_close,
233 .ndo_start_xmit = gprs_xmit,
234 .ndo_change_mtu = gprs_set_mtu,
235};
236
230static void gprs_setup(struct net_device *dev) 237static void gprs_setup(struct net_device *dev)
231{ 238{
232 dev->features = NETIF_F_FRAGLIST; 239 dev->features = NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev)
237 dev->addr_len = 0; 244 dev->addr_len = 0;
238 dev->tx_queue_len = 10; 245 dev->tx_queue_len = 10;
239 246
247 dev->netdev_ops = &gprs_netdev_ops;
240 dev->destructor = free_netdev; 248 dev->destructor = free_netdev;
241 dev->open = gprs_open;
242 dev->stop = gprs_close;
243 dev->hard_start_xmit = gprs_xmit; /* mandatory */
244 dev->change_mtu = gprs_set_mtu;
245} 249}
246 250
247/* 251/*
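
pep-gprs is converted to the then-new net_device_ops model (the teql change further down is the same conversion): the per-device open/stop/hard_start_xmit/change_mtu pointers move into a single shared, const ops table hung off dev->netdev_ops. A self-contained toy showing the shape of that pattern; the struct names are illustrative, not the kernel's:

    #include <stdio.h>

    struct device;

    struct device_ops {
        int (*open)(struct device *);
        int (*xmit)(struct device *, const char *);
    };

    struct device {
        const char *name;
        const struct device_ops *ops;   /* replaces per-device members */
    };

    static int gprs_open(struct device *d)
    {
        printf("%s: up\n", d->name);
        return 0;
    }

    static int gprs_xmit(struct device *d, const char *pkt)
    {
        printf("%s: tx %s\n", d->name, pkt);
        return 0;
    }

    /* One read-only vtable shared by every instance. */
    static const struct device_ops gprs_ops = {
        .open = gprs_open,
        .xmit = gprs_xmit,
    };

    int main(void)
    {
        struct device dev = { "gprs0", &gprs_ops };
        dev.ops->open(&dev);
        dev.ops->xmit(&dev, "hello");
        return 0;
    }
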
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 3c94f76d5525..3eaa39403c13 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -54,10 +54,10 @@ static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
54static bool rfkill_epo_lock_active; 54static bool rfkill_epo_lock_active;
55 55
56 56
57#ifdef CONFIG_RFKILL_LEDS
57static void rfkill_led_trigger(struct rfkill *rfkill, 58static void rfkill_led_trigger(struct rfkill *rfkill,
58 enum rfkill_state state) 59 enum rfkill_state state)
59{ 60{
60#ifdef CONFIG_RFKILL_LEDS
61 struct led_trigger *led = &rfkill->led_trigger; 61 struct led_trigger *led = &rfkill->led_trigger;
62 62
63 if (!led->name) 63 if (!led->name)
@@ -66,10 +66,8 @@ static void rfkill_led_trigger(struct rfkill *rfkill,
66 led_trigger_event(led, LED_OFF); 66 led_trigger_event(led, LED_OFF);
67 else 67 else
68 led_trigger_event(led, LED_FULL); 68 led_trigger_event(led, LED_FULL);
69#endif /* CONFIG_RFKILL_LEDS */
70} 69}
71 70
72#ifdef CONFIG_RFKILL_LEDS
73static void rfkill_led_trigger_activate(struct led_classdev *led) 71static void rfkill_led_trigger_activate(struct led_classdev *led)
74{ 72{
75 struct rfkill *rfkill = container_of(led->trigger, 73 struct rfkill *rfkill = container_of(led->trigger,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 05d178008cbc..07372f60bee3 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -638,8 +638,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
638 break; 638 break;
639 639
640 n->next = *ins; 640 n->next = *ins;
641 wmb(); 641 tcf_tree_lock(tp);
642 *ins = n; 642 *ins = n;
643 tcf_tree_unlock(tp);
643 644
644 *arg = (unsigned long)n; 645 *arg = (unsigned long)n;
645 return 0; 646 return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5070643ce534..2f0f0b04d3fb 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -661,12 +661,13 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
661 * next pending event (0 for no event in pq). 661 * next pending event (0 for no event in pq).
662 * Note: Applied are events that have cl->pq_key <= q->now. 662 * Note: Applied are events that have cl->pq_key <= q->now.
663 */ 663 */
664static psched_time_t htb_do_events(struct htb_sched *q, int level) 664static psched_time_t htb_do_events(struct htb_sched *q, int level,
665 unsigned long start)
665{ 666{
666 /* don't run for longer than 2 jiffies; 2 is used instead of 667 /* don't run for longer than 2 jiffies; 2 is used instead of
667 1 to simplify things when jiffy is going to be incremented 668 1 to simplify things when jiffy is going to be incremented
668 too soon */ 669 too soon */
669 unsigned long stop_at = jiffies + 2; 670 unsigned long stop_at = start + 2;
670 while (time_before(jiffies, stop_at)) { 671 while (time_before(jiffies, stop_at)) {
671 struct htb_class *cl; 672 struct htb_class *cl;
672 long diff; 673 long diff;
@@ -685,8 +686,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
685 if (cl->cmode != HTB_CAN_SEND) 686 if (cl->cmode != HTB_CAN_SEND)
686 htb_add_to_wait_tree(q, cl, diff); 687 htb_add_to_wait_tree(q, cl, diff);
687 } 688 }
688 /* too much load - let's continue on next jiffie */ 689 /* too much load - let's continue on next jiffie (including above) */
689 return q->now + PSCHED_TICKS_PER_SEC / HZ; 690 return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
690} 691}
691 692
692/* Returns class->node+prio from id-tree where classe's id is >= id. NULL 693/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -845,6 +846,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
845 struct htb_sched *q = qdisc_priv(sch); 846 struct htb_sched *q = qdisc_priv(sch);
846 int level; 847 int level;
847 psched_time_t next_event; 848 psched_time_t next_event;
849 unsigned long start_at;
848 850
849 /* try to dequeue direct packets as high prio (!) to minimize cpu work */ 851 /* try to dequeue direct packets as high prio (!) to minimize cpu work */
850 skb = __skb_dequeue(&q->direct_queue); 852 skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
857 if (!sch->q.qlen) 859 if (!sch->q.qlen)
858 goto fin; 860 goto fin;
859 q->now = psched_get_time(); 861 q->now = psched_get_time();
862 start_at = jiffies;
860 863
861 next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; 864 next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
862 865
@@ -866,14 +869,14 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
866 psched_time_t event; 869 psched_time_t event;
867 870
868 if (q->now >= q->near_ev_cache[level]) { 871 if (q->now >= q->near_ev_cache[level]) {
869 event = htb_do_events(q, level); 872 event = htb_do_events(q, level, start_at);
870 if (!event) 873 if (!event)
871 event = q->now + PSCHED_TICKS_PER_SEC; 874 event = q->now + PSCHED_TICKS_PER_SEC;
872 q->near_ev_cache[level] = event; 875 q->near_ev_cache[level] = event;
873 } else 876 } else
874 event = q->near_ev_cache[level]; 877 event = q->near_ev_cache[level];
875 878
876 if (event && next_event > event) 879 if (next_event > event)
877 next_event = event; 880 next_event = event;
878 881
879 m = ~q->row_mask[level]; 882 m = ~q->row_mask[level];
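
The HTB hunks hang together: htb_dequeue() now snapshots jiffies once into start_at and hands it to htb_do_events(), so event processing across all levels shares one 2-jiffy budget instead of each level restarting its own deadline; on overload the function reports the next event two ticks out, matching the widened budget. A standalone toy showing how the shared snapshot caps total work:

    #include <stdio.h>

    static unsigned long jiffies;   /* stands in for the kernel tick counter */

    static int do_events(unsigned long start)
    {
        unsigned long stop_at = start + 2;  /* shared 2-tick budget */
        int processed = 0;

        while (jiffies < stop_at) {
            jiffies++;              /* pretend each event costs one tick */
            processed++;
        }
        return processed;
    }

    int main(void)
    {
        unsigned long start_at = jiffies;   /* snapshot once per dequeue */
        printf("level 0 processed %d\n", do_events(start_at));
        printf("level 1 processed %d\n", do_events(start_at)); /* budget spent */
        return 0;
    }
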
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df00559..33133d27b539 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
435 int i; 435 int i;
436 436
437 q->perturb_timer.function = sfq_perturbation; 437 q->perturb_timer.function = sfq_perturbation;
438 q->perturb_timer.data = (unsigned long)sch;; 438 q->perturb_timer.data = (unsigned long)sch;
439 init_timer_deferrable(&q->perturb_timer); 439 init_timer_deferrable(&q->perturb_timer);
440 440
441 for (i = 0; i < SFQ_HASH_DIVISOR; i++) 441 for (i = 0; i < SFQ_HASH_DIVISOR; i++)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart:
289 289
290 do { 290 do {
291 struct net_device *slave = qdisc_dev(q); 291 struct net_device *slave = qdisc_dev(q);
292 struct netdev_queue *slave_txq; 292 struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
293 const struct net_device_ops *slave_ops = slave->netdev_ops;
293 294
294 slave_txq = netdev_get_tx_queue(slave, 0);
295 if (slave_txq->qdisc_sleeping != q) 295 if (slave_txq->qdisc_sleeping != q)
296 continue; 296 continue;
297 if (__netif_subqueue_stopped(slave, subq) || 297 if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@ restart:
305 if (__netif_tx_trylock(slave_txq)) { 305 if (__netif_tx_trylock(slave_txq)) {
306 if (!netif_tx_queue_stopped(slave_txq) && 306 if (!netif_tx_queue_stopped(slave_txq) &&
307 !netif_tx_queue_frozen(slave_txq) && 307 !netif_tx_queue_frozen(slave_txq) &&
308 slave->hard_start_xmit(skb, slave) == 0) { 308 slave_ops->ndo_start_xmit(skb, slave) == 0) {
309 __netif_tx_unlock(slave_txq); 309 __netif_tx_unlock(slave_txq);
310 master->slaves = NEXT_SLAVE(q); 310 master->slaves = NEXT_SLAVE(q);
311 netif_wake_queue(dev); 311 netif_wake_queue(dev);
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
420 return 0; 420 return 0;
421} 421}
422 422
423static const struct net_device_ops teql_netdev_ops = {
424 .ndo_open = teql_master_open,
425 .ndo_stop = teql_master_close,
426 .ndo_start_xmit = teql_master_xmit,
427 .ndo_get_stats = teql_master_stats,
428 .ndo_change_mtu = teql_master_mtu,
429};
430
423static __init void teql_master_setup(struct net_device *dev) 431static __init void teql_master_setup(struct net_device *dev)
424{ 432{
425 struct teql_master *master = netdev_priv(dev); 433 struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev)
436 ops->destroy = teql_destroy; 444 ops->destroy = teql_destroy;
437 ops->owner = THIS_MODULE; 445 ops->owner = THIS_MODULE;
438 446
439 dev->open = teql_master_open; 447 dev->netdev_ops = &teql_netdev_ops;
440 dev->hard_start_xmit = teql_master_xmit;
441 dev->stop = teql_master_close;
442 dev->get_stats = teql_master_stats;
443 dev->change_mtu = teql_master_mtu;
444 dev->type = ARPHRD_VOID; 448 dev->type = ARPHRD_VOID;
445 dev->mtu = 1500; 449 dev->mtu = 1500;
446 dev->tx_queue_len = 100; 450 dev->tx_queue_len = 100;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 52db5f60daa0..56935bbc1496 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -141,8 +141,8 @@ void sctp_auth_destroy_keys(struct list_head *keys)
141/* Compare two byte vectors as numbers. Return values 141/* Compare two byte vectors as numbers. Return values
142 * are: 142 * are:
143 * 0 - vectors are equal 143 * 0 - vectors are equal
144 * < 0 - vector 1 is smaller then vector2 144 * < 0 - vector 1 is smaller than vector2
145 * > 0 - vector 1 is greater then vector2 145 * > 0 - vector 1 is greater than vector2
146 * 146 *
147 * Algorithm is: 147 * Algorithm is:
148 * This is performed by selecting the numerically smaller key vector... 148 * This is performed by selecting the numerically smaller key vector...
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
489 return 0; 489 return 0;
490 490
491out_err: 491out_err:
492 /* Clean up any successfull allocations */ 492 /* Clean up any successful allocations */
493 sctp_auth_destroy_hmacs(ep->auth_hmacs); 493 sctp_auth_destroy_hmacs(ep->auth_hmacs);
494 return -ENOMEM; 494 return -ENOMEM;
495} 495}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index bf612d954d41..2e4a8646dbc3 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -249,6 +249,19 @@ int sctp_rcv(struct sk_buff *skb)
249 */ 249 */
250 sctp_bh_lock_sock(sk); 250 sctp_bh_lock_sock(sk);
251 251
252 if (sk != rcvr->sk) {
253 /* Our cached sk is different from the rcvr->sk. This is
254 * because migrate()/accept() may have moved the association
255 * to a new socket and released all the sockets. So now we
256 * are holding a lock on the old socket while the user may
257 * be doing something with the new socket. Switch our view
258 * of the current sk.
259 */
260 sctp_bh_unlock_sock(sk);
261 sk = rcvr->sk;
262 sctp_bh_lock_sock(sk);
263 }
264
252 if (sock_owned_by_user(sk)) { 265 if (sock_owned_by_user(sk)) {
253 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); 266 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
254 sctp_add_backlog(sk, skb); 267 sctp_add_backlog(sk, skb);
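
The new block in sctp_rcv() copes with associations that migrate()/accept() moved to another socket after sk was cached: the stale socket's lock is dropped and the lock of the socket rcvr->sk points at now is taken instead. The re-lock dance, modeled standalone with pthread mutexes standing in for the bh socket locks:

    #include <pthread.h>
    #include <stdio.h>

    struct sock { pthread_mutex_t lock; const char *name; };

    /* If the association moved while we held a cached pointer, drop the
     * old socket's lock and take the lock of the socket it lives on now. */
    static struct sock *lock_current(struct sock *cached, struct sock *actual)
    {
        pthread_mutex_lock(&cached->lock);
        if (cached != actual) {
            pthread_mutex_unlock(&cached->lock);    /* stale socket */
            pthread_mutex_lock(&actual->lock);
            cached = actual;                        /* switch our view */
        }
        printf("holding lock on %s\n", cached->name);
        return cached;
    }

    int main(void)
    {
        struct sock old_sk = { PTHREAD_MUTEX_INITIALIZER, "old" };
        struct sock new_sk = { PTHREAD_MUTEX_INITIALIZER, "new" };
        struct sock *held = lock_current(&old_sk, &new_sk);
        pthread_mutex_unlock(&held->lock);
        return 0;
    }
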
diff --git a/net/sctp/output.c b/net/sctp/output.c
index c3f417f7ec6e..73639355157e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -324,14 +324,16 @@ append:
324 switch (chunk->chunk_hdr->type) { 324 switch (chunk->chunk_hdr->type) {
325 case SCTP_CID_DATA: 325 case SCTP_CID_DATA:
326 retval = sctp_packet_append_data(packet, chunk); 326 retval = sctp_packet_append_data(packet, chunk);
327 if (SCTP_XMIT_OK != retval)
328 goto finish;
327 /* Disallow SACK bundling after DATA. */ 329 /* Disallow SACK bundling after DATA. */
328 packet->has_sack = 1; 330 packet->has_sack = 1;
329 /* Disallow AUTH bundling after DATA */ 331 /* Disallow AUTH bundling after DATA */
330 packet->has_auth = 1; 332 packet->has_auth = 1;
331 /* Let it be known that packet has DATA in it */ 333 /* Let it be known that packet has DATA in it */
332 packet->has_data = 1; 334 packet->has_data = 1;
333 if (SCTP_XMIT_OK != retval) 335 /* timestamp the chunk for rtx purposes */
334 goto finish; 336 chunk->sent_at = jiffies;
335 break; 337 break;
336 case SCTP_CID_COOKIE_ECHO: 338 case SCTP_CID_COOKIE_ECHO:
337 packet->has_cookie_echo = 1; 339 packet->has_cookie_echo = 1;
@@ -470,7 +472,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
470 } else 472 } else
471 chunk->resent = 1; 473 chunk->resent = 1;
472 474
473 chunk->sent_at = jiffies;
474 has_data = 1; 475 has_data = 1;
475 } 476 }
476 477
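
Two coupled changes in the sctp append path: the retval test moves above the flag updates, so a DATA chunk that fails to fit can no longer mark the packet as carrying DATA or set the SACK/AUTH restrictions, and the chunk is stamped with sent_at when it is bundled rather than in sctp_packet_transmit(), per the new "for rtx purposes" comment. A minimal sketch of the ordering half of the fix:

    #include <stdio.h>

    struct packet { int has_data; };

    enum { XMIT_OK, XMIT_FULL };

    /* Check the append result *before* mutating packet state, so a
     * chunk that did not fit cannot leave has_data set. */
    static int append_data(struct packet *p, int fits)
    {
        int retval = fits ? XMIT_OK : XMIT_FULL;

        if (retval != XMIT_OK)
            return retval;      /* bail out before touching flags */
        p->has_data = 1;        /* only reached on success */
        return XMIT_OK;
    }

    int main(void)
    {
        struct packet p = { 0 };
        append_data(&p, 0);
        printf("has_data after failed append: %d\n", p.has_data);  /* 0 */
        return 0;
    }
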
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 247ebc95c1e5..bc411c896216 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -929,7 +929,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
929 } 929 }
930 930
931 /* Finally, transmit new packets. */ 931 /* Finally, transmit new packets. */
932 start_timer = 0;
933 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 932 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
934 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 933 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
935 * stream identifier. 934 * stream identifier.
@@ -1028,7 +1027,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
1028 list_add_tail(&chunk->transmitted_list, 1027 list_add_tail(&chunk->transmitted_list,
1029 &transport->transmitted); 1028 &transport->transmitted);
1030 1029
1031 sctp_transport_reset_timers(transport, start_timer-1); 1030 sctp_transport_reset_timers(transport, 0);
1032 1031
1033 q->empty = 0; 1032 q->empty = 0;
1034 1033
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1c4e5d6c29c0..3a0cd075914f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4268,9 +4268,9 @@ nomem:
4268 4268
4269/* 4269/*
4270 * Handle a protocol violation when the chunk length is invalid. 4270 * Handle a protocol violation when the chunk length is invalid.
4271 * "Invalid" length is identified as smaller then the minimal length a 4271 * "Invalid" length is identified as smaller than the minimal length a
4272 * given chunk can be. For example, a SACK chunk has invalid length 4272 * given chunk can be. For example, a SACK chunk has invalid length
4273 * if it's length is set to be smaller then the size of sctp_sack_chunk_t. 4273 * if its length is set to be smaller than the size of sctp_sack_chunk_t.
4274 * 4274 *
4275 * We inform the other end by sending an ABORT with a Protocol Violation 4275 * We inform the other end by sending an ABORT with a Protocol Violation
4276 * error code. 4276 * error code.
@@ -4300,7 +4300,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4300 4300
4301/* 4301/*
4302 * Handle a protocol violation when the parameter length is invalid. 4302 * Handle a protocol violation when the parameter length is invalid.
4303 * "Invalid" length is identified as smaller then the minimal length a 4303 * "Invalid" length is identified as smaller than the minimal length a
4304 * given parameter can be. 4304 * given parameter can be.
4305 */ 4305 */
4306static sctp_disposition_t sctp_sf_violation_paramlen( 4306static sctp_disposition_t sctp_sf_violation_paramlen(
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b14a8f33e42d..ff0a8f88de04 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2717,7 +2717,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o
2717 paths++; 2717 paths++;
2718 } 2718 }
2719 2719
2720 /* Only validate asocmaxrxt if we have more then 2720 /* Only validate asocmaxrxt if we have more than
2721 * one path/transport. We do this because path 2721 * one path/transport. We do this because path
2722 * retransmissions are only counted when we have more 2722 * retransmissions are only counted when we have more
2723 * than one path. 2723 * than one path.
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 35c73e82553a..9bd64565021a 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -227,7 +227,7 @@ void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)
227 */ 227 */
228 bitmap_zero(map->tsn_map, map->len); 228 bitmap_zero(map->tsn_map, map->len);
229 } else { 229 } else {
230 /* If the gap is smaller then the map size, 230 /* If the gap is smaller than the map size,
231 * shift the map by 'gap' bits and update further. 231 * shift the map by 'gap' bits and update further.
232 */ 232 */
233 bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); 233 bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len);
diff --git a/net/socket.c b/net/socket.c
index 2c730fc718ab..35dd7371752a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1214,7 +1214,7 @@ int sock_create_kern(int family, int type, int protocol, struct socket **res)
1214 return __sock_create(&init_net, family, type, protocol, res, 1); 1214 return __sock_create(&init_net, family, type, protocol, res, 1);
1215} 1215}
1216 1216
1217asmlinkage long sys_socket(int family, int type, int protocol) 1217SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
1218{ 1218{
1219 int retval; 1219 int retval;
1220 struct socket *sock; 1220 struct socket *sock;
@@ -1255,8 +1255,8 @@ out_release:
1255 * Create a pair of connected sockets. 1255 * Create a pair of connected sockets.
1256 */ 1256 */
1257 1257
1258asmlinkage long sys_socketpair(int family, int type, int protocol, 1258SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
1259 int __user *usockvec) 1259 int __user *, usockvec)
1260{ 1260{
1261 struct socket *sock1, *sock2; 1261 struct socket *sock1, *sock2;
1262 int fd1, fd2, err; 1262 int fd1, fd2, err;
@@ -1313,13 +1313,7 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
1313 goto out_fd1; 1313 goto out_fd1;
1314 } 1314 }
1315 1315
1316 err = audit_fd_pair(fd1, fd2); 1316 audit_fd_pair(fd1, fd2);
1317 if (err < 0) {
1318 fput(newfile1);
1319 fput(newfile2);
1320 goto out_fd;
1321 }
1322
1323 fd_install(fd1, newfile1); 1317 fd_install(fd1, newfile1);
1324 fd_install(fd2, newfile2); 1318 fd_install(fd2, newfile2);
1325 /* fd1 and fd2 may be already another descriptors. 1319 /* fd1 and fd2 may be already another descriptors.
@@ -1349,7 +1343,6 @@ out_fd2:
1349out_fd1: 1343out_fd1:
1350 put_filp(newfile2); 1344 put_filp(newfile2);
1351 sock_release(sock2); 1345 sock_release(sock2);
1352out_fd:
1353 put_unused_fd(fd1); 1346 put_unused_fd(fd1);
1354 put_unused_fd(fd2); 1347 put_unused_fd(fd2);
1355 goto out; 1348 goto out;
@@ -1363,7 +1356,7 @@ out_fd:
1363 * the protocol layer (having also checked the address is ok). 1356 * the protocol layer (having also checked the address is ok).
1364 */ 1357 */
1365 1358
1366asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen) 1359SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
1367{ 1360{
1368 struct socket *sock; 1361 struct socket *sock;
1369 struct sockaddr_storage address; 1362 struct sockaddr_storage address;
@@ -1392,7 +1385,7 @@ asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
1392 * ready for listening. 1385 * ready for listening.
1393 */ 1386 */
1394 1387
1395asmlinkage long sys_listen(int fd, int backlog) 1388SYSCALL_DEFINE2(listen, int, fd, int, backlog)
1396{ 1389{
1397 struct socket *sock; 1390 struct socket *sock;
1398 int err, fput_needed; 1391 int err, fput_needed;
@@ -1425,8 +1418,8 @@ asmlinkage long sys_listen(int fd, int backlog)
1425 * clean when we restructure accept also. 1418 * clean when we restructure accept also.
1426 */ 1419 */
1427 1420
1428asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, 1421SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1429 int __user *upeer_addrlen, int flags) 1422 int __user *, upeer_addrlen, int, flags)
1430{ 1423{
1431 struct socket *sock, *newsock; 1424 struct socket *sock, *newsock;
1432 struct file *newfile; 1425 struct file *newfile;
@@ -1509,8 +1502,8 @@ out_fd:
1509 goto out_put; 1502 goto out_put;
1510} 1503}
1511 1504
1512asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1505SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
1513 int __user *upeer_addrlen) 1506 int __user *, upeer_addrlen)
1514{ 1507{
1515 return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); 1508 return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
1516} 1509}
@@ -1527,8 +1520,8 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
1527 * include the -EINPROGRESS status for such sockets. 1520 * include the -EINPROGRESS status for such sockets.
1528 */ 1521 */
1529 1522
1530asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, 1523SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
1531 int addrlen) 1524 int, addrlen)
1532{ 1525{
1533 struct socket *sock; 1526 struct socket *sock;
1534 struct sockaddr_storage address; 1527 struct sockaddr_storage address;
@@ -1559,8 +1552,8 @@ out:
1559 * name to user space. 1552 * name to user space.
1560 */ 1553 */
1561 1554
1562asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, 1555SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
1563 int __user *usockaddr_len) 1556 int __user *, usockaddr_len)
1564{ 1557{
1565 struct socket *sock; 1558 struct socket *sock;
1566 struct sockaddr_storage address; 1559 struct sockaddr_storage address;
@@ -1590,8 +1583,8 @@ out:
1590 * name to user space. 1583 * name to user space.
1591 */ 1584 */
1592 1585
1593asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, 1586SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
1594 int __user *usockaddr_len) 1587 int __user *, usockaddr_len)
1595{ 1588{
1596 struct socket *sock; 1589 struct socket *sock;
1597 struct sockaddr_storage address; 1590 struct sockaddr_storage address;
@@ -1622,9 +1615,9 @@ asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
1622 * the protocol. 1615 * the protocol.
1623 */ 1616 */
1624 1617
1625asmlinkage long sys_sendto(int fd, void __user *buff, size_t len, 1618SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1626 unsigned flags, struct sockaddr __user *addr, 1619 unsigned, flags, struct sockaddr __user *, addr,
1627 int addr_len) 1620 int, addr_len)
1628{ 1621{
1629 struct socket *sock; 1622 struct socket *sock;
1630 struct sockaddr_storage address; 1623 struct sockaddr_storage address;
@@ -1667,7 +1660,8 @@ out:
1667 * Send a datagram down a socket. 1660 * Send a datagram down a socket.
1668 */ 1661 */
1669 1662
1670asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags) 1663SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
1664 unsigned, flags)
1671{ 1665{
1672 return sys_sendto(fd, buff, len, flags, NULL, 0); 1666 return sys_sendto(fd, buff, len, flags, NULL, 0);
1673} 1667}
@@ -1678,9 +1672,9 @@ asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
1678 * sender address from kernel to user space. 1672 * sender address from kernel to user space.
1679 */ 1673 */
1680 1674
1681asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size, 1675SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1682 unsigned flags, struct sockaddr __user *addr, 1676 unsigned, flags, struct sockaddr __user *, addr,
1683 int __user *addr_len) 1677 int __user *, addr_len)
1684{ 1678{
1685 struct socket *sock; 1679 struct socket *sock;
1686 struct iovec iov; 1680 struct iovec iov;
@@ -1732,8 +1726,8 @@ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
1732 * to pass the user mode parameter for the protocols to sort out. 1726 * to pass the user mode parameter for the protocols to sort out.
1733 */ 1727 */
1734 1728
1735asmlinkage long sys_setsockopt(int fd, int level, int optname, 1729SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
1736 char __user *optval, int optlen) 1730 char __user *, optval, int, optlen)
1737{ 1731{
1738 int err, fput_needed; 1732 int err, fput_needed;
1739 struct socket *sock; 1733 struct socket *sock;
@@ -1766,8 +1760,8 @@ out_put:
1766 * to pass a user mode parameter for the protocols to sort out. 1760 * to pass a user mode parameter for the protocols to sort out.
1767 */ 1761 */
1768 1762
1769asmlinkage long sys_getsockopt(int fd, int level, int optname, 1763SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
1770 char __user *optval, int __user *optlen) 1764 char __user *, optval, int __user *, optlen)
1771{ 1765{
1772 int err, fput_needed; 1766 int err, fput_needed;
1773 struct socket *sock; 1767 struct socket *sock;
@@ -1796,7 +1790,7 @@ out_put:
1796 * Shutdown a socket. 1790 * Shutdown a socket.
1797 */ 1791 */
1798 1792
1799asmlinkage long sys_shutdown(int fd, int how) 1793SYSCALL_DEFINE2(shutdown, int, fd, int, how)
1800{ 1794{
1801 int err, fput_needed; 1795 int err, fput_needed;
1802 struct socket *sock; 1796 struct socket *sock;
@@ -1822,7 +1816,7 @@ asmlinkage long sys_shutdown(int fd, int how)
1822 * BSD sendmsg interface 1816 * BSD sendmsg interface
1823 */ 1817 */
1824 1818
1825asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) 1819SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1826{ 1820{
1827 struct compat_msghdr __user *msg_compat = 1821 struct compat_msghdr __user *msg_compat =
1828 (struct compat_msghdr __user *)msg; 1822 (struct compat_msghdr __user *)msg;
@@ -1928,8 +1922,8 @@ out:
1928 * BSD recvmsg interface 1922 * BSD recvmsg interface
1929 */ 1923 */
1930 1924
1931asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, 1925SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
1932 unsigned int flags) 1926 unsigned int, flags)
1933{ 1927{
1934 struct compat_msghdr __user *msg_compat = 1928 struct compat_msghdr __user *msg_compat =
1935 (struct compat_msghdr __user *)msg; 1929 (struct compat_msghdr __user *)msg;
@@ -2052,7 +2046,7 @@ static const unsigned char nargs[19]={
2052 * it is set by the callees. 2046 * it is set by the callees.
2053 */ 2047 */
2054 2048
2055asmlinkage long sys_socketcall(int call, unsigned long __user *args) 2049SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2056{ 2050{
2057 unsigned long a[6]; 2051 unsigned long a[6];
2058 unsigned long a0, a1; 2052 unsigned long a0, a1;
@@ -2065,9 +2059,7 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2065 if (copy_from_user(a, args, nargs[call])) 2059 if (copy_from_user(a, args, nargs[call]))
2066 return -EFAULT; 2060 return -EFAULT;
2067 2061
2068 err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); 2062 audit_socketcall(nargs[call] / sizeof(unsigned long), a);
2069 if (err)
2070 return err;
2071 2063
2072 a0 = a[0]; 2064 a0 = a[0];
2073 a1 = a[1]; 2065 a1 = a[1];
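
Every socket syscall here switches from an open-coded asmlinkage definition to the SYSCALL_DEFINEx macros, whose argument list alternates types and names; writing the declaration that way lets the kernel emit per-architecture wrappers (for instance, sign-extending 32-bit arguments where an ABI needs it) from a single definition. A deliberately simplified userspace imitation, for the flavor only; the real macro also emits metadata and wrappers:

    #include <stdio.h>

    /* Toy version: rebuild only the plain prototype from the
     * alternating type,name list. */
    #define SYSCALL_DEFINE2(name, t1, a1, t2, a2) \
        long sys_##name(t1 a1, t2 a2)

    SYSCALL_DEFINE2(listen_demo, int, fd, int, backlog)
    {
        printf("listen(fd=%d, backlog=%d)\n", fd, backlog);
        return 0;
    }

    int main(void)
    {
        return (int)sys_listen_demo(3, 128);
    }
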
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
new file mode 100644
index 000000000000..5592883e1e4a
--- /dev/null
+++ b/net/sunrpc/Kconfig
@@ -0,0 +1,78 @@
1config SUNRPC
2 tristate
3
4config SUNRPC_GSS
5 tristate
6
7config SUNRPC_XPRT_RDMA
8 tristate
9 depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
10 default SUNRPC && INFINIBAND
11 help
12 This option allows the NFS client and server to support
13 an RDMA-enabled transport.
14
15 To compile RPC client RDMA transport support as a module,
16 choose M here: the module will be called xprtrdma.
17
18 If unsure, say N.
19
20config SUNRPC_REGISTER_V4
21 bool "Register local RPC services via rpcbind v4 (EXPERIMENTAL)"
22 depends on SUNRPC && EXPERIMENTAL
23 default n
24 help
25 Sun added support for registering RPC services at an IPv6
26 address by creating two new versions of the rpcbind protocol
27 (RFC 1833).
28
29 This option enables support in the kernel RPC server for
30 registering kernel RPC services via version 4 of the rpcbind
31 protocol. If you enable this option, you must run a portmapper
32 daemon that supports rpcbind protocol version 4.
33
34 Serving NFS over IPv6 from knfsd (the kernel's NFS server)
35 requires that you enable this option and use a portmapper that
36 supports rpcbind version 4.
37
38 If unsure, say N to get traditional behavior (register kernel
39 RPC services using only rpcbind version 2). Distributions
40 using the legacy Linux portmapper daemon must say N here.
41
42config RPCSEC_GSS_KRB5
43 tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
44 depends on SUNRPC && EXPERIMENTAL
45 select SUNRPC_GSS
46 select CRYPTO
47 select CRYPTO_MD5
48 select CRYPTO_DES
49 select CRYPTO_CBC
50 help
51 Choose Y here to enable Secure RPC using the Kerberos version 5
52 GSS-API mechanism (RFC 1964).
53
54 Secure RPC calls with Kerberos require an auxiliary user-space
55 daemon which may be found in the Linux nfs-utils package
56 available from http://linux-nfs.org/. In addition, user-space
57 Kerberos support should be installed.
58
59 If unsure, say N.
60
61config RPCSEC_GSS_SPKM3
62 tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
63 depends on SUNRPC && EXPERIMENTAL
64 select SUNRPC_GSS
65 select CRYPTO
66 select CRYPTO_MD5
67 select CRYPTO_DES
68 select CRYPTO_CAST5
69 select CRYPTO_CBC
70 help
71 Choose Y here to enable Secure RPC using the SPKM3 public key
72 GSS-API mechanism (RFC 2025).
73
74 Secure RPC calls with SPKM3 require an auxiliary userspace
75 daemon which may be found in the Linux nfs-utils package
76 available from http://linux-nfs.org/.
77
78 If unsure, say N.
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c9966713282a..4735caad26ed 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -98,7 +98,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
98 98
99 return new; 99 return new;
100} 100}
101EXPORT_SYMBOL(sunrpc_cache_lookup); 101EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
102 102
103 103
104static void queue_loose(struct cache_detail *detail, struct cache_head *ch); 104static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
@@ -173,7 +173,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
173 cache_put(old, detail); 173 cache_put(old, detail);
174 return tmp; 174 return tmp;
175} 175}
176EXPORT_SYMBOL(sunrpc_cache_update); 176EXPORT_SYMBOL_GPL(sunrpc_cache_update);
177 177
178static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); 178static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
179/* 179/*
@@ -245,7 +245,7 @@ int cache_check(struct cache_detail *detail,
245 cache_put(h, detail); 245 cache_put(h, detail);
246 return rv; 246 return rv;
247} 247}
248EXPORT_SYMBOL(cache_check); 248EXPORT_SYMBOL_GPL(cache_check);
249 249
250/* 250/*
251 * caches need to be periodically cleaned. 251 * caches need to be periodically cleaned.
@@ -373,7 +373,7 @@ int cache_register(struct cache_detail *cd)
373 schedule_delayed_work(&cache_cleaner, 0); 373 schedule_delayed_work(&cache_cleaner, 0);
374 return 0; 374 return 0;
375} 375}
376EXPORT_SYMBOL(cache_register); 376EXPORT_SYMBOL_GPL(cache_register);
377 377
378void cache_unregister(struct cache_detail *cd) 378void cache_unregister(struct cache_detail *cd)
379{ 379{
@@ -399,7 +399,7 @@ void cache_unregister(struct cache_detail *cd)
399out: 399out:
400 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); 400 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
401} 401}
402EXPORT_SYMBOL(cache_unregister); 402EXPORT_SYMBOL_GPL(cache_unregister);
403 403
404/* clean cache tries to find something to clean 404/* clean cache tries to find something to clean
405 * and cleans it. 405 * and cleans it.
@@ -514,7 +514,7 @@ void cache_flush(void)
514 while (cache_clean() != -1) 514 while (cache_clean() != -1)
515 cond_resched(); 515 cond_resched();
516} 516}
517EXPORT_SYMBOL(cache_flush); 517EXPORT_SYMBOL_GPL(cache_flush);
518 518
519void cache_purge(struct cache_detail *detail) 519void cache_purge(struct cache_detail *detail)
520{ 520{
@@ -523,7 +523,7 @@ void cache_purge(struct cache_detail *detail)
523 cache_flush(); 523 cache_flush();
524 detail->flush_time = 1; 524 detail->flush_time = 1;
525} 525}
526EXPORT_SYMBOL(cache_purge); 526EXPORT_SYMBOL_GPL(cache_purge);
527 527
528 528
529/* 529/*
@@ -988,7 +988,7 @@ void qword_add(char **bpp, int *lp, char *str)
988 *bpp = bp; 988 *bpp = bp;
989 *lp = len; 989 *lp = len;
990} 990}
991EXPORT_SYMBOL(qword_add); 991EXPORT_SYMBOL_GPL(qword_add);
992 992
993void qword_addhex(char **bpp, int *lp, char *buf, int blen) 993void qword_addhex(char **bpp, int *lp, char *buf, int blen)
994{ 994{
@@ -1017,7 +1017,7 @@ void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1017 *bpp = bp; 1017 *bpp = bp;
1018 *lp = len; 1018 *lp = len;
1019} 1019}
1020EXPORT_SYMBOL(qword_addhex); 1020EXPORT_SYMBOL_GPL(qword_addhex);
1021 1021
1022static void warn_no_listener(struct cache_detail *detail) 1022static void warn_no_listener(struct cache_detail *detail)
1023{ 1023{
@@ -1140,7 +1140,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
1140 *dest = '\0'; 1140 *dest = '\0';
1141 return len; 1141 return len;
1142} 1142}
1143EXPORT_SYMBOL(qword_get); 1143EXPORT_SYMBOL_GPL(qword_get);
1144 1144
1145 1145
1146/* 1146/*
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 192453248870..577385a4a5dc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -522,8 +522,6 @@ rpc_get_inode(struct super_block *sb, int mode)
522 if (!inode) 522 if (!inode)
523 return NULL; 523 return NULL;
524 inode->i_mode = mode; 524 inode->i_mode = mode;
525 inode->i_uid = inode->i_gid = 0;
526 inode->i_blocks = 0;
527 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 525 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
528 switch(mode & S_IFMT) { 526 switch(mode & S_IFMT) {
529 case S_IFDIR: 527 case S_IFDIR:
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 50b049c6598a..085372ef4feb 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -106,7 +106,7 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
106 seq_putc(seq, '\n'); 106 seq_putc(seq, '\n');
107 } 107 }
108} 108}
109EXPORT_SYMBOL(svc_seq_show); 109EXPORT_SYMBOL_GPL(svc_seq_show);
110 110
111/** 111/**
112 * rpc_alloc_iostats - allocate an rpc_iostats structure 112 * rpc_alloc_iostats - allocate an rpc_iostats structure
@@ -249,14 +249,14 @@ svc_proc_register(struct svc_stat *statp, const struct file_operations *fops)
249{ 249{
250 return do_register(statp->program->pg_name, statp, fops); 250 return do_register(statp->program->pg_name, statp, fops);
251} 251}
252EXPORT_SYMBOL(svc_proc_register); 252EXPORT_SYMBOL_GPL(svc_proc_register);
253 253
254void 254void
255svc_proc_unregister(const char *name) 255svc_proc_unregister(const char *name)
256{ 256{
257 remove_proc_entry(name, proc_net_rpc); 257 remove_proc_entry(name, proc_net_rpc);
258} 258}
259EXPORT_SYMBOL(svc_proc_unregister); 259EXPORT_SYMBOL_GPL(svc_proc_unregister);
260 260
261void 261void
262rpc_proc_init(void) 262rpc_proc_init(void)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 54c98d876847..c51fed4d1af1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -431,7 +431,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize,
431{ 431{
432 return __svc_create(prog, bufsize, /*npools*/1, family, shutdown); 432 return __svc_create(prog, bufsize, /*npools*/1, family, shutdown);
433} 433}
434EXPORT_SYMBOL(svc_create); 434EXPORT_SYMBOL_GPL(svc_create);
435 435
436struct svc_serv * 436struct svc_serv *
437svc_create_pooled(struct svc_program *prog, unsigned int bufsize, 437svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
@@ -450,7 +450,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
450 450
451 return serv; 451 return serv;
452} 452}
453EXPORT_SYMBOL(svc_create_pooled); 453EXPORT_SYMBOL_GPL(svc_create_pooled);
454 454
455/* 455/*
456 * Destroy an RPC service. Should be called with appropriate locking to 456 * Destroy an RPC service. Should be called with appropriate locking to
@@ -492,7 +492,7 @@ svc_destroy(struct svc_serv *serv)
492 kfree(serv->sv_pools); 492 kfree(serv->sv_pools);
493 kfree(serv); 493 kfree(serv);
494} 494}
495EXPORT_SYMBOL(svc_destroy); 495EXPORT_SYMBOL_GPL(svc_destroy);
496 496
497/* 497/*
498 * Allocate an RPC server's buffer space. 498 * Allocate an RPC server's buffer space.
@@ -567,7 +567,7 @@ out_thread:
567out_enomem: 567out_enomem:
568 return ERR_PTR(-ENOMEM); 568 return ERR_PTR(-ENOMEM);
569} 569}
570EXPORT_SYMBOL(svc_prepare_thread); 570EXPORT_SYMBOL_GPL(svc_prepare_thread);
571 571
572/* 572/*
573 * Choose a pool in which to create a new thread, for svc_set_num_threads 573 * Choose a pool in which to create a new thread, for svc_set_num_threads
@@ -689,7 +689,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
689 689
690 return error; 690 return error;
691} 691}
692EXPORT_SYMBOL(svc_set_num_threads); 692EXPORT_SYMBOL_GPL(svc_set_num_threads);
693 693
694/* 694/*
695 * Called from a server thread as it's exiting. Caller must hold the BKL or 695 * Called from a server thread as it's exiting. Caller must hold the BKL or
@@ -717,7 +717,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
717 if (serv) 717 if (serv)
718 svc_destroy(serv); 718 svc_destroy(serv);
719} 719}
720EXPORT_SYMBOL(svc_exit_thread); 720EXPORT_SYMBOL_GPL(svc_exit_thread);
721 721
722#ifdef CONFIG_SUNRPC_REGISTER_V4 722#ifdef CONFIG_SUNRPC_REGISTER_V4
723 723
@@ -1231,7 +1231,7 @@ err_bad:
1231 svc_putnl(resv, ntohl(rpc_stat)); 1231 svc_putnl(resv, ntohl(rpc_stat));
1232 goto sendit; 1232 goto sendit;
1233} 1233}
1234EXPORT_SYMBOL(svc_process); 1234EXPORT_SYMBOL_GPL(svc_process);
1235 1235
1236/* 1236/*
1237 * Return (transport-specific) limit on the rpc payload. 1237 * Return (transport-specific) limit on the rpc payload.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bf5b5cdafebf..e588df5d6b34 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -440,7 +440,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
440 svc_xprt_enqueue(xprt); 440 svc_xprt_enqueue(xprt);
441 } 441 }
442} 442}
443EXPORT_SYMBOL(svc_reserve); 443EXPORT_SYMBOL_GPL(svc_reserve);
444 444
445static void svc_xprt_release(struct svc_rqst *rqstp) 445static void svc_xprt_release(struct svc_rqst *rqstp)
446{ 446{
@@ -448,6 +448,9 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
448 448
449 rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); 449 rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
450 450
451 kfree(rqstp->rq_deferred);
452 rqstp->rq_deferred = NULL;
453
451 svc_free_res_pages(rqstp); 454 svc_free_res_pages(rqstp);
452 rqstp->rq_res.page_len = 0; 455 rqstp->rq_res.page_len = 0;
453 rqstp->rq_res.page_base = 0; 456 rqstp->rq_res.page_base = 0;
@@ -498,7 +501,7 @@ void svc_wake_up(struct svc_serv *serv)
498 spin_unlock_bh(&pool->sp_lock); 501 spin_unlock_bh(&pool->sp_lock);
499 } 502 }
500} 503}
501EXPORT_SYMBOL(svc_wake_up); 504EXPORT_SYMBOL_GPL(svc_wake_up);
502 505
503int svc_port_is_privileged(struct sockaddr *sin) 506int svc_port_is_privileged(struct sockaddr *sin)
504{ 507{
@@ -515,8 +518,10 @@ int svc_port_is_privileged(struct sockaddr *sin)
515} 518}
516 519
517/* 520/*
518 * Make sure that we don't have too many active connections. If we 521 * Make sure that we don't have too many active connections. If we have,
519 * have, something must be dropped. 522 * something must be dropped. It's not clear what will happen if we allow
523 * "too many" connections, but when dealing with network-facing software,
524 * we have to code defensively. Here we do that by imposing hard limits.
520 * 525 *
521 * There's no point in trying to do random drop here for DoS 526 * There's no point in trying to do random drop here for DoS
522 * prevention. The NFS client does 1 reconnect in 15 seconds. An 527 * prevention. The NFS client does 1 reconnect in 15 seconds. An
@@ -525,19 +530,27 @@ int svc_port_is_privileged(struct sockaddr *sin)
525 * The only somewhat efficient mechanism would be if drop old 530 * The only somewhat efficient mechanism would be if drop old
526 * connections from the same IP first. But right now we don't even 531 * connections from the same IP first. But right now we don't even
527 * record the client IP in svc_sock. 532 * record the client IP in svc_sock.
533 *
534 * Single-threaded services that expect a lot of clients will probably
535 * need to set sv_maxconn to override the default value, which is based
536 * on the number of threads.
528 */ 537 */
529static void svc_check_conn_limits(struct svc_serv *serv) 538static void svc_check_conn_limits(struct svc_serv *serv)
530{ 539{
531 if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { 540 unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
541 (serv->sv_nrthreads+3) * 20;
542
543 if (serv->sv_tmpcnt > limit) {
532 struct svc_xprt *xprt = NULL; 544 struct svc_xprt *xprt = NULL;
533 spin_lock_bh(&serv->sv_lock); 545 spin_lock_bh(&serv->sv_lock);
534 if (!list_empty(&serv->sv_tempsocks)) { 546 if (!list_empty(&serv->sv_tempsocks)) {
535 if (net_ratelimit()) { 547 if (net_ratelimit()) {
536 /* Try to help the admin */ 548 /* Try to help the admin */
537 printk(KERN_NOTICE "%s: too many open " 549 printk(KERN_NOTICE "%s: too many open "
538 "connections, consider increasing the " 550 "connections, consider increasing %s\n",
539 "number of nfsd threads\n", 551 serv->sv_name, serv->sv_maxconn ?
540 serv->sv_name); 552 "the max number of connections." :
553 "the number of threads.");
541 } 554 }
542 /* 555 /*
543 * Always select the oldest connection. It's not fair, 556 * Always select the oldest connection. It's not fair,
@@ -730,7 +743,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
730 serv->sv_stats->netcnt++; 743 serv->sv_stats->netcnt++;
731 return len; 744 return len;
732} 745}
733EXPORT_SYMBOL(svc_recv); 746EXPORT_SYMBOL_GPL(svc_recv);
734 747
735/* 748/*
736 * Drop request 749 * Drop request
@@ -740,7 +753,7 @@ void svc_drop(struct svc_rqst *rqstp)
740 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); 753 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
741 svc_xprt_release(rqstp); 754 svc_xprt_release(rqstp);
742} 755}
743EXPORT_SYMBOL(svc_drop); 756EXPORT_SYMBOL_GPL(svc_drop);
744 757
745/* 758/*
746 * Return reply to client. 759 * Return reply to client.
@@ -837,6 +850,11 @@ static void svc_age_temp_xprts(unsigned long closure)
837void svc_delete_xprt(struct svc_xprt *xprt) 850void svc_delete_xprt(struct svc_xprt *xprt)
838{ 851{
839 struct svc_serv *serv = xprt->xpt_server; 852 struct svc_serv *serv = xprt->xpt_server;
853 struct svc_deferred_req *dr;
854
855 /* Only do this once */
856 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
857 return;
840 858
841 dprintk("svc: svc_delete_xprt(%p)\n", xprt); 859 dprintk("svc: svc_delete_xprt(%p)\n", xprt);
842 xprt->xpt_ops->xpo_detach(xprt); 860 xprt->xpt_ops->xpo_detach(xprt);
@@ -851,12 +869,16 @@ void svc_delete_xprt(struct svc_xprt *xprt)
851 * while still attached to a queue, the queue itself 869 * while still attached to a queue, the queue itself
852 * is about to be destroyed (in svc_destroy). 870 * is about to be destroyed (in svc_destroy).
853 */ 871 */
854 if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) { 872 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
855 BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); 873 serv->sv_tmpcnt--;
856 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 874
857 serv->sv_tmpcnt--; 875 for (dr = svc_deferred_dequeue(xprt); dr;
876 dr = svc_deferred_dequeue(xprt)) {
858 svc_xprt_put(xprt); 877 svc_xprt_put(xprt);
878 kfree(dr);
859 } 879 }
880
881 svc_xprt_put(xprt);
860 spin_unlock_bh(&serv->sv_lock); 882 spin_unlock_bh(&serv->sv_lock);
861} 883}
862 884
@@ -902,17 +924,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
902 container_of(dreq, struct svc_deferred_req, handle); 924 container_of(dreq, struct svc_deferred_req, handle);
903 struct svc_xprt *xprt = dr->xprt; 925 struct svc_xprt *xprt = dr->xprt;
904 926
905 if (too_many) { 927 spin_lock(&xprt->xpt_lock);
928 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
929 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
930 spin_unlock(&xprt->xpt_lock);
931 dprintk("revisit canceled\n");
906 svc_xprt_put(xprt); 932 svc_xprt_put(xprt);
907 kfree(dr); 933 kfree(dr);
908 return; 934 return;
909 } 935 }
910 dprintk("revisit queued\n"); 936 dprintk("revisit queued\n");
911 dr->xprt = NULL; 937 dr->xprt = NULL;
912 spin_lock(&xprt->xpt_lock);
913 list_add(&dr->handle.recent, &xprt->xpt_deferred); 938 list_add(&dr->handle.recent, &xprt->xpt_deferred);
914 spin_unlock(&xprt->xpt_lock); 939 spin_unlock(&xprt->xpt_lock);
915 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
916 svc_xprt_enqueue(xprt); 940 svc_xprt_enqueue(xprt);
917 svc_xprt_put(xprt); 941 svc_xprt_put(xprt);
918} 942}
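The new sv_maxconn field replaces the hard-wired (sv_nrthreads+3)*20 heuristic whenever it is non-zero. A minimal sketch of how a single-threaded service might use it (the function name and value here are hypothetical, not part of this patch):

	/* Hypothetical service setup: override the thread-based connection
	 * limit before accepting clients; leaving sv_maxconn at 0 keeps
	 * the old (sv_nrthreads+3)*20 default. */
	static int my_service_start(struct svc_serv *serv)
	{
		serv->sv_maxconn = 1024;
		return 0;
	}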
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 8a73cbb16052..e64109b02aee 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -57,13 +57,13 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
 	rqstp->rq_authop = aops;
 	return aops->accept(rqstp, authp);
 }
-EXPORT_SYMBOL(svc_authenticate);
+EXPORT_SYMBOL_GPL(svc_authenticate);
 
 int svc_set_client(struct svc_rqst *rqstp)
 {
 	return rqstp->rq_authop->set_client(rqstp);
 }
-EXPORT_SYMBOL(svc_set_client);
+EXPORT_SYMBOL_GPL(svc_set_client);
 
 /* A request, which was authenticated, has now executed.
  * Time to finalise the credentials and verifier
@@ -95,7 +95,7 @@ svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops)
 	spin_unlock(&authtab_lock);
 	return rv;
 }
-EXPORT_SYMBOL(svc_auth_register);
+EXPORT_SYMBOL_GPL(svc_auth_register);
 
 void
 svc_auth_unregister(rpc_authflavor_t flavor)
@@ -105,7 +105,7 @@ svc_auth_unregister(rpc_authflavor_t flavor)
 	authtab[flavor] = NULL;
 	spin_unlock(&authtab_lock);
 }
-EXPORT_SYMBOL(svc_auth_unregister);
+EXPORT_SYMBOL_GPL(svc_auth_unregister);
 
 /**************************************************
  * 'auth_domains' are stored in a hash table indexed by name.
@@ -132,7 +132,7 @@ void auth_domain_put(struct auth_domain *dom)
 		spin_unlock(&auth_domain_lock);
 	}
 }
-EXPORT_SYMBOL(auth_domain_put);
+EXPORT_SYMBOL_GPL(auth_domain_put);
 
 struct auth_domain *
 auth_domain_lookup(char *name, struct auth_domain *new)
@@ -157,10 +157,10 @@ auth_domain_lookup(char *name, struct auth_domain *new)
 	spin_unlock(&auth_domain_lock);
 	return new;
 }
-EXPORT_SYMBOL(auth_domain_lookup);
+EXPORT_SYMBOL_GPL(auth_domain_lookup);
 
 struct auth_domain *auth_domain_find(char *name)
 {
 	return auth_domain_lookup(name, NULL);
 }
-EXPORT_SYMBOL(auth_domain_find);
+EXPORT_SYMBOL_GPL(auth_domain_find);
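All of these exports move from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL, so only modules declaring a GPL-compatible license can resolve them. A sketch of the module-side requirement:

	#include <linux/module.h>

	/* Without a GPL-compatible MODULE_LICENSE, a module referencing
	 * svc_authenticate() and friends now fails to load. */
	MODULE_LICENSE("GPL");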
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 82240e6127b2..5c865e2d299e 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -64,7 +64,7 @@ struct auth_domain *unix_domain_find(char *name)
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
-EXPORT_SYMBOL(unix_domain_find);
+EXPORT_SYMBOL_GPL(unix_domain_find);
 
 static void svcauth_unix_domain_release(struct auth_domain *dom)
 {
@@ -358,7 +358,7 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
 	else
 		return -ENOMEM;
 }
-EXPORT_SYMBOL(auth_unix_add_addr);
+EXPORT_SYMBOL_GPL(auth_unix_add_addr);
 
 int auth_unix_forget_old(struct auth_domain *dom)
 {
@@ -370,7 +370,7 @@ int auth_unix_forget_old(struct auth_domain *dom)
 	udom->addr_changes++;
 	return 0;
 }
-EXPORT_SYMBOL(auth_unix_forget_old);
+EXPORT_SYMBOL_GPL(auth_unix_forget_old);
 
 struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
 {
@@ -395,13 +395,13 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
 	cache_put(&ipm->h, &ip_map_cache);
 	return rv;
 }
-EXPORT_SYMBOL(auth_unix_lookup);
+EXPORT_SYMBOL_GPL(auth_unix_lookup);
 
 void svcauth_unix_purge(void)
 {
 	cache_purge(&ip_map_cache);
 }
-EXPORT_SYMBOL(svcauth_unix_purge);
+EXPORT_SYMBOL_GPL(svcauth_unix_purge);
 
 static inline struct ip_map *
 ip_map_cached_get(struct svc_rqst *rqstp)
@@ -714,7 +714,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	return SVC_OK;
 }
 
-EXPORT_SYMBOL(svcauth_unix_set_client);
+EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
 
 static int
 svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index ef3238d665ee..5763e6460fea 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -59,6 +59,7 @@ static void svc_udp_data_ready(struct sock *, int);
 static int svc_udp_recvfrom(struct svc_rqst *);
 static int svc_udp_sendto(struct svc_rqst *);
 static void svc_sock_detach(struct svc_xprt *);
+static void svc_tcp_sock_detach(struct svc_xprt *);
 static void svc_sock_free(struct svc_xprt *);
 
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
@@ -102,7 +103,6 @@ static void svc_reclassify_socket(struct socket *sock)
 static void svc_release_skb(struct svc_rqst *rqstp)
 {
 	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
-	struct svc_deferred_req *dr = rqstp->rq_deferred;
 
 	if (skb) {
 		struct svc_sock *svsk =
@@ -112,10 +112,6 @@ static void svc_release_skb(struct svc_rqst *rqstp)
 		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
 		skb_free_datagram(svsk->sk_sk, skb);
 	}
-	if (dr) {
-		rqstp->rq_deferred = NULL;
-		kfree(dr);
-	}
 }
 
 union svc_pktinfo_u {
@@ -289,7 +285,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
 		return -ENOENT;
 	return len;
 }
-EXPORT_SYMBOL(svc_sock_names);
+EXPORT_SYMBOL_GPL(svc_sock_names);
 
 /*
  * Check input queue length
@@ -1017,7 +1013,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_recvfrom = svc_tcp_recvfrom,
 	.xpo_sendto = svc_tcp_sendto,
 	.xpo_release_rqst = svc_release_skb,
-	.xpo_detach = svc_sock_detach,
+	.xpo_detach = svc_tcp_sock_detach,
 	.xpo_free = svc_sock_free,
 	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
 	.xpo_has_wspace = svc_tcp_has_wspace,
@@ -1101,7 +1097,7 @@ void svc_sock_update_bufs(struct svc_serv *serv)
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }
-EXPORT_SYMBOL(svc_sock_update_bufs);
+EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
 
 /*
  * Initialize socket for RPC use and create svc_sock struct
@@ -1287,6 +1283,24 @@ static void svc_sock_detach(struct svc_xprt *xprt)
 	sk->sk_state_change = svsk->sk_ostate;
 	sk->sk_data_ready = svsk->sk_odata;
 	sk->sk_write_space = svsk->sk_owspace;
+
+	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		wake_up_interruptible(sk->sk_sleep);
+}
+
+/*
+ * Disconnect the socket, and reset the callbacks
+ */
+static void svc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+
+	dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk);
+
+	svc_sock_detach(xprt);
+
+	if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
+		kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
 }
 
 /*
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5aa024b99c55..2f2d731bc1c2 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -124,7 +124,7 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
 static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
 				  struct tipc_node_map *nm_diff)
 {
-	int stop = sizeof(nm_a->map) / sizeof(u32);
+	int stop = ARRAY_SIZE(nm_a->map);
 	int w;
 	int b;
 	u32 map;
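ARRAY_SIZE() from <linux/kernel.h> replaces the open-coded division and stays correct even if the element type of nm_a->map changes. Its shape is roughly (simplified; the kernel version additionally rejects non-array arguments at compile time):

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))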
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c6250d0055d2..d1b89820ab4f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -836,7 +836,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		err = mnt_want_write(nd.path.mnt);
 		if (err)
 			goto out_mknod_dput;
+		err = security_path_mknod(&nd.path, dentry, mode, 0);
+		if (err)
+			goto out_mknod_drop_write;
 		err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
+out_mknod_drop_write:
 		mnt_drop_write(nd.path.mnt);
 		if (err)
 			goto out_mknod_dput;
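The hunk slots the security_path_mknod() LSM hook between taking write access and performing the mknod, with a new label so an LSM veto still balances mnt_want_write(). The ordering in isolation (a sketch with generic identifiers, not code from this patch):

	static int checked_mknod(struct vfsmount *mnt, struct path *path,
				 struct dentry *dentry, int mode)
	{
		int err = mnt_want_write(mnt);	/* take write access */
		if (err)
			return err;
		err = security_path_mknod(path, dentry, mode, 0); /* LSM veto */
		if (err)
			goto drop_write;
		err = vfs_mknod(path->dentry->d_inode, dentry, mode, 0);
	drop_write:
		mnt_drop_write(mnt);		/* balanced on every path */
		return err;
	}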
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
new file mode 100644
index 000000000000..18495cdcd10d
--- /dev/null
+++ b/net/wimax/Kconfig
@@ -0,0 +1,52 @@
1#
2# WiMAX LAN device configuration
3#
4# Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a
5# module if WIMAX is to be linked in. The WiMAX code is done in such a
6# way that it doesn't require an explicit dependency on RFKILL in
7# case an embedded system wants to rip it out.
8#
9# As well, enablement of the RFKILL code means we need the INPUT layer
10# support to inject events coming from hw rfkill switches. That
11# dependency could be killed if input.h provided appropriate means to
12# work when input is disabled.
13
14comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
15 depends on INPUT = n && RFKILL != n
16
17menuconfig WIMAX
18 tristate "WiMAX Wireless Broadband support"
19 depends on (y && RFKILL != m) || m
20 depends on (INPUT && RFKILL != n) || RFKILL = n
21 help
22
23 Select to configure support for devices that provide
24 wireless broadband connectivity using the WiMAX protocol
25 (IEEE 802.16).
26
27 Please note that most of these devices require signing up
28 for a service plan with a provider.
29
30 The different WiMAX drivers can be enabled in the menu entry
31
32 Device Drivers > Network device support > WiMAX Wireless
33 Broadband devices
34
35 If unsure, it is safe to select M (module).
36
37config WIMAX_DEBUG_LEVEL
38 int "WiMAX debug level"
39 depends on WIMAX
40 default 8
41 help
42
43 Select the maximum debug verbosity level to be compiled into
44 the WiMAX stack code.
45
46 By default, debug messages are disabled at runtime and can
47 be selectively enabled for different parts of the code using
48 the sysfs debug-levels file.
49
50 If set at zero, this will compile out all the debug code.
51
52 It is recommended that it is left at 8.
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
new file mode 100644
index 000000000000..5b80b941c2c9
--- /dev/null
+++ b/net/wimax/Makefile
@@ -0,0 +1,13 @@
1
2obj-$(CONFIG_WIMAX) += wimax.o
3
4wimax-y := \
5 id-table.o \
6 op-msg.o \
7 op-reset.o \
8 op-rfkill.o \
9 stack.o
10
11wimax-$(CONFIG_DEBUG_FS) += debugfs.o
12
13
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h
new file mode 100644
index 000000000000..1c29123a3aa9
--- /dev/null
+++ b/net/wimax/debug-levels.h
@@ -0,0 +1,42 @@
1/*
2 * Linux WiMAX Stack
3 * Debug levels control file for the wimax module
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23#ifndef __debug_levels__h__
24#define __debug_levels__h__
25
26/* Maximum compile and run time debug level for all submodules */
27#define D_MODULENAME wimax
28#define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL
29
30#include <linux/wimax/debug.h>
31
32/* List of all the enabled modules */
33enum d_module {
34 D_SUBMODULE_DECLARE(debugfs),
35 D_SUBMODULE_DECLARE(id_table),
36 D_SUBMODULE_DECLARE(op_msg),
37 D_SUBMODULE_DECLARE(op_reset),
38 D_SUBMODULE_DECLARE(op_rfkill),
39 D_SUBMODULE_DECLARE(stack),
40};
41
42#endif /* #ifndef __debug_levels__h__ */
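D_MASTER pins the compile-time ceiling to CONFIG_WIMAX_DEBUG_LEVEL, so a level of 0 compiles the debug statements out entirely. The gating works roughly like this (a simplified reconstruction for illustration; the real macros live in include/linux/wimax/debug.h and also honor the runtime per-submodule levels):

	#define d_printf(level, dev, fmt, args...)			\
	do {								\
		if ((level) <= D_MASTER)	/* constant-folded */	\
			dev_printk(KERN_DEBUG, dev, fmt, ## args);	\
	} while (0)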
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
new file mode 100644
index 000000000000..94d216a46407
--- /dev/null
+++ b/net/wimax/debugfs.c
@@ -0,0 +1,79 @@
1/*
2 * Linux WiMAX
3 * Debugfs support
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23#include <linux/debugfs.h>
24#include <linux/wimax.h>
25#include "wimax-internal.h"
26
27#define D_SUBMODULE debugfs
28#include "debug-levels.h"
29
30
31#define __debugfs_register(prefix, name, parent) \
32do { \
33 result = d_level_register_debugfs(prefix, name, parent); \
34 if (result < 0) \
35 goto error; \
36} while (0)
37
38
39int wimax_debugfs_add(struct wimax_dev *wimax_dev)
40{
41 int result;
42 struct net_device *net_dev = wimax_dev->net_dev;
43 struct device *dev = net_dev->dev.parent;
44 struct dentry *dentry;
45 char buf[128];
46
47 snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
48 dentry = debugfs_create_dir(buf, NULL);
49 result = PTR_ERR(dentry);
50 if (IS_ERR(dentry)) {
51 if (result == -ENODEV)
52 result = 0; /* No debugfs support */
53 else
54 dev_err(dev, "Can't create debugfs dentry: %d\n",
55 result);
56 goto out;
57 }
58 wimax_dev->debugfs_dentry = dentry;
59 __debugfs_register("wimax_dl_", debugfs, dentry);
60 __debugfs_register("wimax_dl_", id_table, dentry);
61 __debugfs_register("wimax_dl_", op_msg, dentry);
62 __debugfs_register("wimax_dl_", op_reset, dentry);
63 __debugfs_register("wimax_dl_", op_rfkill, dentry);
64 __debugfs_register("wimax_dl_", stack, dentry);
65 result = 0;
66out:
67 return result;
68
69error:
70 debugfs_remove_recursive(wimax_dev->debugfs_dentry);
71 return result;
72}
73
74void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
75{
76 debugfs_remove_recursive(wimax_dev->debugfs_dentry);
77}
78
79
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
new file mode 100644
index 000000000000..5e685f7eda90
--- /dev/null
+++ b/net/wimax/id-table.c
@@ -0,0 +1,144 @@
1/*
2 * Linux WiMAX
3 * Mapping of generic netlink family IDs to net devices
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * We assign a single generic netlink family ID to each device (to
25 * simplify lookup).
26 *
27 * We need a way to map family ID to a wimax_dev pointer.
28 *
29 * The idea is to use a very simple lookup. Using a netlink attribute
30 * with (for example) the interface name implies a heavier search over
31 * all the network devices; seemed kind of a waste given that we know
32 * we are looking for a WiMAX device and that most systems will have
33 * just a single WiMAX adapter.
34 *
35 * We put all the WiMAX devices in the system in a linked list and
36 * match the generic netlink family ID against the list.
37 *
38 * By using a linked list, the case of a single adapter in the system
39 * becomes (almost) no overhead, while still working for many more. If
40 * it ever goes beyond two, I'll be surprised.
41 */
42#include <linux/device.h>
43#include <net/genetlink.h>
44#include <linux/netdevice.h>
45#include <linux/list.h>
46#include <linux/wimax.h>
47#include "wimax-internal.h"
48
49
50#define D_SUBMODULE id_table
51#include "debug-levels.h"
52
53
54static DEFINE_SPINLOCK(wimax_id_table_lock);
55static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table);
56
57
58/*
59 * wimax_id_table_add - add a gennetlink family ID / wimax_dev mapping
60 *
61 * @wimax_dev: WiMAX device descriptor to associate to the Generic
62 * Netlink family ID.
63 *
64 * Adds the device to the list used to map generic netlink family
65 * IDs / interface indexes to wimax_dev pointers.
66 */
67void wimax_id_table_add(struct wimax_dev *wimax_dev)
68{
69 d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
70 spin_lock(&wimax_id_table_lock);
71 list_add(&wimax_dev->id_table_node, &wimax_id_table);
72 spin_unlock(&wimax_id_table_lock);
73 d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev);
74}
75
76
77/*
78 * wimax_dev_get_by_genl_info - lookup a wimax_dev from the gennetlink info
79 *
80 * The generic netlink family ID has been filled out in the
81 * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in
82 * the mapping table and reference the wimax_dev.
83 *
84 * When done, the reference should be dropped with
85 * 'dev_put(wimax_dev->net_dev)'.
86 */
87struct wimax_dev *wimax_dev_get_by_genl_info(
88 struct genl_info *info, int ifindex)
89{
90 struct wimax_dev *wimax_dev = NULL;
91
92 d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
93 spin_lock(&wimax_id_table_lock);
94 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
95 if (wimax_dev->net_dev->ifindex == ifindex) {
96 dev_hold(wimax_dev->net_dev);
97 break;
98 }
99 }
100 if (wimax_dev == NULL)
101 d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
102 ifindex);
103 spin_unlock(&wimax_id_table_lock);
104 d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
105 info, ifindex, wimax_dev);
106 return wimax_dev;
107}
108
109
110/*
111 * wimax_id_table_rm - Remove a gennetlink family ID / wimax_dev mapping
112 *
113 * @wimax_dev: device whose family ID mapping is to be removed
114 */
115void wimax_id_table_rm(struct wimax_dev *wimax_dev)
116{
117 spin_lock(&wimax_id_table_lock);
118 list_del_init(&wimax_dev->id_table_node);
119 spin_unlock(&wimax_id_table_lock);
120}
121
122
123/*
124 * Release the gennetlink family id / mapping table
125 *
126 * On debug, verify that the table is empty upon removal. We want the
127 * code always compiled, to ensure it doesn't bit rot. It will be
128 * compiled out if CONFIG_BUG is disabled.
129 */
130void wimax_id_table_release(void)
131{
132 struct wimax_dev *wimax_dev;
133
134#ifndef CONFIG_BUG
135 return;
136#endif
137 spin_lock(&wimax_id_table_lock);
138 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
139 printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
140 __func__, wimax_dev, wimax_dev->net_dev->ifindex);
141 WARN_ON(1);
142 }
143 spin_unlock(&wimax_id_table_lock);
144}
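Callers pair the lookup with dev_put() on the embedded net_dev, as the generic netlink ops elsewhere in this series do. A minimal sketch:

	struct wimax_dev *wimax_dev;

	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
	if (wimax_dev == NULL)
		return -ENODEV;
	/* ... use the device; a reference on its net_dev is held ... */
	dev_put(wimax_dev->net_dev);	/* drop the reference taken above */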
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
new file mode 100644
index 000000000000..cb3b4ad53683
--- /dev/null
+++ b/net/wimax/op-msg.c
@@ -0,0 +1,421 @@
1/*
2 * Linux WiMAX
3 * Generic messaging interface between userspace and driver/device
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This implements a direct communication channel between user space and
25 * the driver/device, by which free form messages can be sent back and
26 * forth.
27 *
28 * This is intended for device-specific features, vendor quirks, etc.
29 *
30 * See include/net/wimax.h
31 *
32 * GENERIC NETLINK ENCODING AND CAPACITY
33 *
34 * A destination "pipe name" is added to each message; it is up to the
35 * drivers to assign or use those names (if using them at all).
36 *
37 * Messages are encoded as a binary netlink attribute using nla_put()
38 * using type NLA_UNSPEC (as some versions of libnl still in
39 * deployment don't yet understand NLA_BINARY).
40 *
41 * The maximum capacity of this transport is PAGESIZE per message (so
42 * the actual payload will be a bit smaller depending on the
43 * netlink/generic netlink attributes and headers).
44 *
45 * RECEPTION OF MESSAGES
46 *
47 * When a message is received from user space, it is passed verbatim
48 * to the driver calling wimax_dev->op_msg_from_user(). The return
49 * value from this function is passed back to user space as an ack
50 * over the generic netlink protocol.
51 *
52 * The stack doesn't do any processing or interpretation of these
53 * messages.
54 *
55 * SENDING MESSAGES
56 *
57 * Messages can be sent with wimax_msg().
58 *
59 * If the message delivery needs to happen on a different context to
60 * that of its creation, wimax_msg_alloc() can be used to get a
61 * pointer to the message that can be delivered later on with
62 * wimax_msg_send().
63 *
64 * ROADMAP
65 *
66 * wimax_gnl_doit_msg_from_user() Process a message from user space
67 * wimax_dev_get_by_genl_info()
68 * wimax_dev->op_msg_from_user() Delivery of message to the driver
69 *
70 * wimax_msg() Send a message to user space
71 * wimax_msg_alloc()
72 * wimax_msg_send()
73 */
74#include <linux/device.h>
75#include <net/genetlink.h>
76#include <linux/netdevice.h>
77#include <linux/wimax.h>
78#include <linux/security.h>
79#include "wimax-internal.h"
80
81
82#define D_SUBMODULE op_msg
83#include "debug-levels.h"
84
85
86/**
87 * wimax_msg_alloc - Create a new skb for sending a message to userspace
88 *
89 * @wimax_dev: WiMAX device descriptor
90 * @pipe_name: "named pipe" the message will be sent to
91 * @msg: pointer to the message data to send
92 * @size: size of the message to send (in bytes), including the header.
93 * @gfp_flags: flags for memory allocation.
94 *
95 * Returns: a new skb if ok, ERR_PTR() with a negative errno code on error
96 *
97 * Description:
98 *
99 * Allocates an skb that will contain the message to send to user
100 * space over the messaging pipe and initializes it, copying the
101 * payload.
102 *
103 * Once this call is done, you can deliver it with
104 * wimax_msg_send().
105 *
106 * IMPORTANT:
107 *
108 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
109 * wimax_msg_send() depends on skb->data being placed at the
110 * beginning of the user message.
111 */
112struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
113 const char *pipe_name,
114 const void *msg, size_t size,
115 gfp_t gfp_flags)
116{
117 int result;
118 struct device *dev = wimax_dev->net_dev->dev.parent;
119 size_t msg_size;
120 void *genl_msg;
121 struct sk_buff *skb;
122
123 msg_size = nla_total_size(size)
124 + nla_total_size(sizeof(u32))
125 + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0);
126 result = -ENOMEM;
127 skb = genlmsg_new(msg_size, gfp_flags);
128 if (skb == NULL)
129 goto error_new;
130 genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
131 0, WIMAX_GNL_OP_MSG_TO_USER);
132 if (genl_msg == NULL) {
133 dev_err(dev, "no memory to create generic netlink message\n");
134 goto error_genlmsg_put;
135 }
136 result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
137 wimax_dev->net_dev->ifindex);
138 if (result < 0) {
139 dev_err(dev, "no memory to add ifindex attribute\n");
140 goto error_nla_put;
141 }
142 if (pipe_name) {
143 result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
144 pipe_name);
145 if (result < 0) {
146 dev_err(dev, "no memory to add pipe_name attribute\n");
147 goto error_nla_put;
148 }
149 }
150 result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
151 if (result < 0) {
152 dev_err(dev, "no memory to add payload in attribute\n");
153 goto error_nla_put;
154 }
155 genlmsg_end(skb, genl_msg);
156 return skb;
157
158error_nla_put:
159error_genlmsg_put:
160error_new:
161 nlmsg_free(skb);
162 return ERR_PTR(result);
163
164}
165EXPORT_SYMBOL_GPL(wimax_msg_alloc);
166
167
168/**
169 * wimax_msg_data_len - Return a pointer and size of a message's payload
170 *
171 * @msg: Pointer to a message created with wimax_msg_alloc()
172 * @size: Pointer to where to store the message's size
173 *
174 * Returns the pointer to the message data.
175 */
176const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
177{
178 struct nlmsghdr *nlh = (void *) msg->head;
179 struct nlattr *nla;
180
181 nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
182 WIMAX_GNL_MSG_DATA);
183 if (nla == NULL) {
184 printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
185 return NULL;
186 }
187 *size = nla_len(nla);
188 return nla_data(nla);
189}
190EXPORT_SYMBOL_GPL(wimax_msg_data_len);
191
192
193/**
194 * wimax_msg_data - Return a pointer to a message's payload
195 *
196 * @msg: Pointer to a message created with wimax_msg_alloc()
197 */
198const void *wimax_msg_data(struct sk_buff *msg)
199{
200 struct nlmsghdr *nlh = (void *) msg->head;
201 struct nlattr *nla;
202
203 nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
204 WIMAX_GNL_MSG_DATA);
205 if (nla == NULL) {
206 printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
207 return NULL;
208 }
209 return nla_data(nla);
210}
211EXPORT_SYMBOL_GPL(wimax_msg_data);
212
213
214/**
215 * wimax_msg_len - Return a message's payload length
216 *
217 * @msg: Pointer to a message created with wimax_msg_alloc()
218 */
219ssize_t wimax_msg_len(struct sk_buff *msg)
220{
221 struct nlmsghdr *nlh = (void *) msg->head;
222 struct nlattr *nla;
223
224 nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
225 WIMAX_GNL_MSG_DATA);
226 if (nla == NULL) {
227 printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
228 return -EINVAL;
229 }
230 return nla_len(nla);
231}
232EXPORT_SYMBOL_GPL(wimax_msg_len);
233
234
235/**
236 * wimax_msg_send - Send a pre-allocated message to user space
237 *
238 * @wimax_dev: WiMAX device descriptor
239 *
240 * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the
241 * ownership of @skb is transferred to this function.
242 *
243 * Returns: 0 if ok, < 0 errno code on error
244 *
245 * Description:
246 *
247 * Sends a free-form message that was preallocated with
248 * wimax_msg_alloc() and filled up.
249 *
250 * Assumes that once you pass an skb to this function for sending, it
251 * owns it and will release it when done (on success).
252 *
253 * IMPORTANT:
254 *
255 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
256 * wimax_msg_send() depends on skb->data being placed at the
257 * beginning of the user message.
258 */
259int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
260{
261 int result;
262 struct device *dev = wimax_dev->net_dev->dev.parent;
263 void *msg = skb->data;
264 size_t size = skb->len;
265 might_sleep();
266
267 d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
268 d_dump(2, dev, msg, size);
269 result = genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
270 d_printf(1, dev, "CTX: genl multicast result %d\n", result);
271 if (result == -ESRCH) /* Nobody connected, ignore it */
272 result = 0; /* btw, the skb is freed already */
273 return result;
274}
275EXPORT_SYMBOL_GPL(wimax_msg_send);
276
277
278/**
279 * wimax_msg - Send a message to user space
280 *
281 * @wimax_dev: WiMAX device descriptor (properly referenced)
282 * @pipe_name: "named pipe" the message will be sent to
283 * @buf: pointer to the message to send.
284 * @size: size of the buffer pointed to by @buf (in bytes).
285 * @gfp_flags: flags for memory allocation.
286 *
287 * Returns: %0 if ok, negative errno code on error.
288 *
289 * Description:
290 *
291 * Sends a free-form message to user space on the device @wimax_dev.
292 *
293 * NOTES:
294 *
295 * The skb allocated internally is owned by this function, which
296 * releases it when done (unless it returns an error).
297 */
298int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
299 const void *buf, size_t size, gfp_t gfp_flags)
300{
301 int result = -ENOMEM;
302 struct sk_buff *skb;
303
304 skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
305 if (skb == NULL)
306 goto error_msg_new;
307 result = wimax_msg_send(wimax_dev, skb);
308error_msg_new:
309 return result;
310}
311EXPORT_SYMBOL_GPL(wimax_msg);
312
313
314static const
315struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
316 [WIMAX_GNL_MSG_IFIDX] = {
317 .type = NLA_U32,
318 },
319 [WIMAX_GNL_MSG_DATA] = {
320 .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */
321 },
322};
323
324
325/*
326 * Relays a message from user space to the driver
327 *
328 * The skb is passed to the driver-specific function with the netlink
329 * and generic netlink headers already stripped.
330 *
331 * This call will block while handling/relaying the message.
332 */
333static
334int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
335{
336 int result, ifindex;
337 struct wimax_dev *wimax_dev;
338 struct device *dev;
339 struct nlmsghdr *nlh = info->nlhdr;
340 char *pipe_name;
341 void *msg_buf;
342 size_t msg_len;
343
344 might_sleep();
345 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
346 result = -ENODEV;
347 if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
348 printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX "
349 "attribute\n");
350 goto error_no_wimax_dev;
351 }
352 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
353 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
354 if (wimax_dev == NULL)
355 goto error_no_wimax_dev;
356 dev = wimax_dev_to_dev(wimax_dev);
357
358 /* Unpack arguments */
359 result = -EINVAL;
360 if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) {
361 dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA "
362 "attribute\n");
363 goto error_no_data;
364 }
365 msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]);
366 msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]);
367
368 if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL)
369 pipe_name = NULL;
370 else {
371 struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME];
372 size_t attr_len = nla_len(attr);
373 /* libnl-1.1 does not yet support NLA_NUL_STRING */
374 result = -ENOMEM;
375 pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL);
376 if (pipe_name == NULL)
377 goto error_alloc;
378 pipe_name[attr_len] = 0;
379 }
380 mutex_lock(&wimax_dev->mutex);
381 result = wimax_dev_is_ready(wimax_dev);
382 if (result < 0)
383 goto error_not_ready;
384 result = -ENOSYS;
385 if (wimax_dev->op_msg_from_user == NULL)
386 goto error_noop;
387
388 d_printf(1, dev,
389 "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n",
390 nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags,
391 nlh->nlmsg_seq, nlh->nlmsg_pid);
392 d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len);
393 d_dump(2, dev, msg_buf, msg_len);
394
395 result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name,
396 msg_buf, msg_len, info);
397error_noop:
398error_not_ready:
399 mutex_unlock(&wimax_dev->mutex);
400error_alloc:
401 kfree(pipe_name);
402error_no_data:
403 dev_put(wimax_dev->net_dev);
404error_no_wimax_dev:
405 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
406 return result;
407}
408
409
410/*
411 * Generic Netlink glue
412 */
413
414struct genl_ops wimax_gnl_msg_from_user = {
415 .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
416 .flags = GENL_ADMIN_PERM,
417 .policy = wimax_gnl_msg_policy,
418 .doit = wimax_gnl_doit_msg_from_user,
419 .dumpit = NULL,
420};
421
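Per the header comment, a driver that must build a message in one context and deliver it in another splits the alloc/send pair. A sketch (the pipe name "diag" is hypothetical):

	struct sk_buff *skb;
	int result;

	skb = wimax_msg_alloc(wimax_dev, "diag", buf, len, GFP_KERNEL);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	/* ... later, possibly from a different context ... */
	result = wimax_msg_send(wimax_dev, skb);	/* consumes skb */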
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
new file mode 100644
index 000000000000..ca269178c4d4
--- /dev/null
+++ b/net/wimax/op-reset.c
@@ -0,0 +1,143 @@
1/*
2 * Linux WiMAX
3 * Implement and export a method for resetting a WiMAX device
4 *
5 *
6 * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This implements a simple synchronous call to reset a WiMAX device.
25 *
26 * Resets aim at being warm, keeping the device handles active;
27 * however, when that fails, it falls back to a cold reset (that will
28 * disconnect and reconnect the device).
29 */
30
31#include <net/wimax.h>
32#include <net/genetlink.h>
33#include <linux/wimax.h>
34#include <linux/security.h>
35#include "wimax-internal.h"
36
37#define D_SUBMODULE op_reset
38#include "debug-levels.h"
39
40
41/**
42 * wimax_reset - Reset a WiMAX device
43 *
44 * @wimax_dev: WiMAX device descriptor
45 *
46 * Returns:
47 *
48 * %0 if ok and a warm reset was done (the device still exists in
49 * the system).
50 *
51 * -%ENODEV if a cold/bus reset had to be done (device has
52 * disconnected and reconnected, so current handle is not valid
53 * any more).
54 *
55 * -%EINVAL if the device is not even registered.
56 *
57 * Any other negative error code shall be considered as
58 * non-recoverable.
59 *
60 * Description:
61 *
62 * Called when wanting to reset the device for any reason. Device is
63 * taken back to power on status.
64 *
65 * This call blocks; on successful return, the device has completed the
66 * reset process and is ready to operate.
67 */
68int wimax_reset(struct wimax_dev *wimax_dev)
69{
70 int result = -EINVAL;
71 struct device *dev = wimax_dev_to_dev(wimax_dev);
72 enum wimax_st state;
73
74 might_sleep();
75 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
76 mutex_lock(&wimax_dev->mutex);
77 dev_hold(wimax_dev->net_dev);
78 state = wimax_dev->state;
79 mutex_unlock(&wimax_dev->mutex);
80
81 if (state >= WIMAX_ST_DOWN) {
82 mutex_lock(&wimax_dev->mutex_reset);
83 result = wimax_dev->op_reset(wimax_dev);
84 mutex_unlock(&wimax_dev->mutex_reset);
85 }
86 dev_put(wimax_dev->net_dev);
87
88 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
89 return result;
90}
91EXPORT_SYMBOL(wimax_reset);
92
93
94static const
95struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
96 [WIMAX_GNL_RESET_IFIDX] = {
97 .type = NLA_U32,
98 },
99};
100
101
102/*
103 * Exporting to user space over generic netlink
104 *
105 * Parse the reset command from user space, return error code.
106 *
107 * No attributes.
108 */
109static
110int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
111{
112 int result, ifindex;
113 struct wimax_dev *wimax_dev;
114 struct device *dev;
115
116 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
117 result = -ENODEV;
118 if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
119 printk(KERN_ERR "WIMAX_GNL_OP_RESET: can't find IFIDX "
120 "attribute\n");
121 goto error_no_wimax_dev;
122 }
123 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
124 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
125 if (wimax_dev == NULL)
126 goto error_no_wimax_dev;
127 dev = wimax_dev_to_dev(wimax_dev);
128 /* Execute the operation and send the result back to user space */
129 result = wimax_reset(wimax_dev);
130 dev_put(wimax_dev->net_dev);
131error_no_wimax_dev:
132 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
133 return result;
134}
135
136
137struct genl_ops wimax_gnl_reset = {
138 .cmd = WIMAX_GNL_OP_RESET,
139 .flags = GENL_ADMIN_PERM,
140 .policy = wimax_gnl_reset_policy,
141 .doit = wimax_gnl_doit_reset,
142 .dumpit = NULL,
143};
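The return-code contract documented on wimax_reset() distinguishes warm from cold resets, and callers are expected to branch on it. A sketch:

	result = wimax_reset(wimax_dev);
	if (result == 0) {
		/* warm reset: the device handle is still valid */
	} else if (result == -ENODEV) {
		/* cold/bus reset: device reconnected, handle is stale */
	} else if (result == -EINVAL) {
		/* device was not registered */
	} else {
		/* any other negative errno: non-recoverable */
	}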
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
new file mode 100644
index 000000000000..2b75aee04217
--- /dev/null
+++ b/net/wimax/op-rfkill.c
@@ -0,0 +1,532 @@
1/*
2 * Linux WiMAX
3 * RF-kill framework integration
4 *
5 *
6 * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This integrates into the Linux Kernel rfkill subsystem so that the
25 * drivers just have to do the bare minimal work, which is providing a
26 * method to set the software RF-Kill switch and to report changes in
27 * the software and hardware switch status.
28 *
29 * A non-polled generic rfkill device is embedded into the WiMAX
30 * subsystem's representation of a device.
31 *
32 * FIXME: Need polled support? use a timer or add the implementation
33 * to the stack.
34 *
35 * All device drivers have to do is after wimax_dev_init(), call
36 * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update
37 * initial state and then every time it changes. See wimax.h:struct
38 * wimax_dev for more information.
39 *
40 * ROADMAP
41 *
42 * wimax_gnl_doit_rfkill() User space calling wimax_rfkill()
43 * wimax_rfkill() Kernel calling wimax_rfkill()
44 * __wimax_rf_toggle_radio()
45 *
46 * wimax_rfkill_toggle_radio() RF-Kill subsystem calling
47 * __wimax_rf_toggle_radio()
48 *
49 * __wimax_rf_toggle_radio()
50 * wimax_dev->op_rfkill_sw_toggle() Driver backend
51 * __wimax_state_change()
52 *
53 * wimax_report_rfkill_sw() Driver reports state change
54 * __wimax_state_change()
55 *
56 * wimax_report_rfkill_hw() Driver reports state change
57 * __wimax_state_change()
58 *
59 * wimax_rfkill_add() Initialize/shutdown rfkill support
60 * wimax_rfkill_rm() [called by wimax_dev_add/rm()]
61 */
62
63#include <net/wimax.h>
64#include <net/genetlink.h>
65#include <linux/wimax.h>
66#include <linux/security.h>
67#include <linux/rfkill.h>
68#include <linux/input.h>
69#include "wimax-internal.h"
70
71#define D_SUBMODULE op_rfkill
72#include "debug-levels.h"
73
74#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
75
76
77/**
78 * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
79 *
80 * @wimax_dev: WiMAX device descriptor
81 *
82 * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on,
83 * %WIMAX_RF_OFF radio off.
84 *
85 * When the device detects a change in the state of the hardware RF
86 * switch, it must call this function to let the WiMAX kernel stack
87 * know that the state has changed so it can be properly propagated.
88 *
89 * The WiMAX stack caches the state (the driver doesn't need to). As
90 * well, as the change is propagated it will come back as a request to
91 * change the software state to mirror the hardware state.
92 *
93 * If the device doesn't have a hardware kill switch, just report
94 * it on initialization as always on (%WIMAX_RF_ON, radio on).
95 */
96void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
97 enum wimax_rf_state state)
98{
99 int result;
100 struct device *dev = wimax_dev_to_dev(wimax_dev);
101 enum wimax_st wimax_state;
102 enum rfkill_state rfkill_state;
103
104 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
105 BUG_ON(state == WIMAX_RF_QUERY);
106 BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
107
108 mutex_lock(&wimax_dev->mutex);
109 result = wimax_dev_is_ready(wimax_dev);
110 if (result < 0)
111 goto error_not_ready;
112
113 if (state != wimax_dev->rf_hw) {
114 wimax_dev->rf_hw = state;
115 rfkill_state = state == WIMAX_RF_ON ?
116 RFKILL_STATE_OFF : RFKILL_STATE_ON;
117 if (wimax_dev->rf_hw == WIMAX_RF_ON
118 && wimax_dev->rf_sw == WIMAX_RF_ON)
119 wimax_state = WIMAX_ST_READY;
120 else
121 wimax_state = WIMAX_ST_RADIO_OFF;
122 __wimax_state_change(wimax_dev, wimax_state);
123 input_report_key(wimax_dev->rfkill_input, KEY_WIMAX,
124 rfkill_state);
125 }
126error_not_ready:
127 mutex_unlock(&wimax_dev->mutex);
128 d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
129 wimax_dev, state, result);
130}
131EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
132
133
134/**
135 * wimax_report_rfkill_sw - Reports changes in the software RF switch
136 *
137 * @wimax_dev: WiMAX device descriptor
138 *
139 * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
140 * %WIMAX_RF_OFF radio off.
141 *
142 * Reports changes in the software RF switch state to the WiMAX
143 * stack.
144 *
145 * The main use is during initialization, so the driver can query the
146 * device for its current software radio kill switch state and feed it
147 * to the system.
148 *
149 * Normally the device should not change the software state by
150 * itself; in practice this can happen, as the device might decide to
151 * switch (in software) the radio off for different reasons.
152 */
153void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
154 enum wimax_rf_state state)
155{
156 int result;
157 struct device *dev = wimax_dev_to_dev(wimax_dev);
158 enum wimax_st wimax_state;
159
160 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
161 BUG_ON(state == WIMAX_RF_QUERY);
162 BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
163
164 mutex_lock(&wimax_dev->mutex);
165 result = wimax_dev_is_ready(wimax_dev);
166 if (result < 0)
167 goto error_not_ready;
168
169 if (state != wimax_dev->rf_sw) {
170 wimax_dev->rf_sw = state;
171 if (wimax_dev->rf_hw == WIMAX_RF_ON
172 && wimax_dev->rf_sw == WIMAX_RF_ON)
173 wimax_state = WIMAX_ST_READY;
174 else
175 wimax_state = WIMAX_ST_RADIO_OFF;
176 __wimax_state_change(wimax_dev, wimax_state);
177 }
178error_not_ready:
179 mutex_unlock(&wimax_dev->mutex);
180 d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
181 wimax_dev, state, result);
182}
183EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
184
185
186/*
187 * Callback for the RF Kill toggle operation
188 *
189 * This function is called by:
190 *
191 * - The rfkill subsystem when the RF-Kill key is pressed in the
192 * hardware and the driver notifies through
193 * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back
194 * here so the software RF Kill switch state is changed to reflect
195 * the hardware switch state.
196 *
197 * - When the user sets the state through sysfs' rfkill/state file
198 *
199 * - When the user calls wimax_rfkill().
200 *
201 * This call blocks!
202 *
203 * WARNING! When we call rfkill_unregister(), this will be called with
204 * state 0!
205 *
206 * WARNING: wimax_dev must be locked
207 */
208static
209int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev,
210 enum wimax_rf_state state)
211{
212 int result = 0;
213 struct device *dev = wimax_dev_to_dev(wimax_dev);
214 enum wimax_st wimax_state;
215
216 might_sleep();
217 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
218 if (wimax_dev->rf_sw == state)
219 goto out_no_change;
220 if (wimax_dev->op_rfkill_sw_toggle != NULL)
221 result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state);
222 else if (state == WIMAX_RF_OFF) /* No op? can't turn off */
223 result = -ENXIO;
224 else /* No op? can turn on */
225 result = 0; /* should never happen tho */
226 if (result >= 0) {
227 result = 0;
228 wimax_dev->rf_sw = state;
229 wimax_state = state == WIMAX_RF_ON ?
230 WIMAX_ST_READY : WIMAX_ST_RADIO_OFF;
231 __wimax_state_change(wimax_dev, wimax_state);
232 }
233out_no_change:
234 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
235 wimax_dev, state, result);
236 return result;
237}
238
239
240/*
241 * Translate from rfkill state to wimax state
242 *
243 * NOTE: Special state handling rules here
244 *
245 * Just pretend the call didn't happen if we are in a state where
246 * we know for sure it cannot be handled (WIMAX_ST_DOWN or
247 * __WIMAX_ST_QUIESCING). rfkill() needs it to register and
248 * unregister, as it will run this path.
249 *
250 * NOTE: This call will block until the operation is completed.
251 */
252static
253int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state)
254{
255 int result;
256 struct wimax_dev *wimax_dev = data;
257 struct device *dev = wimax_dev_to_dev(wimax_dev);
258 enum wimax_rf_state rf_state;
259
260 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
261 switch (state) {
262 case RFKILL_STATE_ON:
263 rf_state = WIMAX_RF_OFF;
264 break;
265 case RFKILL_STATE_OFF:
266 rf_state = WIMAX_RF_ON;
267 break;
268 default:
269 BUG();
270 }
271 mutex_lock(&wimax_dev->mutex);
272 if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
273 result = 0; /* just pretend it didn't happen */
274 else
275 result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
276 mutex_unlock(&wimax_dev->mutex);
277 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
278 wimax_dev, state, result);
279 return result;
280}
281
282
283/**
284 * wimax_rfkill - Set the software RF switch state for a WiMAX device
285 *
286 * @wimax_dev: WiMAX device descriptor
287 *
288 * @state: New RF state.
289 *
290 * Returns:
291 *
292 * >= 0 toggle state if ok, < 0 errno code on error. The toggle state
293 * is returned as a bitmap, bit 0 being the hardware RF state, bit 1
294 * the software RF state.
295 *
296 * 0 means the switch is disengaged (%WIMAX_RF_ON, radio on), 1 means
297 * it is engaged (%WIMAX_RF_OFF, radio off).
298 *
299 * Description:
300 *
301 * Called by the user when he wants to request the WiMAX radio to be
302 * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With
303 * %WIMAX_RF_QUERY, just the current state is returned.
304 *
305 * NOTE:
306 *
307 * This call will block until the operation is complete.
308 */
309int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
310{
311 int result;
312 struct device *dev = wimax_dev_to_dev(wimax_dev);
313
314 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
315 mutex_lock(&wimax_dev->mutex);
316 result = wimax_dev_is_ready(wimax_dev);
317 if (result < 0)
318 goto error_not_ready;
319 switch (state) {
320 case WIMAX_RF_ON:
321 case WIMAX_RF_OFF:
322 result = __wimax_rf_toggle_radio(wimax_dev, state);
323 if (result < 0)
324 goto error;
325 break;
326 case WIMAX_RF_QUERY:
327 break;
328 default:
329 result = -EINVAL;
330 goto error;
331 }
332 result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw;
333error:
334error_not_ready:
335 mutex_unlock(&wimax_dev->mutex);
336 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
337 wimax_dev, state, result);
338 return result;
339}
340EXPORT_SYMBOL(wimax_rfkill);
341
342
343/*
344 * Register a new WiMAX device's RF Kill support
345 *
346 * WARNING: wimax_dev->mutex must be unlocked
347 */
348int wimax_rfkill_add(struct wimax_dev *wimax_dev)
349{
350 int result;
351 struct rfkill *rfkill;
352 struct input_dev *input_dev;
353 struct device *dev = wimax_dev_to_dev(wimax_dev);
354
355 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
356 /* Initialize RF Kill */
357 result = -ENOMEM;
358 rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX);
359 if (rfkill == NULL)
360 goto error_rfkill_allocate;
361 wimax_dev->rfkill = rfkill;
362
363 rfkill->name = wimax_dev->name;
364 rfkill->state = RFKILL_STATE_OFF;
365 rfkill->data = wimax_dev;
366 rfkill->toggle_radio = wimax_rfkill_toggle_radio;
367 rfkill->user_claim_unsupported = 1;
368
369 /* Initialize the input device for the hw key */
370 input_dev = input_allocate_device();
371 if (input_dev == NULL)
372 goto error_input_allocate;
373 wimax_dev->rfkill_input = input_dev;
374 d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev);
375
376 input_dev->name = wimax_dev->name;
377 /* FIXME: get a real device bus ID and stuff? do we care? */
378 input_dev->id.bustype = BUS_HOST;
379 input_dev->id.vendor = 0xffff;
380 input_dev->evbit[0] = BIT(EV_KEY);
381 set_bit(KEY_WIMAX, input_dev->keybit);
382
383 /* Register both */
384 result = input_register_device(wimax_dev->rfkill_input);
385 if (result < 0)
386 goto error_input_register;
387 result = rfkill_register(wimax_dev->rfkill);
388 if (result < 0)
389 goto error_rfkill_register;
390
391 /* If there is no SW toggle op, SW RFKill is always on */
392 if (wimax_dev->op_rfkill_sw_toggle == NULL)
393 wimax_dev->rf_sw = WIMAX_RF_ON;
394
395 d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
396 return 0;
397
398 /* if rfkill_register() succeeds, can't use rfkill_free() any
399 * more, only rfkill_unregister() [it owns the refcount]; with
400 * the input device we have the same issue--hence the if. */
401error_rfkill_register:
402 input_unregister_device(wimax_dev->rfkill_input);
403 wimax_dev->rfkill_input = NULL;
404error_input_register:
405 if (wimax_dev->rfkill_input)
406 input_free_device(wimax_dev->rfkill_input);
407error_input_allocate:
408 rfkill_free(wimax_dev->rfkill);
409error_rfkill_allocate:
410 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
411 return result;
412}
413
414
415/*
416 * Deregister a WiMAX device's RF Kill support
417 *
418 * Ick, we can't call rfkill_free() after rfkill_unregister()...oh
419 * well.
420 *
421 * WARNING: wimax_dev->mutex must be unlocked
422 */
423void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
424{
425 struct device *dev = wimax_dev_to_dev(wimax_dev);
426 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
427 rfkill_unregister(wimax_dev->rfkill); /* frees */
428 input_unregister_device(wimax_dev->rfkill_input);
429 d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
430}
431
432
433#else /* #ifdef CONFIG_RFKILL */
434
435void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
436 enum wimax_rf_state state)
437{
438}
439EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
440
441void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
442 enum wimax_rf_state state)
443{
444}
445EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
446
447int wimax_rfkill(struct wimax_dev *wimax_dev,
448 enum wimax_rf_state state)
449{
450 return WIMAX_RF_ON << 1 | WIMAX_RF_ON;
451}
452EXPORT_SYMBOL_GPL(wimax_rfkill);
453
454int wimax_rfkill_add(struct wimax_dev *wimax_dev)
455{
456 return 0;
457}
458
459void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
460{
461}
462
463#endif /* #ifdef CONFIG_RFKILL */
464
465
466/*
467 * Exporting to user space over generic netlink
468 *
469 * Parse the rfkill command from user space, return a combination
470 * value that describe the states of the different toggles.
471 *
472 * Only one attribute: the new state requested (on, off or no change,
473 * just query).
474 */
475
476static const
477struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
478 [WIMAX_GNL_RFKILL_IFIDX] = {
479 .type = NLA_U32,
480 },
481 [WIMAX_GNL_RFKILL_STATE] = {
482 .type = NLA_U32 /* enum wimax_rf_state */
483 },
484};
485
486
487static
488int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
489{
490 int result, ifindex;
491 struct wimax_dev *wimax_dev;
492 struct device *dev;
493 enum wimax_rf_state new_state;
494
495 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
496 result = -ENODEV;
497 if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
498 printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX "
499 "attribute\n");
500 goto error_no_wimax_dev;
501 }
502 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
503 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
504 if (wimax_dev == NULL)
505 goto error_no_wimax_dev;
506 dev = wimax_dev_to_dev(wimax_dev);
507 result = -EINVAL;
508 if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) {
509 dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE "
510 "attribute\n");
511 goto error_no_pid;
512 }
513 new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]);
514
515 /* Execute the operation and send the result back to user space */
516 result = wimax_rfkill(wimax_dev, new_state);
517error_no_pid:
518 dev_put(wimax_dev->net_dev);
519error_no_wimax_dev:
520 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
521 return result;
522}
523
524
525struct genl_ops wimax_gnl_rfkill = {
526 .cmd = WIMAX_GNL_OP_RFKILL,
527 .flags = GENL_ADMIN_PERM,
528 .policy = wimax_gnl_rfkill_policy,
529 .doit = wimax_gnl_doit_rfkill,
530 .dumpit = NULL,
531};
532
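How a driver feeds switch events into this interface is not shown in this file, so here is a hedged sketch: my_wimax_priv, my_poll_rf() and my_read_rf_gpio() are hypothetical driver glue; only wimax_report_rfkill_hw() and the wimax_rf_state values come from the code above.

	/* Hypothetical driver glue: push the physical switch position read
	 * from the hardware into the WiMAX stack's rfkill bookkeeping. */
	static void my_poll_rf(struct my_wimax_priv *priv)
	{
		enum wimax_rf_state hw_state;

		hw_state = my_read_rf_gpio(priv) ? WIMAX_RF_OFF : WIMAX_RF_ON;
		wimax_report_rfkill_hw(&priv->wimax_dev, hw_state);
	}
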
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
new file mode 100644
index 000000000000..3869c0327882
--- /dev/null
+++ b/net/wimax/stack.c
@@ -0,0 +1,612 @@
1/*
2 * Linux WiMAX
3 * Initialization, addition and removal of wimax devices
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This implements:
25 *
26 * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
27 * addition/registration initialize all subfields and allocate
28 * generic netlink resources for user space communication. On
29 * removal/unregistration, undo all that.
30 *
31 * - device state machine [wimax_state_change()] and support to send
32 * reports to user space when the state changes
33 * [wimax_gnl_re_state_change*()].
34 *
35 * See include/net/wimax.h for rationales and design.
36 *
37 * ROADMAP
38 *
39 * [__]wimax_state_change() Called by drivers to update device's state
40 * wimax_gnl_re_state_change_alloc()
41 * wimax_gnl_re_state_change_send()
42 *
43 * wimax_dev_init() Init a device
44 * wimax_dev_add() Register
45 * wimax_rfkill_add()
46 * wimax_gnl_add() Register all the generic netlink resources.
47 * wimax_id_table_add()
48 * wimax_dev_rm() Unregister
49 * wimax_id_table_rm()
50 * wimax_gnl_rm()
51 * wimax_rfkill_rm()
52 */
53#include <linux/device.h>
54#include <net/genetlink.h>
55#include <linux/netdevice.h>
56#include <linux/wimax.h>
57#include "wimax-internal.h"
58
59
60#define D_SUBMODULE stack
61#include "debug-levels.h"
62
63/*
64 * Authoritative source for the RE_STATE_CHANGE attribute policy
65 *
66 * We don't really use it here, but /me likes to keep the definition
67 * close to where the data is generated.
68 */
69/*
70static const
71struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
72 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
73 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
74};
75*/
76
77
78/*
79 * Allocate a Report State Change message
80 *
81 * @header: save it, you need it for _send()
82 *
83 * Creates and fills a basic state change message; different code
84 * paths can then add more attributes to the message as needed.
85 *
86 * Use wimax_gnl_re_state_change_send() to send the returned skb.
87 *
88 * Returns: skb with the genl message if ok, IS_ERR() ptr on error
89 * with an errno code.
90 */
91static
92struct sk_buff *wimax_gnl_re_state_change_alloc(
93 struct wimax_dev *wimax_dev,
94 enum wimax_st new_state, enum wimax_st old_state,
95 void **header)
96{
97 int result;
98 struct device *dev = wimax_dev_to_dev(wimax_dev);
99 void *data;
100 struct sk_buff *report_skb;
101
102 d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
103 wimax_dev, new_state, old_state);
104 result = -ENOMEM;
105 report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
106 if (report_skb == NULL) {
107 dev_err(dev, "RE_STCH: can't create message\n");
108 goto error_new;
109 }
110 data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family,
111 0, WIMAX_GNL_RE_STATE_CHANGE);
112 if (data == NULL) {
113 dev_err(dev, "RE_STCH: can't put data into message\n");
114 goto error_put;
115 }
116 *header = data;
117
118 result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
119 if (result < 0) {
120 dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
121 goto error_put;
122 }
123 result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
124 if (result < 0) {
125 dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
126 goto error_put;
127 }
128 result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
129 wimax_dev->net_dev->ifindex);
130 if (result < 0) {
131 dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
132 goto error_put;
133 }
134 d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
135 wimax_dev, new_state, old_state, report_skb);
136 return report_skb;
137
138error_put:
139 nlmsg_free(report_skb);
140error_new:
141 d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
142 wimax_dev, new_state, old_state, result);
143 return ERR_PTR(result);
144}
145
146
147/*
148 * Send a Report State Change message (as created with _alloc).
149 *
150 * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
151 * @header: as returned by wimax_gnl_re_state_change_alloc()
152 *
153 * Returns: 0 if ok, < 0 errno code on error.
154 *
155 * If the message is NULL, pretend it didn't happen.
156 */
157static
158int wimax_gnl_re_state_change_send(
159 struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
160 void *header)
161{
162 int result = 0;
163 struct device *dev = wimax_dev_to_dev(wimax_dev);
164 d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
165 wimax_dev, report_skb);
166 if (report_skb == NULL)
167 goto out;
168 genlmsg_end(report_skb, header);
169 result = genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
170 if (result == -ESRCH) /* Nobody connected, ignore it */
171 result = 0; /* btw, the skb is freed already */
172 if (result < 0) {
173 dev_err(dev, "RE_STCH: Error sending: %d\n", result);
174 nlmsg_free(report_skb);
175 }
176out:
177 d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
178 wimax_dev, report_skb, result);
179 return result;
180}
181
182
183static
184void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
185 unsigned allowed_states_bm)
186{
187 if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
188 printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
189 old_state, new_state);
190 }
191}
192
193
194/*
195 * Set the current state of a WiMAX device [unlocked version of
196 * wimax_state_change()].
197 */
198void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
199{
200 struct device *dev = wimax_dev_to_dev(wimax_dev);
201 enum wimax_st old_state = wimax_dev->state;
202 struct sk_buff *stch_skb;
203 void *header;
204
205 d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
206 wimax_dev, new_state, old_state);
207
208 if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
209 dev_err(dev, "SW BUG: requesting invalid state %u\n",
210 new_state);
211 goto out;
212 }
213 if (old_state == new_state)
214 goto out;
215 header = NULL; /* gcc complains? can't grok why */
216 stch_skb = wimax_gnl_re_state_change_alloc(
217 wimax_dev, new_state, old_state, &header);
218
219 /* Verify the state transition and do exit-from-state actions */
220 switch (old_state) {
221 case __WIMAX_ST_NULL:
222 __check_new_state(old_state, new_state,
223 1 << WIMAX_ST_DOWN);
224 break;
225 case WIMAX_ST_DOWN:
226 __check_new_state(old_state, new_state,
227 1 << __WIMAX_ST_QUIESCING
228 | 1 << WIMAX_ST_UNINITIALIZED
229 | 1 << WIMAX_ST_RADIO_OFF);
230 break;
231 case __WIMAX_ST_QUIESCING:
232 __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
233 break;
234 case WIMAX_ST_UNINITIALIZED:
235 __check_new_state(old_state, new_state,
236 1 << __WIMAX_ST_QUIESCING
237 | 1 << WIMAX_ST_RADIO_OFF);
238 break;
239 case WIMAX_ST_RADIO_OFF:
240 __check_new_state(old_state, new_state,
241 1 << __WIMAX_ST_QUIESCING
242 | 1 << WIMAX_ST_READY);
243 break;
244 case WIMAX_ST_READY:
245 __check_new_state(old_state, new_state,
246 1 << __WIMAX_ST_QUIESCING
247 | 1 << WIMAX_ST_RADIO_OFF
248 | 1 << WIMAX_ST_SCANNING
249 | 1 << WIMAX_ST_CONNECTING
250 | 1 << WIMAX_ST_CONNECTED);
251 break;
252 case WIMAX_ST_SCANNING:
253 __check_new_state(old_state, new_state,
254 1 << __WIMAX_ST_QUIESCING
255 | 1 << WIMAX_ST_RADIO_OFF
256 | 1 << WIMAX_ST_READY
257 | 1 << WIMAX_ST_CONNECTING
258 | 1 << WIMAX_ST_CONNECTED);
259 break;
260 case WIMAX_ST_CONNECTING:
261 __check_new_state(old_state, new_state,
262 1 << __WIMAX_ST_QUIESCING
263 | 1 << WIMAX_ST_RADIO_OFF
264 | 1 << WIMAX_ST_READY
265 | 1 << WIMAX_ST_SCANNING
266 | 1 << WIMAX_ST_CONNECTED);
267 break;
268 case WIMAX_ST_CONNECTED:
269 __check_new_state(old_state, new_state,
270 1 << __WIMAX_ST_QUIESCING
271 | 1 << WIMAX_ST_RADIO_OFF
272 | 1 << WIMAX_ST_READY);
273 netif_tx_disable(wimax_dev->net_dev);
274 netif_carrier_off(wimax_dev->net_dev);
275 break;
276 case __WIMAX_ST_INVALID:
277 default:
278 dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
279 wimax_dev, wimax_dev->state);
280 WARN_ON(1);
281 goto out;
282 }
283
284 /* Execute the actions of entry to the new state */
285 switch (new_state) {
286 case __WIMAX_ST_NULL:
287 dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
288 "from %u\n", wimax_dev, wimax_dev->state);
289 WARN_ON(1); /* Nobody can enter this state */
290 break;
291 case WIMAX_ST_DOWN:
292 break;
293 case __WIMAX_ST_QUIESCING:
294 break;
295 case WIMAX_ST_UNINITIALIZED:
296 break;
297 case WIMAX_ST_RADIO_OFF:
298 break;
299 case WIMAX_ST_READY:
300 break;
301 case WIMAX_ST_SCANNING:
302 break;
303 case WIMAX_ST_CONNECTING:
304 break;
305 case WIMAX_ST_CONNECTED:
306 netif_carrier_on(wimax_dev->net_dev);
307 netif_wake_queue(wimax_dev->net_dev);
308 break;
309 case __WIMAX_ST_INVALID:
310 default:
311 BUG();
312 }
313 __wimax_state_set(wimax_dev, new_state);
314 if (stch_skb)
315 wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
316out:
317 d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
318 wimax_dev, new_state, old_state);
319 return;
320}
321
322
323/**
324 * wimax_state_change - Set the current state of a WiMAX device
325 *
326 * @wimax_dev: WiMAX device descriptor (properly referenced)
327 * @new_state: New state to switch to
328 *
329 * This implements the state changes for the wimax devices. It will
330 *
331 * - verify that the state transition is legal (for now it'll just
332 * print a warning if not) according to the table in
333 * linux/wimax.h's documentation for 'enum wimax_st'.
334 *
335 * - perform the actions needed for leaving the current state and
336 * whichever are needed for entering the new state.
337 *
338 * - issue a report to user space indicating the new state (and an
339 * optional payload with information about the new state).
340 *
341 * NOTE: @wimax_dev must be locked
342 */
343void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
344{
345 mutex_lock(&wimax_dev->mutex);
346 __wimax_state_change(wimax_dev, new_state);
347 mutex_unlock(&wimax_dev->mutex);
348 return;
349}
350EXPORT_SYMBOL_GPL(wimax_state_change);
351
352
353/**
354 * wimax_state_get() - Return the current state of a WiMAX device
355 *
356 * @wimax_dev: WiMAX device descriptor
357 *
358 * Returns: Current state of the device according to its driver.
359 */
360enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
361{
362 enum wimax_st state;
363 mutex_lock(&wimax_dev->mutex);
364 state = wimax_dev->state;
365 mutex_unlock(&wimax_dev->mutex);
366 return state;
367}
368EXPORT_SYMBOL_GPL(wimax_state_get);
369
370
371/**
372 * wimax_dev_init - initialize a newly allocated instance
373 *
374 * @wimax_dev: WiMAX device descriptor to initialize.
375 *
376 * Initializes fields of a freshly allocated @wimax_dev instance. This
377 * function assumes that after allocation, the memory occupied by
378 * @wimax_dev was zeroed.
379 */
380void wimax_dev_init(struct wimax_dev *wimax_dev)
381{
382 INIT_LIST_HEAD(&wimax_dev->id_table_node);
383 __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
384 mutex_init(&wimax_dev->mutex);
385 mutex_init(&wimax_dev->mutex_reset);
386}
387EXPORT_SYMBOL_GPL(wimax_dev_init);
388
389/*
390 * These externs are declared here because it's easier to keep this
391 * list and the wimax_gnl_ops[] array below in sync.
392 */
393extern struct genl_ops
394 wimax_gnl_msg_from_user,
395 wimax_gnl_reset,
396 wimax_gnl_rfkill;
397
398static
399struct genl_ops *wimax_gnl_ops[] = {
400 &wimax_gnl_msg_from_user,
401 &wimax_gnl_reset,
402 &wimax_gnl_rfkill,
403};
404
405
406static
407size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
408 unsigned char *addr, size_t addr_len)
409{
410 unsigned cnt, total;
411 for (total = cnt = 0; cnt < addr_len; cnt++)
412 total += scnprintf(addr_str + total, addr_str_size - total,
413 "%02x%c", addr[cnt],
414 cnt == addr_len - 1 ? '\0' : ':');
415 return total;
416}
417
418
419/**
420 * wimax_dev_add - Register a new WiMAX device
421 *
422 * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
423 * priv data). You must have called wimax_dev_init() on it before.
424 *
425 * @net_dev: net device the @wimax_dev is associated with. The
426 * function expects SET_NETDEV_DEV() and register_netdev() were
427 * already called on it.
428 *
429 * Registers the new WiMAX device, sets up the user-kernel control
430 * interface (generic netlink) and common WiMAX infrastructure.
431 *
432 * Note that the parts that allow interaction with user space are
433 * set up at the very end, once everything else is in place, because
434 * as soon as they are, the driver might get user space control
435 * requests via netlink or debugfs that translate into calls into
436 * wimax_dev->op_*().
437 */
438int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
439{
440 int result;
441 struct device *dev = net_dev->dev.parent;
442 char addr_str[32];
443
444 d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
445
446 /* Do the RFKILL setup before locking, as RFKILL will call
447 * into our functions. */
448 wimax_dev->net_dev = net_dev;
449 result = wimax_rfkill_add(wimax_dev);
450 if (result < 0)
451 goto error_rfkill_add;
452
453 /* Set up user-space interaction */
454 mutex_lock(&wimax_dev->mutex);
455 wimax_id_table_add(wimax_dev);
456 result = wimax_debugfs_add(wimax_dev);
457 if (result < 0) {
458 dev_err(dev, "cannot initialize debugfs: %d\n",
459 result);
460 goto error_debugfs_add;
461 }
462
463 __wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
464 mutex_unlock(&wimax_dev->mutex);
465
466 wimax_addr_scnprint(addr_str, sizeof(addr_str),
467 net_dev->dev_addr, net_dev->addr_len);
468 dev_err(dev, "WiMAX interface %s (%s) ready\n",
469 net_dev->name, addr_str);
470 d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
471 return 0;
472
473error_debugfs_add:
474 wimax_id_table_rm(wimax_dev);
475 mutex_unlock(&wimax_dev->mutex);
476 wimax_rfkill_rm(wimax_dev);
477error_rfkill_add:
478 d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
479 wimax_dev, net_dev, result);
480 return result;
481}
482EXPORT_SYMBOL_GPL(wimax_dev_add);
483
484
485/**
486 * wimax_dev_rm - Unregister an existing WiMAX device
487 *
488 * @wimax_dev: WiMAX device descriptor
489 *
490 * Unregisters a WiMAX device previously registered with
491 * wimax_dev_add().
492 *
493 * IMPORTANT! Must call before calling unregister_netdev().
494 *
495 * After this function returns, you will not get any more user space
496 * control requests (via netlink or debugfs) nor calls to wimax_dev->op_*().
497 *
498 * Reentrancy control is ensured by setting the state to
499 * %__WIMAX_ST_QUIESCING. rfkill operations coming through
500 * wimax_*rfkill*() will be stopped by the quiescing state; ops coming
501 * from the rfkill subsystem will be stopped by the support being
502 * removed by wimax_rfkill_rm().
503 */
504void wimax_dev_rm(struct wimax_dev *wimax_dev)
505{
506 d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
507
508 mutex_lock(&wimax_dev->mutex);
509 __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
510 wimax_debugfs_rm(wimax_dev);
511 wimax_id_table_rm(wimax_dev);
512 __wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
513 mutex_unlock(&wimax_dev->mutex);
514 wimax_rfkill_rm(wimax_dev);
515 d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
516}
517EXPORT_SYMBOL_GPL(wimax_dev_rm);
518
519
520/* Debug framework control of debug levels */
521struct d_level D_LEVEL[] = {
522 D_SUBMODULE_DEFINE(debugfs),
523 D_SUBMODULE_DEFINE(id_table),
524 D_SUBMODULE_DEFINE(op_msg),
525 D_SUBMODULE_DEFINE(op_reset),
526 D_SUBMODULE_DEFINE(op_rfkill),
527 D_SUBMODULE_DEFINE(stack),
528};
529size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
530
531
532struct genl_family wimax_gnl_family = {
533 .id = GENL_ID_GENERATE,
534 .name = "WiMAX",
535 .version = WIMAX_GNL_VERSION,
536 .hdrsize = 0,
537 .maxattr = WIMAX_GNL_ATTR_MAX,
538};
539
540struct genl_multicast_group wimax_gnl_mcg = {
541 .name = "msg",
542};
543
544
545
546/* Setup the wimax stack */
547static
548int __init wimax_subsys_init(void)
549{
550 int result, cnt;
551
552 d_fnstart(4, NULL, "()\n");
553 snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
554 "WiMAX");
555 result = genl_register_family(&wimax_gnl_family);
556 if (unlikely(result < 0)) {
557 printk(KERN_ERR "cannot register generic netlink family: %d\n",
558 result);
559 goto error_register_family;
560 }
561
562 for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) {
563 result = genl_register_ops(&wimax_gnl_family,
564 wimax_gnl_ops[cnt]);
565 d_printf(4, NULL, "registering generic netlink op code "
566 "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result);
567 if (unlikely(result < 0)) {
568 printk(KERN_ERR "cannot register generic netlink op "
569 "code %u: %d\n",
570 wimax_gnl_ops[cnt]->cmd, result);
571 goto error_register_ops;
572 }
573 }
574
575 result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
576 if (result < 0)
577 goto error_mc_group;
578 d_fnend(4, NULL, "() = 0\n");
579 return 0;
580
581error_mc_group:
582error_register_ops:
583 for (cnt--; cnt >= 0; cnt--)
584 genl_unregister_ops(&wimax_gnl_family,
585 wimax_gnl_ops[cnt]);
586 genl_unregister_family(&wimax_gnl_family);
587error_register_family:
588 d_fnend(4, NULL, "() = %d\n", result);
589 return result;
590
591}
592module_init(wimax_subsys_init);
593
594
595/* Shutdown the wimax stack */
596static
597void __exit wimax_subsys_exit(void)
598{
599 int cnt;
600 wimax_id_table_release();
601 genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
602 for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--)
603 genl_unregister_ops(&wimax_gnl_family,
604 wimax_gnl_ops[cnt]);
605 genl_unregister_family(&wimax_gnl_family);
606}
607module_exit(wimax_subsys_exit);
608
609MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
610MODULE_DESCRIPTION("Linux WiMAX stack");
611MODULE_LICENSE("GPL");
612
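The expected driver usage of the API above, in one place: embed a struct wimax_dev in the netdev's private data, wimax_dev_init() it right after (zeroed) allocation, register the netdev, then wimax_dev_add(); on teardown, wimax_dev_rm() strictly before unregister_netdev(). A sketch under those assumptions -- my_priv, my_probe() and my_remove() are hypothetical names, the wimax_* and netdev calls are the ones documented above:

	static int my_probe(struct my_priv *priv, struct net_device *net_dev)
	{
		int result;

		wimax_dev_init(&priv->wimax_dev);	/* priv was zeroed */
		result = register_netdev(net_dev);	/* after SET_NETDEV_DEV() */
		if (result < 0)
			return result;
		result = wimax_dev_add(&priv->wimax_dev, net_dev);
		if (result < 0)
			unregister_netdev(net_dev);
		return result;
	}

	static void my_remove(struct my_priv *priv, struct net_device *net_dev)
	{
		wimax_dev_rm(&priv->wimax_dev);	/* before unregister_netdev() */
		unregister_netdev(net_dev);
	}

From then on the driver reports link events with wimax_state_change(), e.g. wimax_state_change(&priv->wimax_dev, WIMAX_ST_CONNECTED) once the link comes up.
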
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
new file mode 100644
index 000000000000..1e743d214856
--- /dev/null
+++ b/net/wimax/wimax-internal.h
@@ -0,0 +1,91 @@
1/*
2 * Linux WiMAX
3 * Internal API for kernel space WiMAX stack
4 *
5 *
6 * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This header file is for declarations and definitions internal to
25 * the WiMAX stack. For public APIs and documentation, see
26 * include/net/wimax.h and include/linux/wimax.h.
27 */
28
29#ifndef __WIMAX_INTERNAL_H__
30#define __WIMAX_INTERNAL_H__
31#ifdef __KERNEL__
32
33#include <linux/device.h>
34#include <net/wimax.h>
35
36
37/*
38 * Decide if a (locked) device is ready for use
39 *
40 * Before using the device structure, it must be locked
41 * (wimax_dev->mutex). As well, most operations need to call this
42 * function to check if the state is the right one.
43 *
44 * An error value will be returned if the state is not the right
45 * one. In that case, the caller should not attempt to use the device
46 * and just unlock it.
47 */
48static inline __must_check
49int wimax_dev_is_ready(struct wimax_dev *wimax_dev)
50{
51 if (wimax_dev->state == __WIMAX_ST_NULL)
52 return -EINVAL; /* Device is not even registered! */
53 if (wimax_dev->state == WIMAX_ST_DOWN)
54 return -ENOMEDIUM;
55 if (wimax_dev->state == __WIMAX_ST_QUIESCING)
56 return -ESHUTDOWN;
57 return 0;
58}
59
60
61static inline
62void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
63{
64 wimax_dev->state = state;
65}
66extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
67
68#ifdef CONFIG_DEBUG_FS
69extern int wimax_debugfs_add(struct wimax_dev *);
70extern void wimax_debugfs_rm(struct wimax_dev *);
71#else
72static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
73{
74 return 0;
75}
76static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
77#endif
78
79extern void wimax_id_table_add(struct wimax_dev *);
80extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
81extern void wimax_id_table_rm(struct wimax_dev *);
82extern void wimax_id_table_release(void);
83
84extern int wimax_rfkill_add(struct wimax_dev *);
85extern void wimax_rfkill_rm(struct wimax_dev *);
86
87extern struct genl_family wimax_gnl_family;
88extern struct genl_multicast_group wimax_gnl_mcg;
89
90#endif /* #ifdef __KERNEL__ */
91#endif /* #ifndef __WIMAX_INTERNAL_H__ */
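As the comment above wimax_dev_is_ready() says, callers hold wimax_dev->mutex and bail out without touching the device when the state is wrong; a typical op is shaped roughly like this (my_op() is a hypothetical example, not code from this patch):

	static int my_op(struct wimax_dev *wimax_dev)
	{
		int result;

		mutex_lock(&wimax_dev->mutex);
		result = wimax_dev_is_ready(wimax_dev);
		if (result < 0)
			goto out_unlock;	/* wrong state: just unlock */
		result = 0;
		/* ... operate on the ready, locked device here ... */
	out_unlock:
		mutex_unlock(&wimax_dev->mutex);
		return result;
	}
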
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4f877535e666..85c9034c59b2 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -421,6 +421,31 @@ static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range,
 	return 0;
 }
 
+/**
+ * freq_in_rule_band - tells us if a frequency is in a frequency band
+ * @freq_range: frequency rule we want to query
+ * @freq_khz: frequency we are inquiring about
+ *
+ * This lets us know if a specific frequency rule is or is not relevant to
+ * a specific frequency's band. Bands are device specific and artificial
+ * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
+ * safe for now to assume that a frequency rule should not be part of a
+ * frequency's band if the start freq or end freq are off by more than 2 GHz.
+ * This resolution can be lowered and should be considered as we add
+ * regulatory rule support for other "bands".
+ **/
+static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
+			      u32 freq_khz)
+{
+#define ONE_GHZ_IN_KHZ	1000000
+	if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+		return true;
+	if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+		return true;
+	return false;
+#undef ONE_GHZ_IN_KHZ
+}
+
 /* Converts a country IE to a regulatory domain. A regulatory domain
  * structure has a lot of information which the IE doesn't yet have,
  * so for the other values we use upper max values as we will intersect
@@ -473,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
 	 * calculate the number of reg rules we will need. We will need one
 	 * for each channel subband */
 	while (country_ie_len >= 3) {
+		int end_channel = 0;
 		struct ieee80211_country_ie_triplet *triplet =
 			(struct ieee80211_country_ie_triplet *) country_ie;
 		int cur_sub_max_channel = 0, cur_channel = 0;
@@ -484,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
 			continue;
 		}
 
+		/* 2 GHz */
+		if (triplet->chans.first_channel <= 14)
+			end_channel = triplet->chans.first_channel +
+				triplet->chans.num_channels;
+		else
+			/*
+			 * 5 GHz -- For example in country IEs if the first
+			 * channel given is 36 and the number of channels is 4
+			 * then the individual channel numbers defined for the
+			 * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
+			 * and not 36, 37, 38, 39.
+			 *
+			 * See: http://tinyurl.com/11d-clarification
+			 */
+			end_channel = triplet->chans.first_channel +
+				(4 * (triplet->chans.num_channels - 1));
+
 		cur_channel = triplet->chans.first_channel;
-		cur_sub_max_channel = ieee80211_channel_to_frequency(
-			cur_channel + triplet->chans.num_channels);
+		cur_sub_max_channel = end_channel;
 
 		/* Basic sanity check */
 		if (cur_sub_max_channel < cur_channel)
@@ -538,6 +580,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
 
 	/* This time around we fill in the rd */
 	while (country_ie_len >= 3) {
+		int end_channel = 0;
 		struct ieee80211_country_ie_triplet *triplet =
 			(struct ieee80211_country_ie_triplet *) country_ie;
 		struct ieee80211_reg_rule *reg_rule = NULL;
@@ -559,6 +602,14 @@ static struct ieee80211_regdomain *country_ie_2_rd(
 
 		reg_rule->flags = flags;
 
+		/* 2 GHz */
+		if (triplet->chans.first_channel <= 14)
+			end_channel = triplet->chans.first_channel +
+				triplet->chans.num_channels;
+		else
+			end_channel = triplet->chans.first_channel +
+				(4 * (triplet->chans.num_channels - 1));
+
 		/* The +10 is since the regulatory domain expects
 		 * the actual band edge, not the center of freq for
 		 * its start and end freqs, assuming 20 MHz bandwidth on
@@ -568,8 +619,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
 			triplet->chans.first_channel) - 10);
 		freq_range->end_freq_khz =
 			MHZ_TO_KHZ(ieee80211_channel_to_frequency(
-				triplet->chans.first_channel +
-				triplet->chans.num_channels) + 10);
+				end_channel) + 10);
 
 		/* Large arbitrary values, we intersect later */
 		/* Increment this if we ever support >= 40 MHz channels
@@ -748,12 +798,23 @@ static u32 map_regdom_flags(u32 rd_flags)
  * this value to the maximum allowed bandwidth.
  * @reg_rule: the regulatory rule which we have for this frequency
  *
- * Use this function to get the regulatory rule for a specific frequency.
+ * Use this function to get the regulatory rule for a specific frequency on
+ * a given wireless device. If the device has a specific regulatory domain
+ * it wants to follow we respect that unless a country IE has been received
+ * and processed already.
+ *
+ * Returns 0 if it was able to find a valid regulatory rule which does
+ * apply to the given center_freq otherwise it returns non-zero. It will
+ * also return -ERANGE if we determine the given center_freq does not even have
+ * a regulatory rule for a frequency range in the center_freq's band. See
+ * freq_in_rule_band() for our current definition of a band -- this is purely
+ * subjective and right now it's 802.11 specific.
  */
 static int freq_reg_info(u32 center_freq, u32 *bandwidth,
 			 const struct ieee80211_reg_rule **reg_rule)
 {
 	int i;
+	bool band_rule_found = false;
 	u32 max_bandwidth = 0;
 
 	if (!cfg80211_regdomain)
@@ -767,7 +828,15 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth,
 		rr = &cfg80211_regdomain->reg_rules[i];
 		fr = &rr->freq_range;
 		pr = &rr->power_rule;
+
+		/* We only need to know if one frequency rule was
+		 * in center_freq's band, that's enough, so let's
+		 * not overwrite it once found */
+		if (!band_rule_found)
+			band_rule_found = freq_in_rule_band(fr, center_freq);
+
 		max_bandwidth = freq_max_bandwidth(fr, center_freq);
+
 		if (max_bandwidth && *bandwidth <= max_bandwidth) {
 			*reg_rule = rr;
 			*bandwidth = max_bandwidth;
@@ -775,23 +844,64 @@ static int freq_reg_info(u32 center_freq, u32 *bandwidth,
 		}
 	}
 
+	if (!band_rule_found)
+		return -ERANGE;
+
 	return !max_bandwidth;
 }
 
-static void handle_channel(struct ieee80211_channel *chan)
+static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
+			   unsigned int chan_idx)
 {
 	int r;
-	u32 flags = chan->orig_flags;
+	u32 flags;
 	u32 max_bandwidth = 0;
 	const struct ieee80211_reg_rule *reg_rule = NULL;
 	const struct ieee80211_power_rule *power_rule = NULL;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *chan;
+
+	sband = wiphy->bands[band];
+	BUG_ON(chan_idx >= sband->n_channels);
+	chan = &sband->channels[chan_idx];
+
+	flags = chan->orig_flags;
 
 	r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq),
 		&max_bandwidth, &reg_rule);
 
 	if (r) {
-		flags |= IEEE80211_CHAN_DISABLED;
-		chan->flags = flags;
+		/* This means no regulatory rule was found in the country IE
+		 * with a frequency range on the center_freq's band, since
+		 * IEEE-802.11 allows for a country IE to have a subset of the
+		 * regulatory information provided in a country we ignore
+		 * disabling the channel unless at least one reg rule was
+		 * found on the center_freq's band. For details see this
+		 * clarification:
+		 *
+		 * http://tinyurl.com/11d-clarification
+		 */
+		if (r == -ERANGE &&
+		    last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) {
+#ifdef CONFIG_CFG80211_REG_DEBUG
+			printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
+				"intact on %s - no rule found in band on "
+				"Country IE\n",
+				chan->center_freq, wiphy_name(wiphy));
+#endif
+		} else {
+			/* In this case we know the country IE has at least one reg rule
+			 * for the band so we respect its band definitions */
+#ifdef CONFIG_CFG80211_REG_DEBUG
+			if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
+				printk(KERN_DEBUG "cfg80211: Disabling "
+					"channel %d MHz on %s due to "
+					"Country IE\n",
+					chan->center_freq, wiphy_name(wiphy));
+#endif
+			flags |= IEEE80211_CHAN_DISABLED;
+			chan->flags = flags;
+		}
 		return;
 	}
 
@@ -808,12 +918,16 @@ static void handle_channel(struct ieee80211_channel *chan)
 	chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
 }
 
-static void handle_band(struct ieee80211_supported_band *sband)
+static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
 {
-	int i;
+	unsigned int i;
+	struct ieee80211_supported_band *sband;
+
+	BUG_ON(!wiphy->bands[band]);
+	sband = wiphy->bands[band];
 
 	for (i = 0; i < sband->n_channels; i++)
-		handle_channel(&sband->channels[i]);
+		handle_channel(wiphy, band, i);
 }
 
 static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby)
@@ -840,7 +954,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby)
 	enum ieee80211_band band;
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		if (wiphy->bands[band])
-			handle_band(wiphy->bands[band]);
+			handle_band(wiphy, band);
 		if (wiphy->reg_notifier)
 			wiphy->reg_notifier(wiphy, setby);
 	}
@@ -1170,7 +1284,7 @@ static void reg_country_ie_process_debug(
 	if (intersected_rd) {
 		printk(KERN_DEBUG "cfg80211: We intersect both of these "
 			"and get:\n");
-		print_regdomain_info(rd);
+		print_regdomain_info(intersected_rd);
 		return;
 	}
 	printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
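The end-channel arithmetic this patch adds in country_ie_2_rd() is worth restating on its own: 2 GHz triplets count channels one by one, while 5 GHz triplets advance in steps of 4, so first_channel 36 with num_channels 4 covers 36, 40, 44, 48 and ends at 36 + 4 * (4 - 1) = 48. A standalone restatement of the patch's computation (the helper name is ours, not the kernel's):

	/* Last channel covered by a country IE triplet, mirroring the
	 * logic country_ie_2_rd() gains in this patch. */
	static int triplet_end_channel(int first_channel, int num_channels)
	{
		if (first_channel <= 14)	/* 2 GHz */
			return first_channel + num_channels;
		/* 5 GHz: channel numbers come in steps of 4 */
		return first_channel + 4 * (num_channels - 1);
	}
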
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->do_ioctl)
-		return dev->do_ioctl(dev, ifr, cmd);
+	if (dev->netdev_ops->ndo_do_ioctl)
+		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
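With this change, a wireless driver that still wants the old ioctl path must publish its handler through net_device_ops instead of the removed net_device->do_ioctl field; schematically (my_wext_ioctl() is a hypothetical handler name):

	static const struct net_device_ops my_netdev_ops = {
		/* wext now looks the handler up via dev->netdev_ops */
		.ndo_do_ioctl	= my_wext_ioctl,
	};
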
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index c609a4b98e15..42cd18391f46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -63,7 +63,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	if (len > skb_tailroom(skb))
 		len = skb_tailroom(skb);
 
-	skb->truesize += len;
 	__skb_put(skb, len);
 
 	len += plen;