path: root/net
author		Ingo Molnar <mingo@elte.hu>	2009-01-12 05:32:03 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-12 05:32:03 -0500
commit		e3ee1e123183ca9847e74b7b8e2694c9e3b817a6 (patch)
tree		652a84674ed05eaa46a813de2223af0bd0168a5a /net
parent		5762ba1873b0bb9faa631aaa02f533c2b9837f82 (diff)
parent		c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into timers/hrtimers
Conflicts: kernel/time/tick-common.c
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan_core.c | 111
-rw-r--r--	net/8021q/vlan_dev.c | 14
-rw-r--r--	net/Kconfig | 2
-rw-r--r--	net/Makefile | 1
-rw-r--r--	net/appletalk/aarp.c | 5
-rw-r--r--	net/bluetooth/bnep/bnep.h | 1
-rw-r--r--	net/bluetooth/bnep/core.c | 12
-rw-r--r--	net/bluetooth/bnep/netdev.c | 33
-rw-r--r--	net/can/af_can.c | 15
-rw-r--r--	net/can/bcm.c | 202
-rw-r--r--	net/can/raw.c | 15
-rw-r--r--	net/core/dev.c | 296
-rw-r--r--	net/core/neighbour.c | 4
-rw-r--r--	net/core/skbuff.c | 15
-rw-r--r--	net/dcb/dcbnl.c | 14
-rw-r--r--	net/dccp/Kconfig | 4
-rw-r--r--	net/dccp/Makefile | 15
-rw-r--r--	net/dccp/ackvec.h | 49
-rw-r--r--	net/dccp/ccid.c | 254
-rw-r--r--	net/dccp/ccid.h | 14
-rw-r--r--	net/dccp/ccids/Kconfig | 79
-rw-r--r--	net/dccp/ccids/Makefile | 9
-rw-r--r--	net/dccp/ccids/ccid2.c | 22
-rw-r--r--	net/dccp/ccids/ccid3.c | 23
-rw-r--r--	net/dccp/ccids/lib/Makefile | 3
-rw-r--r--	net/dccp/ccids/lib/loss_interval.c | 3
-rw-r--r--	net/dccp/ccids/lib/packet_history.c | 9
-rw-r--r--	net/dccp/ccids/lib/tfrc.c | 19
-rw-r--r--	net/dccp/ccids/lib/tfrc.h | 11
-rw-r--r--	net/dccp/ccids/lib/tfrc_equation.c | 4
-rw-r--r--	net/dccp/dccp.h | 2
-rw-r--r--	net/dccp/feat.c | 6
-rw-r--r--	net/dccp/input.c | 2
-rw-r--r--	net/dccp/proto.c | 10
-rw-r--r--	net/dsa/slave.c | 51
-rw-r--r--	net/ipv4/cipso_ipv4.c | 86
-rw-r--r--	net/ipv4/inet_connection_sock.c | 4
-rw-r--r--	net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4
-rw-r--r--	net/ipv4/proc.c | 13
-rw-r--r--	net/ipv4/route.c | 4
-rw-r--r--	net/ipv4/tcp.c | 19
-rw-r--r--	net/ipv4/tcp_input.c | 2
-rw-r--r--	net/ipv4/tcp_ipv4.c | 5
-rw-r--r--	net/ipv6/af_inet6.c | 107
-rw-r--r--	net/ipv6/ipv6_sockglue.c | 2
-rw-r--r--	net/ipv6/route.c | 54
-rw-r--r--	net/ipv6/sysctl_net_ipv6.c | 2
-rw-r--r--	net/ipv6/tcp_ipv6.c | 50
-rw-r--r--	net/irda/ircomm/ircomm_tty.c | 5
-rw-r--r--	net/iucv/af_iucv.c | 28
-rw-r--r--	net/iucv/iucv.c | 25
-rw-r--r--	net/netfilter/ipvs/ip_vs_lblc.c | 2
-rw-r--r--	net/netfilter/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--	net/netfilter/ipvs/ip_vs_lc.c | 14
-rw-r--r--	net/netfilter/ipvs/ip_vs_nq.c | 4
-rw-r--r--	net/netfilter/ipvs/ip_vs_rr.c | 1
-rw-r--r--	net/netfilter/ipvs/ip_vs_sed.c | 1
-rw-r--r--	net/netfilter/ipvs/ip_vs_sh.c | 1
-rw-r--r--	net/netfilter/ipvs/ip_vs_wlc.c | 1
-rw-r--r--	net/netfilter/ipvs/ip_vs_wrr.c | 8
-rw-r--r--	net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r--	net/netlabel/netlabel_cipso_v4.c | 61
-rw-r--r--	net/netlabel/netlabel_domainhash.c | 67
-rw-r--r--	net/netlabel/netlabel_domainhash.h | 4
-rw-r--r--	net/netlabel/netlabel_kapi.c | 347
-rw-r--r--	net/netlabel/netlabel_unlabeled.c | 26
-rw-r--r--	net/netlabel/netlabel_unlabeled.h | 15
-rw-r--r--	net/netlink/genetlink.c | 1
-rw-r--r--	net/phonet/pep-gprs.c | 12
-rw-r--r--	net/rfkill/rfkill.c | 4
-rw-r--r--	net/sched/Kconfig | 3
-rw-r--r--	net/sched/cls_cgroup.c | 23
-rw-r--r--	net/sched/cls_u32.c | 3
-rw-r--r--	net/sched/sch_sfq.c | 2
-rw-r--r--	net/sched/sch_teql.c | 20
-rw-r--r--	net/sctp/auth.c | 6
-rw-r--r--	net/sctp/sm_statefuns.c | 6
-rw-r--r--	net/sctp/socket.c | 2
-rw-r--r--	net/sctp/tsnmap.c | 2
-rw-r--r--	net/socket.c | 13
-rw-r--r--	net/sunrpc/auth.c | 6
-rw-r--r--	net/sunrpc/auth_gss/auth_gss.c | 295
-rw-r--r--	net/sunrpc/auth_gss/gss_generic_token.c | 6
-rw-r--r--	net/sunrpc/auth_gss/gss_mech_switch.c | 18
-rw-r--r--	net/sunrpc/auth_gss/svcauth_gss.c | 28
-rw-r--r--	net/sunrpc/cache.c | 20
-rw-r--r--	net/sunrpc/clnt.c | 16
-rw-r--r--	net/sunrpc/rpc_pipe.c | 44
-rw-r--r--	net/sunrpc/stats.c | 6
-rw-r--r--	net/sunrpc/svc.c | 14
-rw-r--r--	net/sunrpc/svc_xprt.c | 58
-rw-r--r--	net/sunrpc/svcauth.c | 14
-rw-r--r--	net/sunrpc/svcauth_unix.c | 12
-rw-r--r--	net/sunrpc/svcsock.c | 30
-rw-r--r--	net/sunrpc/xdr.c | 50
-rw-r--r--	net/unix/af_unix.c | 4
-rw-r--r--	net/wimax/Kconfig | 52
-rw-r--r--	net/wimax/Makefile | 13
-rw-r--r--	net/wimax/debug-levels.h | 42
-rw-r--r--	net/wimax/debugfs.c | 90
-rw-r--r--	net/wimax/id-table.c | 144
-rw-r--r--	net/wimax/op-msg.c | 421
-rw-r--r--	net/wimax/op-reset.c | 143
-rw-r--r--	net/wimax/op-rfkill.c | 532
-rw-r--r--	net/wimax/stack.c | 599
-rw-r--r--	net/wimax/wimax-internal.h | 91
-rw-r--r--	net/wireless/wext.c | 4
-rw-r--r--	net/xfrm/xfrm_proc.c | 17
108 files changed, 4025 insertions(+), 1138 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1dc4cd0..6c1323940263 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
 #include <linux/if_vlan.h>
 #include "vlan.h"
 
-struct vlan_hwaccel_cb {
-	struct net_device *dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
-	return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
-	if (skb_bond_should_drop(skb)) {
-		dev_kfree_skb_any(skb);
-		return NET_RX_DROP;
-	}
+	if (skb_bond_should_drop(skb))
+		goto drop;
 
 	skb->vlan_tci = vlan_tci;
-	cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
 }
 EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-	struct net_device *dev = cb->dev;
+	struct net_device *dev = skb->dev;
 	struct net_device_stats *stats;
 
+	skb->dev = vlan_dev_info(dev)->real_dev;
 	netif_nit_deliver(skb);
 
-	if (dev == NULL) {
-		kfree_skb(skb);
-		return -1;
-	}
-
 	skb->dev = dev;
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
@@ -80,3 +69,79 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 	return vlan_dev_info(dev)->vlan_id;
 }
 EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+			   unsigned int vlan_tci, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	if (skb_bond_should_drop(skb))
+		goto drop;
+
+	skb->vlan_tci = vlan_tci;
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+
+drop:
+	return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+		     unsigned int vlan_tci, struct sk_buff *skb)
+{
+	int err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		kfree_skb(skb);
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		napi_reuse_skb(napi, skb);
+		break;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbdfca3f..4a19acd3a32b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	return err;
 }
 
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int err = 0;
+
+	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+		err = ops->ndo_neigh_setup(dev, pa);
+
+	return err;
+}
+
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_set_multicast_list = vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
 	.ndo_do_ioctl = vlan_dev_ioctl,
+	.ndo_neigh_setup = vlan_dev_neigh_setup,
 };
 
 static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_set_multicast_list = vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
 	.ndo_do_ioctl = vlan_dev_ioctl,
+	.ndo_neigh_setup = vlan_dev_neigh_setup,
 };
 
 void vlan_setup(struct net_device *dev)
diff --git a/net/Kconfig b/net/Kconfig
index 6ec2cce7c167..bf2776018f71 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -254,6 +254,8 @@ source "net/mac80211/Kconfig"
 
 endif # WIRELESS
 
+source "net/wimax/Kconfig"
+
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
 
diff --git a/net/Makefile b/net/Makefile
index ba4460432b7c..0fcce89d7169 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -63,3 +63,4 @@ endif
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL) += sysctl_net.o
 endif
+obj-$(CONFIG_WIMAX) += wimax/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58e9308..89f99d3beb60 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@ static void aarp_send_probe_phase1(struct atalk_iface *iface)
 {
 	struct ifreq atreq;
 	struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
+	const struct net_device_ops *ops = iface->dev->netdev_ops;
 
 	sa->sat_addr.s_node = iface->address.s_node;
 	sa->sat_addr.s_net = ntohs(iface->address.s_net);
 
 	/* We pass the Net:Node to the drivers/cards by a Device ioctl. */
-	if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
-		(void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
+	if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
+		ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
 		if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
 		    iface->address.s_node != sa->sat_addr.s_node)
 			iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a40f36e..0d9e506f5d5a 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@ struct bnep_session {
 
 	struct socket *sock;
 	struct net_device *dev;
-	struct net_device_stats stats;
 };
 
 void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8bdb4e5..52a6ce0d772b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	struct sk_buff *nskb;
 	u8 type;
 
-	s->stats.rx_bytes += skb->len;
+	dev->stats.rx_bytes += skb->len;
 
 	type = *(u8 *) skb->data; skb_pull(skb, 1);
 
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	 * may not be modified and because of the alignment requirements. */
 	nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
 	if (!nskb) {
-		s->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		kfree_skb(skb);
 		return -ENOMEM;
 	}
@@ -378,14 +378,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
 	kfree_skb(skb);
 
-	s->stats.rx_packets++;
+	dev->stats.rx_packets++;
 	nskb->ip_summed = CHECKSUM_NONE;
 	nskb->protocol = eth_type_trans(nskb, dev);
 	netif_rx_ni(nskb);
 	return 0;
 
 badframe:
-	s->stats.rx_errors++;
+	dev->stats.rx_errors++;
 	kfree_skb(skb);
 	return 0;
 }
@@ -448,8 +448,8 @@ send:
 	kfree_skb(skb);
 
 	if (len > 0) {
-		s->stats.tx_bytes += len;
-		s->stats.tx_packets++;
+		s->dev->stats.tx_bytes += len;
+		s->dev->stats.tx_packets++;
 		return 0;
 	}
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6e0444..d7a0e9722def 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@ static int bnep_net_close(struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
-{
-	struct bnep_session *s = netdev_priv(dev);
-	return &s->stats;
-}
-
 static void bnep_net_set_mc_list(struct net_device *dev)
 {
 #ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@ static void bnep_net_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	return -EINVAL;
-}
-
 #ifdef CONFIG_BT_BNEP_MC_FILTER
 static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
@@ -217,6 +206,18 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static const struct net_device_ops bnep_netdev_ops = {
+	.ndo_open            = bnep_net_open,
+	.ndo_stop            = bnep_net_close,
+	.ndo_start_xmit      = bnep_net_xmit,
+	.ndo_validate_addr   = eth_validate_addr,
+	.ndo_set_multicast_list = bnep_net_set_mc_list,
+	.ndo_set_mac_address = bnep_net_set_mac_addr,
+	.ndo_tx_timeout      = bnep_net_timeout,
+	.ndo_change_mtu      = eth_change_mtu,
+
+};
+
 void bnep_net_setup(struct net_device *dev)
 {
 
@@ -224,15 +225,7 @@ void bnep_net_setup(struct net_device *dev)
 	dev->addr_len = ETH_ALEN;
 
 	ether_setup(dev);
-
-	dev->open            = bnep_net_open;
-	dev->stop            = bnep_net_close;
-	dev->hard_start_xmit = bnep_net_xmit;
-	dev->get_stats       = bnep_net_get_stats;
-	dev->do_ioctl        = bnep_net_ioctl;
-	dev->set_mac_address = bnep_net_set_mac_addr;
-	dev->set_multicast_list = bnep_net_set_mc_list;
+	dev->netdev_ops = &bnep_netdev_ops;
 
 	dev->watchdog_timeo  = HZ * 2;
-	dev->tx_timeout      = bnep_net_timeout;
 }
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb338addd..fa417ca6cbe6 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
  * filter for error frames (CAN_ERR_FLAG bit set in mask).
  *
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing it's task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
  * Return:
  *  0 on success
  *  -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
 
 static inline void deliver(struct sk_buff *skb, struct receiver *r)
 {
-	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
-	if (clone) {
-		clone->sk = skb->sk;
-		r->func(clone, r->data);
-		r->matches++;
-	}
+	r->func(skb, r->data);
+	r->matches++;
 }
 
 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
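
[Editor's sketch] The kernel-doc added above tightens the contract for can_rx_register() callbacks: the skb stays owned by the CAN core, must not be freed by the callback, and has to be cloned when it is needed afterwards (raw.c below is adapted in exactly this way). A minimal sketch of a conforming receiver, assuming 2.6.29-era APIs; my_rx_callback and my_queue are illustrative names, not part of this patch:

	#include <linux/can/core.h>
	#include <linux/skbuff.h>

	static struct sk_buff_head my_queue;	/* initialize with skb_queue_head_init() */

	/* The skb is only valid while the callback runs; no kfree_skb() on it here. */
	static void my_rx_callback(struct sk_buff *skb, void *data)
	{
		struct sk_buff *keep = skb_clone(skb, GFP_ATOMIC);	/* private copy */

		if (keep)
			skb_queue_tail(&my_queue, keep);	/* the clone outlives the callback */
	}

Such a callback would be hooked up with can_rx_register(dev, can_id, mask, my_rx_callback, NULL, "myproto").
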
diff --git a/net/can/bcm.c b/net/can/bcm.c
index da0d426c0ce4..1649c8ab2c2f 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -70,7 +70,7 @@
 
 #define CAN_BCM_VERSION CAN_VERSION
 static __initdata const char banner[] = KERN_INFO
-	"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
+	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
 
 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -90,6 +90,7 @@ struct bcm_op {
 	unsigned long frames_abs, frames_filtered;
 	struct timeval ival1, ival2;
 	struct hrtimer timer, thrtimer;
+	struct tasklet_struct tsklet, thrtsklet;
 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
 	int rx_ifindex;
 	int count;
@@ -341,6 +342,23 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
 	}
 }
 
+static void bcm_tx_timeout_tsklet(unsigned long data)
+{
+	struct bcm_op *op = (struct bcm_op *)data;
+	struct bcm_msg_head msg_head;
+
+	/* create notification to user */
+	msg_head.opcode = TX_EXPIRED;
+	msg_head.flags = op->flags;
+	msg_head.count = op->count;
+	msg_head.ival1 = op->ival1;
+	msg_head.ival2 = op->ival2;
+	msg_head.can_id = op->can_id;
+	msg_head.nframes = 0;
+
+	bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
 /*
  * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
  */
@@ -352,20 +370,8 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
 	if (op->kt_ival1.tv64 && (op->count > 0)) {
 
 		op->count--;
-		if (!op->count && (op->flags & TX_COUNTEVT)) {
-			struct bcm_msg_head msg_head;
-
-			/* create notification to user */
-			msg_head.opcode = TX_EXPIRED;
-			msg_head.flags = op->flags;
-			msg_head.count = op->count;
-			msg_head.ival1 = op->ival1;
-			msg_head.ival2 = op->ival2;
-			msg_head.can_id = op->can_id;
-			msg_head.nframes = 0;
-
-			bcm_send_to_user(op, &msg_head, NULL, 0);
-		}
+		if (!op->count && (op->flags & TX_COUNTEVT))
+			tasklet_schedule(&op->tsklet);
 	}
 
 	if (op->kt_ival1.tv64 && (op->count > 0)) {
@@ -402,6 +408,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
 	if (op->frames_filtered > ULONG_MAX/100)
 		op->frames_filtered = op->frames_abs = 0;
 
+	/* this element is not throttled anymore */
+	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
+
 	head.opcode = RX_CHANGED;
 	head.flags = op->flags;
 	head.count = op->count;
@@ -420,37 +429,32 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
  */
 static void bcm_rx_update_and_send(struct bcm_op *op,
 				   struct can_frame *lastdata,
-				   struct can_frame *rxdata)
+				   const struct can_frame *rxdata)
 {
 	memcpy(lastdata, rxdata, CFSIZ);
 
-	/* mark as used */
-	lastdata->can_dlc |= RX_RECV;
+	/* mark as used and throttled by default */
+	lastdata->can_dlc |= (RX_RECV|RX_THR);
 
-	/* throtteling mode inactive OR data update already on the run ? */
-	if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
+	/* throtteling mode inactive ? */
+	if (!op->kt_ival2.tv64) {
 		/* send RX_CHANGED to the user immediately */
-		bcm_rx_changed(op, rxdata);
+		bcm_rx_changed(op, lastdata);
 		return;
 	}
 
-	if (hrtimer_active(&op->thrtimer)) {
-		/* mark as 'throttled' */
-		lastdata->can_dlc |= RX_THR;
+	/* with active throttling timer we are just done here */
+	if (hrtimer_active(&op->thrtimer))
 		return;
-	}
 
-	if (!op->kt_lastmsg.tv64) {
-		/* send first RX_CHANGED to the user immediately */
-		bcm_rx_changed(op, rxdata);
-		op->kt_lastmsg = ktime_get();
-		return;
-	}
+	/* first receiption with enabled throttling mode */
+	if (!op->kt_lastmsg.tv64)
+		goto rx_changed_settime;
 
+	/* got a second frame inside a potential throttle period? */
 	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
 	    ktime_to_us(op->kt_ival2)) {
-		/* mark as 'throttled' and start timer */
-		lastdata->can_dlc |= RX_THR;
+		/* do not send the saved data - only start throttle timer */
 		hrtimer_start(&op->thrtimer,
 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
 			      HRTIMER_MODE_ABS);
@@ -458,7 +462,8 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
 	}
 
 	/* the gap was that big, that throttling was not needed here */
-	bcm_rx_changed(op, rxdata);
+rx_changed_settime:
+	bcm_rx_changed(op, lastdata);
 	op->kt_lastmsg = ktime_get();
 }
 
@@ -467,7 +472,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
  * received data stored in op->last_frames[]
  */
 static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
-				struct can_frame *rxdata)
+				const struct can_frame *rxdata)
 {
 	/*
 	 * no one uses the MSBs of can_dlc for comparation,
@@ -511,14 +516,12 @@ static void bcm_rx_starttimer(struct bcm_op *op)
 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
 }
 
-/*
- * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
- */
-static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_rx_timeout_tsklet(unsigned long data)
 {
-	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+	struct bcm_op *op = (struct bcm_op *)data;
 	struct bcm_msg_head msg_head;
 
+	/* create notification to user */
 	msg_head.opcode = RX_TIMEOUT;
 	msg_head.flags = op->flags;
 	msg_head.count = op->count;
@@ -528,6 +531,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 	msg_head.nframes = 0;
 
 	bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
+/*
+ * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
+ */
+static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+{
+	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+	/* schedule before NET_RX_SOFTIRQ */
+	tasklet_hi_schedule(&op->tsklet);
 
 	/* no restart of the timer is done here! */
 
@@ -541,9 +555,25 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 }
 
 /*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+{
+	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
+		if (update)
+			bcm_rx_changed(op, &op->last_frames[index]);
+		return 1;
+	}
+	return 0;
+}
+
+/*
  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
+ *
+ * update == 0 : just check if throttled data is available (any irq context)
+ * update == 1 : check and send throttled data to userspace (soft_irq context)
  */
-static int bcm_rx_thr_flush(struct bcm_op *op)
+static int bcm_rx_thr_flush(struct bcm_op *op, int update)
 {
 	int updated = 0;
 
@@ -551,27 +581,25 @@ static int bcm_rx_thr_flush(struct bcm_op *op)
 		int i;
 
 		/* for MUX filter we start at index 1 */
-		for (i = 1; i < op->nframes; i++) {
-			if ((op->last_frames) &&
-			    (op->last_frames[i].can_dlc & RX_THR)) {
-				op->last_frames[i].can_dlc &= ~RX_THR;
-				bcm_rx_changed(op, &op->last_frames[i]);
-				updated++;
-			}
-		}
+		for (i = 1; i < op->nframes; i++)
+			updated += bcm_rx_do_flush(op, update, i);
 
 	} else {
 		/* for RX_FILTER_ID and simple filter */
-		if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
-			op->last_frames[0].can_dlc &= ~RX_THR;
-			bcm_rx_changed(op, &op->last_frames[0]);
-			updated++;
-		}
+		updated += bcm_rx_do_flush(op, update, 0);
 	}
 
 	return updated;
 }
 
+static void bcm_rx_thr_tsklet(unsigned long data)
+{
+	struct bcm_op *op = (struct bcm_op *)data;
+
+	/* push the changed data to the userspace */
+	bcm_rx_thr_flush(op, 1);
+}
+
 /*
  * bcm_rx_thr_handler - the time for blocked content updates is over now:
  *                      Check for throttled data and send it to the userspace
@@ -580,7 +608,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 {
 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
 
-	if (bcm_rx_thr_flush(op)) {
+	tasklet_schedule(&op->thrtsklet);
+
+	if (bcm_rx_thr_flush(op, 0)) {
 		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
 		return HRTIMER_RESTART;
 	} else {
@@ -596,29 +626,21 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
 static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
 	struct bcm_op *op = (struct bcm_op *)data;
-	struct can_frame rxframe;
+	const struct can_frame *rxframe = (struct can_frame *)skb->data;
 	int i;
 
 	/* disable timeout */
 	hrtimer_cancel(&op->timer);
 
-	if (skb->len == sizeof(rxframe)) {
-		memcpy(&rxframe, skb->data, sizeof(rxframe));
-		/* save rx timestamp */
-		op->rx_stamp = skb->tstamp;
-		/* save originator for recvfrom() */
-		op->rx_ifindex = skb->dev->ifindex;
-		/* update statistics */
-		op->frames_abs++;
-		kfree_skb(skb);
-
-	} else {
-		kfree_skb(skb);
+	if (op->can_id != rxframe->can_id)
 		return;
-	}
 
-	if (op->can_id != rxframe.can_id)
-		return;
+	/* save rx timestamp */
+	op->rx_stamp = skb->tstamp;
+	/* save originator for recvfrom() */
+	op->rx_ifindex = skb->dev->ifindex;
+	/* update statistics */
+	op->frames_abs++;
 
 	if (op->flags & RX_RTR_FRAME) {
 		/* send reply for RTR-request (placed in op->frames[0]) */
@@ -628,16 +650,14 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 
 	if (op->flags & RX_FILTER_ID) {
 		/* the easiest case */
-		bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
-		bcm_rx_starttimer(op);
-		return;
+		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+		goto rx_starttimer;
 	}
 
 	if (op->nframes == 1) {
 		/* simple compare with index 0 */
-		bcm_rx_cmp_to_index(op, 0, &rxframe);
-		bcm_rx_starttimer(op);
-		return;
+		bcm_rx_cmp_to_index(op, 0, rxframe);
+		goto rx_starttimer;
 	}
 
 	if (op->nframes > 1) {
@@ -649,15 +669,17 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 		 */
 
 		for (i = 1; i < op->nframes; i++) {
-			if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
+			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
 			    (GET_U64(&op->frames[0]) &
 			     GET_U64(&op->frames[i]))) {
-				bcm_rx_cmp_to_index(op, i, &rxframe);
+				bcm_rx_cmp_to_index(op, i, rxframe);
 				break;
 			}
 		}
-		bcm_rx_starttimer(op);
 	}
+
+rx_starttimer:
+	bcm_rx_starttimer(op);
 }
 
 /*
@@ -681,6 +703,12 @@ static void bcm_remove_op(struct bcm_op *op)
 	hrtimer_cancel(&op->timer);
 	hrtimer_cancel(&op->thrtimer);
 
+	if (op->tsklet.func)
+		tasklet_kill(&op->tsklet);
+
+	if (op->thrtsklet.func)
+		tasklet_kill(&op->thrtsklet);
+
 	if ((op->frames) && (op->frames != &op->sframe))
 		kfree(op->frames);
 
@@ -891,6 +919,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		op->timer.function = bcm_tx_timeout_handler;
 
+		/* initialize tasklet for tx countevent notification */
+		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
+			     (unsigned long) op);
+
 		/* currently unused in tx_ops */
 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 
@@ -1054,9 +1086,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		op->timer.function = bcm_rx_timeout_handler;
 
+		/* initialize tasklet for rx timeout notification */
+		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
+			     (unsigned long) op);
+
 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		op->thrtimer.function = bcm_rx_thr_handler;
 
+		/* initialize tasklet for rx throttle handling */
+		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
+			     (unsigned long) op);
+
 		/* add this bcm_op to the list of the rx_ops */
 		list_add(&op->list, &bo->rx_ops);
 
@@ -1102,7 +1142,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		 */
 		op->kt_lastmsg = ktime_set(0, 0);
 		hrtimer_cancel(&op->thrtimer);
-		bcm_rx_thr_flush(op);
+		bcm_rx_thr_flush(op, 1);
 	}
 
 	if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
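
[Editor's sketch] The bcm.c rework above moves every user notification out of the hrtimer callbacks (hard-irq context) into tasklets that run in softirq context, keeping the timer handlers short. A minimal sketch of that hrtimer-to-tasklet deferral pattern, assuming 2.6.29-era APIs; struct my_op and the function names are illustrative, not part of this patch:

	#include <linux/hrtimer.h>
	#include <linux/interrupt.h>

	struct my_op {
		struct hrtimer timer;
		struct tasklet_struct tsklet;
		unsigned long events;
	};

	static void my_tsklet_fn(unsigned long data)
	{
		struct my_op *op = (struct my_op *)data;

		op->events++;	/* heavier work (e.g. building a notification) runs in softirq */
	}

	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
	{
		struct my_op *op = container_of(t, struct my_op, timer);

		tasklet_hi_schedule(&op->tsklet);	/* defer; runs before NET_RX_SOFTIRQ */
		return HRTIMER_NORESTART;
	}

	static void my_op_init(struct my_op *op)
	{
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = my_timer_fn;
		tasklet_init(&op->tsklet, my_tsklet_fn, (unsigned long)op);
	}

As in bcm_remove_op() above, such a tasklet must be killed with tasklet_kill() before the owning structure is freed.
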
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63df467..0703cba4bf9f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
 	struct raw_sock *ro = raw_sk(sk);
 	struct sockaddr_can *addr;
 
-	if (!ro->recv_own_msgs) {
-		/* check the received tx sock reference */
-		if (skb->sk == sk) {
-			kfree_skb(skb);
-			return;
-		}
-	}
+	/* check the received tx sock reference */
+	if (!ro->recv_own_msgs && skb->sk == sk)
+		return;
+
+	/* clone the given skb to be able to enqueue it into the rcv queue */
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
 
 	/*
 	 * Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 446424027d24..5f736f1ceeae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,9 @@
 /* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
 
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
 /*
  * The list of packet types we will receive (as opposed to discard)
  * and the routines to invoke.
@@ -167,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -2345,7 +2329,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 	int err = -ENOENT;
 
-	if (!skb_shinfo(skb)->frag_list)
+	if (NAPI_GRO_CB(skb)->count == 1)
 		goto out;
 
 	rcu_read_lock();
@@ -2365,6 +2349,7 @@ static int napi_gro_complete(struct sk_buff *skb)
 	}
 
 out:
+	skb_shinfo(skb)->gso_size = 0;
 	__skb_push(skb, -skb_network_offset(skb));
 	return netif_receive_skb(skb);
 }
@@ -2383,7 +2368,7 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2392,6 +2377,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	int count = 0;
 	int same_flow;
 	int mac_len;
+	int free;
 
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
@@ -2408,14 +2394,18 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	skb->mac_len = mac_len;
 	NAPI_GRO_CB(skb)->same_flow = 0;
 	NAPI_GRO_CB(skb)->flush = 0;
+	NAPI_GRO_CB(skb)->free = 0;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		count++;
-		NAPI_GRO_CB(p)->same_flow =
-			p->mac_len == mac_len &&
-			!memcmp(skb_mac_header(p), skb_mac_header(skb),
-				mac_len);
-		NAPI_GRO_CB(p)->flush = 0;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		if (p->mac_len != mac_len ||
+		    memcmp(skb_mac_header(p), skb_mac_header(skb),
+			   mac_len))
+			NAPI_GRO_CB(p)->same_flow = 0;
 	}
 
 	pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2427,6 +2417,7 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		goto normal;
 
 	same_flow = NAPI_GRO_CB(skb)->same_flow;
+	free = NAPI_GRO_CB(skb)->free;
 
 	if (pp) {
 		struct sk_buff *nskb = *pp;
@@ -2446,17 +2437,124 @@ int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	}
 
 	NAPI_GRO_CB(skb)->count = 1;
+	skb_shinfo(skb)->gso_size = skb->len;
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
 
 ok:
-	return NET_RX_SUCCESS;
+	return free;
 
 normal:
-	return netif_receive_skb(skb);
+	return -1;
+}
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = 1;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+}
+
+int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+	switch (__napi_gro_receive(napi, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 1:
+		kfree_skb(skb);
+		break;
+	}
+
+	return NET_RX_SUCCESS;
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+	skb_shinfo(skb)->nr_frags = 0;
+
+	skb->len -= skb->data_len;
+	skb->truesize -= skb->data_len;
+	skb->data_len = 0;
+
+	__skb_pull(skb, skb_headlen(skb));
+	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+	napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+				  struct napi_gro_fraginfo *info)
+{
+	struct net_device *dev = napi->dev;
+	struct sk_buff *skb = napi->skb;
+
+	napi->skb = NULL;
+
+	if (!skb) {
+		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
+		if (!skb)
+			goto out;
+
+		skb_reserve(skb, NET_IP_ALIGN);
+	}
+
+	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
+	skb_shinfo(skb)->nr_frags = info->nr_frags;
+	memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
+
+	skb->data_len = info->len;
+	skb->len += info->len;
+	skb->truesize += info->len;
+
+	if (!pskb_may_pull(skb, ETH_HLEN)) {
+		napi_reuse_skb(napi, skb);
+		goto out;
+	}
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	skb->ip_summed = info->ip_summed;
+	skb->csum = info->csum;
+
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
+	switch (__napi_gro_receive(napi, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 0:
+		goto out;
+	}
+
+	napi_reuse_skb(napi, skb);
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(napi_gro_frags);
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2535,11 +2633,12 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 {
 	INIT_LIST_HEAD(&napi->poll_list);
 	napi->gro_list = NULL;
+	napi->skb = NULL;
 	napi->poll = poll;
 	napi->weight = weight;
 	list_add(&napi->dev_list, &dev->napi_list);
-#ifdef CONFIG_NETPOLL
 	napi->dev = dev;
+#ifdef CONFIG_NETPOLL
 	spin_lock_init(&napi->poll_lock);
 	napi->poll_owner = -1;
 #endif
@@ -2552,6 +2651,7 @@ void netif_napi_del(struct napi_struct *napi)
 	struct sk_buff *skb, *next;
 
 	list_del_init(&napi->dev_list);
+	kfree(napi->skb);
 
 	for (skb = napi->gro_list; skb; skb = next) {
 		next = skb->next;
@@ -2635,14 +2735,7 @@ out:
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
@@ -4833,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes. The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-				   GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-		       "netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
-
 /**
  * netdev_increment_features - increment feature set by one
@@ -5066,13 +5043,14 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
 
 static void __net_exit default_device_exit(struct net *net)
 {
-	struct net_device *dev, *next;
+	struct net_device *dev;
 	/*
 	 * Push all migratable of the network devices back to the
 	 * initial network namespace
 	 */
 	rtnl_lock();
-	for_each_netdev_safe(net, dev, next) {
+restart:
+	for_each_netdev(net, dev) {
 		int err;
 		char fb_name[IFNAMSIZ];
 
@@ -5083,7 +5061,7 @@ static void __net_exit default_device_exit(struct net *net)
 		/* Delete virtual devices */
 		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
 			dev->rtnl_link_ops->dellink(dev);
-			continue;
+			goto restart;
 		}
 
 		/* Push remaing network devices to init_net */
@@ -5094,6 +5072,7 @@ static void __net_exit default_device_exit(struct net *net)
 			       __func__, dev->name, err);
 			BUG();
 		}
+		goto restart;
 	}
 	rtnl_unlock();
 }
@@ -5166,14 +5145,15 @@ static int __init net_dev_init(void)
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
 	dst_init();
 	dev_mcast_init();
+#ifdef CONFIG_NET_DMA
+	dmaengine_get();
+#endif
 	rc = 0;
 out:
 	return rc;
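
[Editor's sketch] With the split above, dev_gro_receive() does the flow matching and merging while napi_gro_receive() wraps it with the free/deliver decision, and napi_gro_frags()/napi_fraginfo_skb() let drivers submit raw page fragments and recycle the aggregation skb via napi_reuse_skb(). A hedged sketch of how a NAPI driver would feed this path, assuming NETIF_F_GRO is set on the device; my_poll() and my_fetch_skb() are illustrative, not part of this patch:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work = 0;
		struct sk_buff *skb;

		while (work < budget && (skb = my_fetch_skb(napi)) != NULL) {
			skb->protocol = eth_type_trans(skb, napi->dev);
			napi_gro_receive(napi, skb);	/* merges, or falls back to netif_receive_skb() */
			work++;
		}

		if (work < budget)
			napi_complete(napi);	/* also flushes skbs still held on napi->gro_list */

		return work;
	}
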
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c3717a23cf7..f66c58df8953 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2414,7 +2414,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
@@ -2429,7 +2429,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct neigh_table *tbl = pde->data;
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu+1;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b8d0abb26433..5110b359c758 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2594,6 +2594,17 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 	if (skb_shinfo(p)->frag_list)
 		goto merge;
+	else if (!skb_headlen(p) && !skb_headlen(skb) &&
+		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
+		 MAX_SKB_FRAGS) {
+		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
+		       skb_shinfo(skb)->frags,
+		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+
+		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+		NAPI_GRO_CB(skb)->free = 1;
+		goto done;
+	}
 
 	headroom = skb_headroom(p);
 	nskb = netdev_alloc_skb(p->dev, headroom);
@@ -2613,6 +2624,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
 	skb_shinfo(nskb)->frag_list = p;
+	skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
 	skb_header_release(p);
 	nskb->prev = p;
 
@@ -2627,11 +2639,12 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	p = nskb;
 
 merge:
-	NAPI_GRO_CB(p)->count++;
 	p->prev->next = skb;
 	p->prev = skb;
 	skb_header_release(skb);
 
+done:
+	NAPI_GRO_CB(p)->count++;
 	p->data_len += skb->len;
 	p->truesize += skb->len;
 	p->len += skb->len;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5dbfe5fdc0d6..8379496de82b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -191,7 +191,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
191 return 0; 191 return 0;
192nlmsg_failure: 192nlmsg_failure:
193err: 193err:
194 kfree(dcbnl_skb); 194 kfree_skb(dcbnl_skb);
195 return ret; 195 return ret;
196} 196}
197 197
@@ -272,7 +272,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
272 return 0; 272 return 0;
273nlmsg_failure: 273nlmsg_failure:
274err: 274err:
275 kfree(dcbnl_skb); 275 kfree_skb(dcbnl_skb);
276err_out: 276err_out:
277 return -EINVAL; 277 return -EINVAL;
278} 278}
@@ -314,7 +314,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
314 314
315nlmsg_failure: 315nlmsg_failure:
316err: 316err:
317 kfree(dcbnl_skb); 317 kfree_skb(dcbnl_skb);
318err_out: 318err_out:
319 return -EINVAL; 319 return -EINVAL;
320} 320}
@@ -380,7 +380,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
380 return 0; 380 return 0;
381nlmsg_failure: 381nlmsg_failure:
382err: 382err:
383 kfree(dcbnl_skb); 383 kfree_skb(dcbnl_skb);
384err_out: 384err_out:
385 return -EINVAL; 385 return -EINVAL;
386} 386}
@@ -458,7 +458,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
458 return 0; 458 return 0;
459nlmsg_failure: 459nlmsg_failure:
460err: 460err:
461 kfree(dcbnl_skb); 461 kfree_skb(dcbnl_skb);
462err_out: 462err_out:
463 return ret; 463 return ret;
464} 464}
@@ -687,7 +687,7 @@ err_pg:
687 nla_nest_cancel(dcbnl_skb, pg_nest); 687 nla_nest_cancel(dcbnl_skb, pg_nest);
688nlmsg_failure: 688nlmsg_failure:
689err: 689err:
690 kfree(dcbnl_skb); 690 kfree_skb(dcbnl_skb);
691err_out: 691err_out:
692 ret = -EINVAL; 692 ret = -EINVAL;
693 return ret; 693 return ret;
@@ -949,7 +949,7 @@ err_bcn:
949 nla_nest_cancel(dcbnl_skb, bcn_nest); 949 nla_nest_cancel(dcbnl_skb, bcn_nest);
950nlmsg_failure: 950nlmsg_failure:
951err: 951err:
952 kfree(dcbnl_skb); 952 kfree_skb(dcbnl_skb);
953err_out: 953err_out:
954 ret = -EINVAL; 954 ret = -EINVAL;
955 return ret; 955 return ret;
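
Every dcbnl.c hunk in this file is the same one-line fix: the netlink reply skb comes from nlmsg_new(), so the error paths must release it with kfree_skb() rather than bare kfree(). A hedged illustration of the bug class (kernel context assumed; the failure condition is hypothetical):

	static int example_reply_error_path(void)	/* hypothetical function */
	{
		struct sk_buff *dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

		if (dcbnl_skb == NULL)
			return -ENOBUFS;
		/* ... suppose nlmsg_put()/nla_put() failed here ... */
		kfree_skb(dcbnl_skb);	/* correct: runs skb destructors and accounting */
		/* kfree(dcbnl_skb);	   wrong: frees the struct but leaks skb state */
		return -EINVAL;
	}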
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7aa2a7acc7ec..ad6dffd9070e 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -1,7 +1,6 @@
1menuconfig IP_DCCP 1menuconfig IP_DCCP
2 tristate "The DCCP Protocol (EXPERIMENTAL)" 2 tristate "The DCCP Protocol (EXPERIMENTAL)"
3 depends on INET && EXPERIMENTAL 3 depends on INET && EXPERIMENTAL
4 select IP_DCCP_CCID2
5 ---help--- 4 ---help---
6 Datagram Congestion Control Protocol (RFC 4340) 5 Datagram Congestion Control Protocol (RFC 4340)
7 6
@@ -25,9 +24,6 @@ config INET_DCCP_DIAG
25 def_tristate y if (IP_DCCP = y && INET_DIAG = y) 24 def_tristate y if (IP_DCCP = y && INET_DIAG = y)
26 def_tristate m 25 def_tristate m
27 26
28config IP_DCCP_ACKVEC
29 bool
30
31source "net/dccp/ccids/Kconfig" 27source "net/dccp/ccids/Kconfig"
32 28
33menu "DCCP Kernel Hacking" 29menu "DCCP Kernel Hacking"
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index f4f8793aafff..2991efcc8dea 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -2,14 +2,23 @@ obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
2 2
3dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o 3dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
4 4
5#
6# CCID algorithms to be used by dccp.ko
7#
8# CCID-2 is default (RFC 4340, p. 77) and has Ack Vectors as dependency
9dccp-y += ccids/ccid2.o ackvec.o
10dccp-$(CONFIG_IP_DCCP_CCID3) += ccids/ccid3.o
11dccp-$(CONFIG_IP_DCCP_TFRC_LIB) += ccids/lib/tfrc.o \
12 ccids/lib/tfrc_equation.o \
13 ccids/lib/packet_history.o \
14 ccids/lib/loss_interval.o
15
5dccp_ipv4-y := ipv4.o 16dccp_ipv4-y := ipv4.o
6 17
7# build dccp_ipv6 as module whenever either IPv6 or DCCP is a module 18# build dccp_ipv6 as module whenever either IPv6 or DCCP is a module
8obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o 19obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
9dccp_ipv6-y := ipv6.o 20dccp_ipv6-y := ipv6.o
10 21
11dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o
12
13obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o 22obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
14obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o 23obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
15 24
@@ -17,5 +26,3 @@ dccp-$(CONFIG_SYSCTL) += sysctl.o
17 26
18dccp_diag-y := diag.o 27dccp_diag-y := diag.o
19dccp_probe-y := probe.o 28dccp_probe-y := probe.o
20
21obj-y += ccids/
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 4ccee030524e..45f95e55f873 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -84,7 +84,6 @@ struct dccp_ackvec_record {
84struct sock; 84struct sock;
85struct sk_buff; 85struct sk_buff;
86 86
87#ifdef CONFIG_IP_DCCP_ACKVEC
88extern int dccp_ackvec_init(void); 87extern int dccp_ackvec_init(void);
89extern void dccp_ackvec_exit(void); 88extern void dccp_ackvec_exit(void);
90 89
@@ -106,52 +105,4 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
106{ 105{
107 return av->av_vec_len; 106 return av->av_vec_len;
108} 107}
109#else /* CONFIG_IP_DCCP_ACKVEC */
110static inline int dccp_ackvec_init(void)
111{
112 return 0;
113}
114
115static inline void dccp_ackvec_exit(void)
116{
117}
118
119static inline struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
120{
121 return NULL;
122}
123
124static inline void dccp_ackvec_free(struct dccp_ackvec *av)
125{
126}
127
128static inline int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
129 const u64 ackno, const u8 state)
130{
131 return -1;
132}
133
134static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av,
135 struct sock *sk, const u64 ackno)
136{
137}
138
139static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
140 const u64 *ackno, const u8 opt,
141 const u8 *value, const u8 len)
142{
143 return -1;
144}
145
146static inline int dccp_insert_option_ackvec(const struct sock *sk,
147 const struct sk_buff *skb)
148{
149 return -1;
150}
151
152static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
153{
154 return 0;
155}
156#endif /* CONFIG_IP_DCCP_ACKVEC */
157#endif /* _ACKVEC_H */ 108#endif /* _ACKVEC_H */
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index bcc643f992ae..f3e9ba1cfd01 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -12,56 +12,70 @@
12 */ 12 */
13 13
14#include "ccid.h" 14#include "ccid.h"
15#include "ccids/lib/tfrc.h"
15 16
16static u8 builtin_ccids[] = { 17static struct ccid_operations *ccids[] = {
17 DCCPC_CCID2, /* CCID2 is supported by default */ 18 &ccid2_ops,
18#if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE) 19#ifdef CONFIG_IP_DCCP_CCID3
19 DCCPC_CCID3, 20 &ccid3_ops,
20#endif 21#endif
21}; 22};
22 23
23static struct ccid_operations *ccids[CCID_MAX]; 24static struct ccid_operations *ccid_by_number(const u8 id)
24#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
25static atomic_t ccids_lockct = ATOMIC_INIT(0);
26static DEFINE_SPINLOCK(ccids_lock);
27
28/*
29 * The strategy is: modifications ccids vector are short, do not sleep and
30 * veeery rare, but read access should be free of any exclusive locks.
31 */
32static void ccids_write_lock(void)
33{ 25{
34 spin_lock(&ccids_lock); 26 int i;
35 while (atomic_read(&ccids_lockct) != 0) { 27
36 spin_unlock(&ccids_lock); 28 for (i = 0; i < ARRAY_SIZE(ccids); i++)
37 yield(); 29 if (ccids[i]->ccid_id == id)
38 spin_lock(&ccids_lock); 30 return ccids[i];
39 } 31 return NULL;
40} 32}
41 33
42static inline void ccids_write_unlock(void) 34/* check that up to @array_len members in @ccid_array are supported */
35bool ccid_support_check(u8 const *ccid_array, u8 array_len)
43{ 36{
44 spin_unlock(&ccids_lock); 37 while (array_len > 0)
38 if (ccid_by_number(ccid_array[--array_len]) == NULL)
39 return false;
40 return true;
45} 41}
46 42
47static inline void ccids_read_lock(void) 43/**
44 * ccid_get_builtin_ccids - Populate a list of built-in CCIDs
45 * @ccid_array: pointer to copy into
46 * @array_len: value to return length into
47 * This function allocates memory - caller must see that it is freed after use.
48 */
49int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
48{ 50{
49 atomic_inc(&ccids_lockct); 51 *ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any());
50 smp_mb__after_atomic_inc(); 52 if (*ccid_array == NULL)
51 spin_unlock_wait(&ccids_lock); 53 return -ENOBUFS;
54
55 for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1)
56 (*ccid_array)[*array_len] = ccids[*array_len]->ccid_id;
57 return 0;
52} 58}
53 59
54static inline void ccids_read_unlock(void) 60int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
61 char __user *optval, int __user *optlen)
55{ 62{
56 atomic_dec(&ccids_lockct); 63 u8 *ccid_array, array_len;
57} 64 int err = 0;
58 65
59#else 66 if (len < ARRAY_SIZE(ccids))
60#define ccids_write_lock() do { } while(0) 67 return -EINVAL;
61#define ccids_write_unlock() do { } while(0) 68
62#define ccids_read_lock() do { } while(0) 69 if (ccid_get_builtin_ccids(&ccid_array, &array_len))
63#define ccids_read_unlock() do { } while(0) 70 return -ENOBUFS;
64#endif 71
72 if (put_user(array_len, optlen) ||
73 copy_to_user(optval, ccid_array, array_len))
74 err = -EFAULT;
75
76 kfree(ccid_array);
77 return err;
78}
65 79
66static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 80static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
67{ 81{
@@ -93,48 +107,7 @@ static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
93 } 107 }
94} 108}
95 109
96/* check that up to @array_len members in @ccid_array are supported */ 110static int ccid_activate(struct ccid_operations *ccid_ops)
97bool ccid_support_check(u8 const *ccid_array, u8 array_len)
98{
99 u8 i, j, found;
100
101 for (i = 0, found = 0; i < array_len; i++, found = 0) {
102 for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++)
103 found = (ccid_array[i] == builtin_ccids[j]);
104 if (!found)
105 return false;
106 }
107 return true;
108}
109
110/**
111 * ccid_get_builtin_ccids - Provide copy of `builtin' CCID array
112 * @ccid_array: pointer to copy into
113 * @array_len: value to return length into
114 * This function allocates memory - caller must see that it is freed after use.
115 */
116int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
117{
118 *ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any());
119 if (*ccid_array == NULL)
120 return -ENOBUFS;
121 *array_len = ARRAY_SIZE(builtin_ccids);
122 return 0;
123}
124
125int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
126 char __user *optval, int __user *optlen)
127{
128 if (len < sizeof(builtin_ccids))
129 return -EINVAL;
130
131 if (put_user(sizeof(builtin_ccids), optlen) ||
132 copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids)))
133 return -EFAULT;
134 return 0;
135}
136
137int ccid_register(struct ccid_operations *ccid_ops)
138{ 111{
139 int err = -ENOBUFS; 112 int err = -ENOBUFS;
140 113
@@ -152,79 +125,40 @@ int ccid_register(struct ccid_operations *ccid_ops)
152 if (ccid_ops->ccid_hc_tx_slab == NULL) 125 if (ccid_ops->ccid_hc_tx_slab == NULL)
153 goto out_free_rx_slab; 126 goto out_free_rx_slab;
154 127
155 ccids_write_lock(); 128 pr_info("CCID: Activated CCID %d (%s)\n",
156 err = -EEXIST;
157 if (ccids[ccid_ops->ccid_id] == NULL) {
158 ccids[ccid_ops->ccid_id] = ccid_ops;
159 err = 0;
160 }
161 ccids_write_unlock();
162 if (err != 0)
163 goto out_free_tx_slab;
164
165 pr_info("CCID: Registered CCID %d (%s)\n",
166 ccid_ops->ccid_id, ccid_ops->ccid_name); 129 ccid_ops->ccid_id, ccid_ops->ccid_name);
130 err = 0;
167out: 131out:
168 return err; 132 return err;
169out_free_tx_slab:
170 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
171 ccid_ops->ccid_hc_tx_slab = NULL;
172 goto out;
173out_free_rx_slab: 133out_free_rx_slab:
174 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); 134 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
175 ccid_ops->ccid_hc_rx_slab = NULL; 135 ccid_ops->ccid_hc_rx_slab = NULL;
176 goto out; 136 goto out;
177} 137}
178 138
179EXPORT_SYMBOL_GPL(ccid_register); 139static void ccid_deactivate(struct ccid_operations *ccid_ops)
180
181int ccid_unregister(struct ccid_operations *ccid_ops)
182{ 140{
183 ccids_write_lock();
184 ccids[ccid_ops->ccid_id] = NULL;
185 ccids_write_unlock();
186
187 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); 141 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
188 ccid_ops->ccid_hc_tx_slab = NULL; 142 ccid_ops->ccid_hc_tx_slab = NULL;
189 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); 143 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
190 ccid_ops->ccid_hc_rx_slab = NULL; 144 ccid_ops->ccid_hc_rx_slab = NULL;
191 145
192 pr_info("CCID: Unregistered CCID %d (%s)\n", 146 pr_info("CCID: Deactivated CCID %d (%s)\n",
193 ccid_ops->ccid_id, ccid_ops->ccid_name); 147 ccid_ops->ccid_id, ccid_ops->ccid_name);
194 return 0;
195} 148}
196 149
197EXPORT_SYMBOL_GPL(ccid_unregister); 150struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
198
199struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
200{ 151{
201 struct ccid_operations *ccid_ops; 152 struct ccid_operations *ccid_ops = ccid_by_number(id);
202 struct ccid *ccid = NULL; 153 struct ccid *ccid = NULL;
203 154
204 ccids_read_lock();
205#ifdef CONFIG_MODULES
206 if (ccids[id] == NULL) {
207 /* We only try to load if in process context */
208 ccids_read_unlock();
209 if (gfp & GFP_ATOMIC)
210 goto out;
211 request_module("net-dccp-ccid-%d", id);
212 ccids_read_lock();
213 }
214#endif
215 ccid_ops = ccids[id];
216 if (ccid_ops == NULL) 155 if (ccid_ops == NULL)
217 goto out_unlock; 156 goto out;
218
219 if (!try_module_get(ccid_ops->ccid_owner))
220 goto out_unlock;
221
222 ccids_read_unlock();
223 157
224 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : 158 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
225 ccid_ops->ccid_hc_tx_slab, gfp); 159 ccid_ops->ccid_hc_tx_slab, gfp_any());
226 if (ccid == NULL) 160 if (ccid == NULL)
227 goto out_module_put; 161 goto out;
228 ccid->ccid_ops = ccid_ops; 162 ccid->ccid_ops = ccid_ops;
229 if (rx) { 163 if (rx) {
230 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); 164 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
@@ -239,53 +173,57 @@ struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
239 } 173 }
240out: 174out:
241 return ccid; 175 return ccid;
242out_unlock:
243 ccids_read_unlock();
244 goto out;
245out_free_ccid: 176out_free_ccid:
246 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : 177 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
247 ccid_ops->ccid_hc_tx_slab, ccid); 178 ccid_ops->ccid_hc_tx_slab, ccid);
248 ccid = NULL; 179 ccid = NULL;
249out_module_put:
250 module_put(ccid_ops->ccid_owner);
251 goto out; 180 goto out;
252} 181}
253 182
254EXPORT_SYMBOL_GPL(ccid_new); 183void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
255
256static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
257{ 184{
258 struct ccid_operations *ccid_ops; 185 if (ccid != NULL) {
259 186 if (ccid->ccid_ops->ccid_hc_rx_exit != NULL)
260 if (ccid == NULL) 187 ccid->ccid_ops->ccid_hc_rx_exit(sk);
261 return; 188 kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid);
262
263 ccid_ops = ccid->ccid_ops;
264 if (rx) {
265 if (ccid_ops->ccid_hc_rx_exit != NULL)
266 ccid_ops->ccid_hc_rx_exit(sk);
267 kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
268 } else {
269 if (ccid_ops->ccid_hc_tx_exit != NULL)
270 ccid_ops->ccid_hc_tx_exit(sk);
271 kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
272 } 189 }
273 ccids_read_lock();
274 if (ccids[ccid_ops->ccid_id] != NULL)
275 module_put(ccid_ops->ccid_owner);
276 ccids_read_unlock();
277} 190}
278 191
279void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) 192void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
280{ 193{
281 ccid_delete(ccid, sk, 1); 194 if (ccid != NULL) {
195 if (ccid->ccid_ops->ccid_hc_tx_exit != NULL)
196 ccid->ccid_ops->ccid_hc_tx_exit(sk);
197 kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid);
198 }
282} 199}
283 200
284EXPORT_SYMBOL_GPL(ccid_hc_rx_delete); 201int __init ccid_initialize_builtins(void)
285
286void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
287{ 202{
288 ccid_delete(ccid, sk, 0); 203 int i, err = tfrc_lib_init();
204
205 if (err)
206 return err;
207
208 for (i = 0; i < ARRAY_SIZE(ccids); i++) {
209 err = ccid_activate(ccids[i]);
210 if (err)
211 goto unwind_registrations;
212 }
213 return 0;
214
215unwind_registrations:
216 while (--i >= 0)
217 ccid_deactivate(ccids[i]);
218 tfrc_lib_exit();
219 return err;
289} 220}
290 221
291EXPORT_SYMBOL_GPL(ccid_hc_tx_delete); 222void ccid_cleanup_builtins(void)
223{
224 int i;
225
226 for (i = 0; i < ARRAY_SIZE(ccids); i++)
227 ccid_deactivate(ccids[i]);
228 tfrc_lib_exit();
229}
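
The net effect of the ccid.c rewrite above: CCIDs are no longer loadable modules with dynamic registration, locking and module refcounting; they are compiled in, listed in a fixed ccids[] table, looked up by ccid_by_number(), and activated once at boot via ccid_initialize_builtins(). This also lets ccid_new() drop its gfp argument. A hedged caller-side sketch of the slimmed-down API (sk is assumed to be the DCCP socket in scope; the real call site is dccp_hdlr_ccid() in the feat.c hunk further down):

	/* Sketch, kernel context assumed; DCCPC_CCID2 is the numeric CCID-2 id. */
	struct ccid *rx_ccid = ccid_new(DCCPC_CCID2, sk, /* rx: */ true);

	if (rx_ccid == NULL)
		return -ENOMEM;	/* id not built in, or slab allocation failed */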
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 18f69423a708..facedd20b531 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -29,7 +29,6 @@ struct tcp_info;
29 * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.) 29 * @ccid_id: numerical CCID ID (up to %CCID_MAX, cf. table 5 in RFC 4340, 10.)
30 * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled) 30 * @ccid_ccmps: the CCMPS including network/transport headers (0 when disabled)
31 * @ccid_name: alphabetical identifier string for @ccid_id 31 * @ccid_name: alphabetical identifier string for @ccid_id
32 * @ccid_owner: module which implements/owns this CCID
33 * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection 32 * @ccid_hc_{r,t}x_slab: memory pool for the receiver/sender half-connection
34 * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket 33 * @ccid_hc_{r,t}x_obj_size: size of the receiver/sender half-connection socket
35 * 34 *
@@ -48,7 +47,6 @@ struct ccid_operations {
48 unsigned char ccid_id; 47 unsigned char ccid_id;
49 __u32 ccid_ccmps; 48 __u32 ccid_ccmps;
50 const char *ccid_name; 49 const char *ccid_name;
51 struct module *ccid_owner;
52 struct kmem_cache *ccid_hc_rx_slab, 50 struct kmem_cache *ccid_hc_rx_slab,
53 *ccid_hc_tx_slab; 51 *ccid_hc_tx_slab;
54 __u32 ccid_hc_rx_obj_size, 52 __u32 ccid_hc_rx_obj_size,
@@ -90,8 +88,13 @@ struct ccid_operations {
90 int __user *optlen); 88 int __user *optlen);
91}; 89};
92 90
93extern int ccid_register(struct ccid_operations *ccid_ops); 91extern struct ccid_operations ccid2_ops;
94extern int ccid_unregister(struct ccid_operations *ccid_ops); 92#ifdef CONFIG_IP_DCCP_CCID3
93extern struct ccid_operations ccid3_ops;
94#endif
95
96extern int ccid_initialize_builtins(void);
97extern void ccid_cleanup_builtins(void);
95 98
96struct ccid { 99struct ccid {
97 struct ccid_operations *ccid_ops; 100 struct ccid_operations *ccid_ops;
@@ -108,8 +111,7 @@ extern int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
108extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, 111extern int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
109 char __user *, int __user *); 112 char __user *, int __user *);
110 113
111extern struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, 114extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
112 gfp_t gfp);
113 115
114static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp) 116static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp)
115{ 117{
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 12275943eab8..b28bf962edc3 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -1,80 +1,51 @@
1menu "DCCP CCIDs Configuration (EXPERIMENTAL)" 1menu "DCCP CCIDs Configuration (EXPERIMENTAL)"
2 depends on EXPERIMENTAL 2 depends on EXPERIMENTAL
3 3
4config IP_DCCP_CCID2
5 tristate "CCID2 (TCP-Like) (EXPERIMENTAL)"
6 def_tristate IP_DCCP
7 select IP_DCCP_ACKVEC
8 ---help---
9 CCID 2, TCP-like Congestion Control, denotes Additive Increase,
10 Multiplicative Decrease (AIMD) congestion control with behavior
11 modelled directly on TCP, including congestion window, slow start,
12 timeouts, and so forth [RFC 2581]. CCID 2 achieves maximum
13 bandwidth over the long term, consistent with the use of end-to-end
14 congestion control, but halves its congestion window in response to
15 each congestion event. This leads to the abrupt rate changes
16 typical of TCP. Applications should use CCID 2 if they prefer
17 maximum bandwidth utilization to steadiness of rate. This is often
18 the case for applications that are not playing their data directly
19 to the user. For example, a hypothetical application that
20 transferred files over DCCP, using application-level retransmissions
21 for lost packets, would prefer CCID 2 to CCID 3. On-line games may
22 also prefer CCID 2. See RFC 4341 for further details.
23
24 CCID2 is the default CCID used by DCCP.
25
26config IP_DCCP_CCID2_DEBUG 4config IP_DCCP_CCID2_DEBUG
27 bool "CCID2 debugging messages" 5 bool "CCID-2 debugging messages"
28 depends on IP_DCCP_CCID2 6 ---help---
29 ---help--- 7 Enable CCID-2 specific debugging messages.
30 Enable CCID2-specific debugging messages.
31 8
32 When compiling CCID2 as a module, this debugging output can 9 The debugging output can additionally be toggled by setting the
33 additionally be toggled by setting the ccid2_debug module 10 ccid2_debug parameter to 0 or 1.
34 parameter to 0 or 1.
35 11
36 If in doubt, say N. 12 If in doubt, say N.
37 13
38config IP_DCCP_CCID3 14config IP_DCCP_CCID3
39 tristate "CCID3 (TCP-Friendly) (EXPERIMENTAL)" 15 bool "CCID-3 (TCP-Friendly) (EXPERIMENTAL)"
40 def_tristate IP_DCCP 16 def_bool y if (IP_DCCP = y || IP_DCCP = m)
41 select IP_DCCP_TFRC_LIB
42 ---help--- 17 ---help---
43 CCID 3 denotes TCP-Friendly Rate Control (TFRC), an equation-based 18 CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based
44 rate-controlled congestion control mechanism. TFRC is designed to 19 rate-controlled congestion control mechanism. TFRC is designed to
45 be reasonably fair when competing for bandwidth with TCP-like flows, 20 be reasonably fair when competing for bandwidth with TCP-like flows,
46 where a flow is "reasonably fair" if its sending rate is generally 21 where a flow is "reasonably fair" if its sending rate is generally
47 within a factor of two of the sending rate of a TCP flow under the 22 within a factor of two of the sending rate of a TCP flow under the
48 same conditions. However, TFRC has a much lower variation of 23 same conditions. However, TFRC has a much lower variation of
49 throughput over time compared with TCP, which makes CCID 3 more 24 throughput over time compared with TCP, which makes CCID-3 more
50 suitable than CCID 2 for applications such as streaming media where a 25 suitable than CCID-2 for applications such as streaming media where a
51 relatively smooth sending rate is of importance. 26 relatively smooth sending rate is of importance.
52 27
53 CCID 3 is further described in RFC 4342, 28 CCID-3 is further described in RFC 4342,
54 http://www.ietf.org/rfc/rfc4342.txt 29 http://www.ietf.org/rfc/rfc4342.txt
55 30
56 The TFRC congestion control algorithms were initially described in 31 The TFRC congestion control algorithms were initially described in
57 RFC 3448. 32 RFC 5348.
58 33
59 This text was extracted from RFC 4340 (sec. 10.2), 34 This text was extracted from RFC 4340 (sec. 10.2),
60 http://www.ietf.org/rfc/rfc4340.txt 35 http://www.ietf.org/rfc/rfc4340.txt
61
62 To compile this CCID as a module, choose M here: the module will be
63 called dccp_ccid3.
64 36
65 If in doubt, say M. 37 If in doubt, say N.
66 38
67config IP_DCCP_CCID3_DEBUG 39config IP_DCCP_CCID3_DEBUG
68 bool "CCID3 debugging messages" 40 bool "CCID-3 debugging messages"
69 depends on IP_DCCP_CCID3 41 depends on IP_DCCP_CCID3
70 ---help--- 42 ---help---
71 Enable CCID3-specific debugging messages. 43 Enable CCID-3 specific debugging messages.
72 44
73 When compiling CCID3 as a module, this debugging output can 45 The debugging output can additionally be toggled by setting the
74 additionally be toggled by setting the ccid3_debug module 46 ccid3_debug parameter to 0 or 1.
75 parameter to 0 or 1.
76 47
77 If in doubt, say N. 48 If in doubt, say N.
78 49
79config IP_DCCP_CCID3_RTO 50config IP_DCCP_CCID3_RTO
80 int "Use higher bound for nofeedback timer" 51 int "Use higher bound for nofeedback timer"
@@ -108,12 +79,8 @@ config IP_DCCP_CCID3_RTO
108 therefore not be performed on WANs. 79 therefore not be performed on WANs.
109 80
110config IP_DCCP_TFRC_LIB 81config IP_DCCP_TFRC_LIB
111 tristate 82 def_bool y if IP_DCCP_CCID3
112 default n
113 83
114config IP_DCCP_TFRC_DEBUG 84config IP_DCCP_TFRC_DEBUG
115 bool 85 def_bool y if IP_DCCP_CCID3_DEBUG
116 depends on IP_DCCP_TFRC_LIB
117 default y if IP_DCCP_CCID3_DEBUG
118
119endmenu 86endmenu
diff --git a/net/dccp/ccids/Makefile b/net/dccp/ccids/Makefile
deleted file mode 100644
index 438f20bccff7..000000000000
--- a/net/dccp/ccids/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_IP_DCCP_CCID3) += dccp_ccid3.o
2
3dccp_ccid3-y := ccid3.o
4
5obj-$(CONFIG_IP_DCCP_CCID2) += dccp_ccid2.o
6
7dccp_ccid2-y := ccid2.o
8
9obj-y += lib/
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index c9ea19a4d85e..d235294ace23 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -768,10 +768,9 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
768 } 768 }
769} 769}
770 770
771static struct ccid_operations ccid2 = { 771struct ccid_operations ccid2_ops = {
772 .ccid_id = DCCPC_CCID2, 772 .ccid_id = DCCPC_CCID2,
773 .ccid_name = "TCP-like", 773 .ccid_name = "TCP-like",
774 .ccid_owner = THIS_MODULE,
775 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock), 774 .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock),
776 .ccid_hc_tx_init = ccid2_hc_tx_init, 775 .ccid_hc_tx_init = ccid2_hc_tx_init,
777 .ccid_hc_tx_exit = ccid2_hc_tx_exit, 776 .ccid_hc_tx_exit = ccid2_hc_tx_exit,
@@ -784,22 +783,5 @@ static struct ccid_operations ccid2 = {
784 783
785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 784#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
786module_param(ccid2_debug, bool, 0644); 785module_param(ccid2_debug, bool, 0644);
787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); 786MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
788#endif 787#endif
789
790static __init int ccid2_module_init(void)
791{
792 return ccid_register(&ccid2);
793}
794module_init(ccid2_module_init);
795
796static __exit void ccid2_module_exit(void)
797{
798 ccid_unregister(&ccid2);
799}
800module_exit(ccid2_module_exit);
801
802MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>");
803MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID");
804MODULE_LICENSE("GPL");
805MODULE_ALIAS("net-dccp-ccid-2");
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3b8bd7ca6761..a27b7f4c19c5 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -940,10 +940,9 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
940 return 0; 940 return 0;
941} 941}
942 942
943static struct ccid_operations ccid3 = { 943struct ccid_operations ccid3_ops = {
944 .ccid_id = DCCPC_CCID3, 944 .ccid_id = DCCPC_CCID3,
945 .ccid_name = "TCP-Friendly Rate Control", 945 .ccid_name = "TCP-Friendly Rate Control",
946 .ccid_owner = THIS_MODULE,
947 .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), 946 .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock),
948 .ccid_hc_tx_init = ccid3_hc_tx_init, 947 .ccid_hc_tx_init = ccid3_hc_tx_init,
949 .ccid_hc_tx_exit = ccid3_hc_tx_exit, 948 .ccid_hc_tx_exit = ccid3_hc_tx_exit,
@@ -964,23 +963,5 @@ static struct ccid_operations ccid3 = {
964 963
965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 964#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
966module_param(ccid3_debug, bool, 0644); 965module_param(ccid3_debug, bool, 0644);
967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 966MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages");
968#endif 967#endif
969
970static __init int ccid3_module_init(void)
971{
972 return ccid_register(&ccid3);
973}
974module_init(ccid3_module_init);
975
976static __exit void ccid3_module_exit(void)
977{
978 ccid_unregister(&ccid3);
979}
980module_exit(ccid3_module_exit);
981
982MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
983 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
984MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
985MODULE_LICENSE("GPL");
986MODULE_ALIAS("net-dccp-ccid-3");
diff --git a/net/dccp/ccids/lib/Makefile b/net/dccp/ccids/lib/Makefile
deleted file mode 100644
index 68c93e3d89dc..000000000000
--- a/net/dccp/ccids/lib/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_IP_DCCP_TFRC_LIB) += dccp_tfrc_lib.o
2
3dccp_tfrc_lib-y := tfrc.o tfrc_equation.o packet_history.o loss_interval.o
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 5b3ce0688c5c..4d1e40127264 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -60,7 +60,6 @@ void tfrc_lh_cleanup(struct tfrc_loss_hist *lh)
60 lh->ring[LIH_INDEX(lh->counter)] = NULL; 60 lh->ring[LIH_INDEX(lh->counter)] = NULL;
61 } 61 }
62} 62}
63EXPORT_SYMBOL_GPL(tfrc_lh_cleanup);
64 63
65static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) 64static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
66{ 65{
@@ -121,7 +120,6 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
121 120
122 return (lh->i_mean < old_i_mean); 121 return (lh->i_mean < old_i_mean);
123} 122}
124EXPORT_SYMBOL_GPL(tfrc_lh_update_i_mean);
125 123
126/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ 124/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */
127static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur, 125static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
@@ -169,7 +167,6 @@ int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
169 } 167 }
170 return 1; 168 return 1;
171} 169}
172EXPORT_SYMBOL_GPL(tfrc_lh_interval_add);
173 170
174int __init tfrc_li_init(void) 171int __init tfrc_li_init(void)
175{ 172{
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index 6cc108afdc3b..b7785b3581ec 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -94,7 +94,6 @@ int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
94 *headp = entry; 94 *headp = entry;
95 return 0; 95 return 0;
96} 96}
97EXPORT_SYMBOL_GPL(tfrc_tx_hist_add);
98 97
99void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp) 98void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
100{ 99{
@@ -109,7 +108,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
109 108
110 *headp = NULL; 109 *headp = NULL;
111} 110}
112EXPORT_SYMBOL_GPL(tfrc_tx_hist_purge);
113 111
114u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno, 112u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
115 const ktime_t now) 113 const ktime_t now)
@@ -127,7 +125,6 @@ u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
127 125
128 return rtt; 126 return rtt;
129} 127}
130EXPORT_SYMBOL_GPL(tfrc_tx_hist_rtt);
131 128
132 129
133/* 130/*
@@ -172,7 +169,6 @@ void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
172 169
173 tfrc_rx_hist_entry_from_skb(entry, skb, ndp); 170 tfrc_rx_hist_entry_from_skb(entry, skb, ndp);
174} 171}
175EXPORT_SYMBOL_GPL(tfrc_rx_hist_add_packet);
176 172
177/* has the packet contained in skb been seen before? */ 173/* has the packet contained in skb been seen before? */
178int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb) 174int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
@@ -189,7 +185,6 @@ int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
189 185
190 return 0; 186 return 0;
191} 187}
192EXPORT_SYMBOL_GPL(tfrc_rx_hist_duplicate);
193 188
194static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b) 189static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
195{ 190{
@@ -390,7 +385,6 @@ int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
390 } 385 }
391 return is_new_loss; 386 return is_new_loss;
392} 387}
393EXPORT_SYMBOL_GPL(tfrc_rx_handle_loss);
394 388
395int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h) 389int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h)
396{ 390{
@@ -412,7 +406,6 @@ out_free:
412 } 406 }
413 return -ENOBUFS; 407 return -ENOBUFS;
414} 408}
415EXPORT_SYMBOL_GPL(tfrc_rx_hist_alloc);
416 409
417void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) 410void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
418{ 411{
@@ -424,7 +417,6 @@ void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
424 h->ring[i] = NULL; 417 h->ring[i] = NULL;
425 } 418 }
426} 419}
427EXPORT_SYMBOL_GPL(tfrc_rx_hist_purge);
428 420
429/** 421/**
430 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against 422 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against
@@ -495,4 +487,3 @@ keep_ref_for_next_time:
495 487
496 return sample; 488 return sample;
497} 489}
498EXPORT_SYMBOL_GPL(tfrc_rx_hist_sample_rtt);
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 185916218e07..60c412ccfeef 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -1,20 +1,18 @@
1/* 1/*
2 * TFRC: main module holding the pieces of the TFRC library together 2 * TFRC library initialisation
3 * 3 *
4 * Copyright (c) 2007 The University of Aberdeen, Scotland, UK 4 * Copyright (c) 2007 The University of Aberdeen, Scotland, UK
5 * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> 5 * Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
6 */ 6 */
7#include <linux/module.h>
8#include <linux/moduleparam.h>
9#include "tfrc.h" 7#include "tfrc.h"
10 8
11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 9#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
12int tfrc_debug; 10int tfrc_debug;
13module_param(tfrc_debug, bool, 0644); 11module_param(tfrc_debug, bool, 0644);
14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); 12MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
15#endif 13#endif
16 14
17static int __init tfrc_module_init(void) 15int __init tfrc_lib_init(void)
18{ 16{
19 int rc = tfrc_li_init(); 17 int rc = tfrc_li_init();
20 18
@@ -38,18 +36,9 @@ out:
38 return rc; 36 return rc;
39} 37}
40 38
41static void __exit tfrc_module_exit(void) 39void __exit tfrc_lib_exit(void)
42{ 40{
43 tfrc_rx_packet_history_exit(); 41 tfrc_rx_packet_history_exit();
44 tfrc_tx_packet_history_exit(); 42 tfrc_tx_packet_history_exit();
45 tfrc_li_exit(); 43 tfrc_li_exit();
46} 44}
47
48module_init(tfrc_module_init);
49module_exit(tfrc_module_exit);
50
51MODULE_AUTHOR("Gerrit Renker <gerrit@erg.abdn.ac.uk>, "
52 "Ian McDonald <ian.mcdonald@jandi.co.nz>, "
53 "Arnaldo Carvalho de Melo <acme@redhat.com>");
54MODULE_DESCRIPTION("DCCP TFRC library");
55MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index ed9857527acf..e9720b143275 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -17,7 +17,8 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/math64.h> 18#include <linux/math64.h>
19#include "../../dccp.h" 19#include "../../dccp.h"
20/* internal includes that this module exports: */ 20
21/* internal includes that this library exports: */
21#include "loss_interval.h" 22#include "loss_interval.h"
22#include "packet_history.h" 23#include "packet_history.h"
23 24
@@ -66,4 +67,12 @@ extern void tfrc_rx_packet_history_exit(void);
66 67
67extern int tfrc_li_init(void); 68extern int tfrc_li_init(void);
68extern void tfrc_li_exit(void); 69extern void tfrc_li_exit(void);
70
71#ifdef CONFIG_IP_DCCP_TFRC_LIB
72extern int tfrc_lib_init(void);
73extern void tfrc_lib_exit(void);
74#else
75#define tfrc_lib_init() (0)
76#define tfrc_lib_exit()
77#endif
69#endif /* _TFRC_H_ */ 78#endif /* _TFRC_H_ */
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 2f20a29cffe4..c5d3a9e5a5a4 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -659,8 +659,6 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
659 return scaled_div32(result, f); 659 return scaled_div32(result, f);
660} 660}
661 661
662EXPORT_SYMBOL_GPL(tfrc_calc_x);
663
664/** 662/**
665 * tfrc_calc_x_reverse_lookup - try to find p given f(p) 663 * tfrc_calc_x_reverse_lookup - try to find p given f(p)
666 * 664 *
@@ -693,5 +691,3 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
693 index = tfrc_binsearch(fvalue, 0); 691 index = tfrc_binsearch(fvalue, 0);
694 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; 692 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
695} 693}
696
697EXPORT_SYMBOL_GPL(tfrc_calc_x_reverse_lookup);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 0bc4c9a02e19..f2230fc168e1 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -432,10 +432,8 @@ static inline int dccp_ack_pending(const struct sock *sk)
432{ 432{
433 const struct dccp_sock *dp = dccp_sk(sk); 433 const struct dccp_sock *dp = dccp_sk(sk);
434 return dp->dccps_timestamp_echo != 0 || 434 return dp->dccps_timestamp_echo != 0 ||
435#ifdef CONFIG_IP_DCCP_ACKVEC
436 (dp->dccps_hc_rx_ackvec != NULL && 435 (dp->dccps_hc_rx_ackvec != NULL &&
437 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) || 436 dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
438#endif
439 inet_csk_ack_scheduled(sk); 437 inet_csk_ack_scheduled(sk);
440} 438}
441 439
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 30f9fb76b921..4152308958ab 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -34,7 +34,7 @@
34static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx) 34static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx)
35{ 35{
36 struct dccp_sock *dp = dccp_sk(sk); 36 struct dccp_sock *dp = dccp_sk(sk);
37 struct ccid *new_ccid = ccid_new(ccid, sk, rx, gfp_any()); 37 struct ccid *new_ccid = ccid_new(ccid, sk, rx);
38 38
39 if (new_ccid == NULL) 39 if (new_ccid == NULL)
40 return -ENOMEM; 40 return -ENOMEM;
@@ -1214,8 +1214,6 @@ const char *dccp_feat_typename(const u8 type)
1214 return NULL; 1214 return NULL;
1215} 1215}
1216 1216
1217EXPORT_SYMBOL_GPL(dccp_feat_typename);
1218
1219const char *dccp_feat_name(const u8 feat) 1217const char *dccp_feat_name(const u8 feat)
1220{ 1218{
1221 static const char *feature_names[] = { 1219 static const char *feature_names[] = {
@@ -1240,6 +1238,4 @@ const char *dccp_feat_name(const u8 feat)
1240 1238
1241 return feature_names[feat]; 1239 return feature_names[feat];
1242} 1240}
1243
1244EXPORT_SYMBOL_GPL(dccp_feat_name);
1245#endif /* CONFIG_IP_DCCP_DEBUG */ 1241#endif /* CONFIG_IP_DCCP_DEBUG */
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 5eb443f656c1..7648f316310f 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -741,5 +741,3 @@ u32 dccp_sample_rtt(struct sock *sk, long delta)
741 741
742 return delta; 742 return delta;
743} 743}
744
745EXPORT_SYMBOL_GPL(dccp_sample_rtt);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d5c2bacb713c..945b4d5d23b3 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -964,7 +964,6 @@ adjudge_to_death:
964 state = sk->sk_state; 964 state = sk->sk_state;
965 sock_hold(sk); 965 sock_hold(sk);
966 sock_orphan(sk); 966 sock_orphan(sk);
967 percpu_counter_inc(sk->sk_prot->orphan_count);
968 967
969 /* 968 /*
970 * It is the last release_sock in its life. It will remove backlog. 969 * It is the last release_sock in its life. It will remove backlog.
@@ -978,6 +977,8 @@ adjudge_to_death:
978 bh_lock_sock(sk); 977 bh_lock_sock(sk);
979 WARN_ON(sock_owned_by_user(sk)); 978 WARN_ON(sock_owned_by_user(sk));
980 979
980 percpu_counter_inc(sk->sk_prot->orphan_count);
981
981 /* Have we already been destroyed by a softirq or backlog? */ 982 /* Have we already been destroyed by a softirq or backlog? */
982 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) 983 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
983 goto out; 984 goto out;
@@ -1117,9 +1118,15 @@ static int __init dccp_init(void)
1117 if (rc) 1118 if (rc)
1118 goto out_ackvec_exit; 1119 goto out_ackvec_exit;
1119 1120
1121 rc = ccid_initialize_builtins();
1122 if (rc)
1123 goto out_sysctl_exit;
1124
1120 dccp_timestamping_init(); 1125 dccp_timestamping_init();
1121out: 1126out:
1122 return rc; 1127 return rc;
1128out_sysctl_exit:
1129 dccp_sysctl_exit();
1123out_ackvec_exit: 1130out_ackvec_exit:
1124 dccp_ackvec_exit(); 1131 dccp_ackvec_exit();
1125out_free_dccp_mib: 1132out_free_dccp_mib:
@@ -1142,6 +1149,7 @@ out_free_percpu:
1142 1149
1143static void __exit dccp_fini(void) 1150static void __exit dccp_fini(void)
1144{ 1151{
1152 ccid_cleanup_builtins();
1145 dccp_mib_exit(); 1153 dccp_mib_exit();
1146 free_pages((unsigned long)dccp_hashinfo.bhash, 1154 free_pages((unsigned long)dccp_hashinfo.bhash,
1147 get_order(dccp_hashinfo.bhash_size * 1155 get_order(dccp_hashinfo.bhash_size *
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d20da0..a68fd79e9eca 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
286 .get_sset_count = dsa_slave_get_sset_count, 286 .get_sset_count = dsa_slave_get_sset_count,
287}; 287};
288 288
289#ifdef CONFIG_NET_DSA_TAG_DSA
290static const struct net_device_ops dsa_netdev_ops = {
291 .ndo_open = dsa_slave_open,
292 .ndo_stop = dsa_slave_close,
293 .ndo_start_xmit = dsa_xmit,
294 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
295 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
296 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
297 .ndo_set_mac_address = dsa_slave_set_mac_address,
298 .ndo_do_ioctl = dsa_slave_ioctl,
299};
300#endif
301#ifdef CONFIG_NET_DSA_TAG_EDSA
302static const struct net_device_ops edsa_netdev_ops = {
303 .ndo_open = dsa_slave_open,
304 .ndo_stop = dsa_slave_close,
305 .ndo_start_xmit = edsa_xmit,
306 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
307 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
308 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
309 .ndo_set_mac_address = dsa_slave_set_mac_address,
310 .ndo_do_ioctl = dsa_slave_ioctl,
311};
312#endif
313#ifdef CONFIG_NET_DSA_TAG_TRAILER
314static const struct net_device_ops trailer_netdev_ops = {
315 .ndo_open = dsa_slave_open,
316 .ndo_stop = dsa_slave_close,
317 .ndo_start_xmit = trailer_xmit,
318 .ndo_change_rx_flags = dsa_slave_change_rx_flags,
319 .ndo_set_rx_mode = dsa_slave_set_rx_mode,
320 .ndo_set_multicast_list = dsa_slave_set_rx_mode,
321 .ndo_set_mac_address = dsa_slave_set_mac_address,
322 .ndo_do_ioctl = dsa_slave_ioctl,
323};
324#endif
289 325
290/* slave device setup *******************************************************/ 326/* slave device setup *******************************************************/
291struct net_device * 327struct net_device *
@@ -306,32 +342,27 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
306 SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops); 342 SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
307 memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN); 343 memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
308 slave_dev->tx_queue_len = 0; 344 slave_dev->tx_queue_len = 0;
345
309 switch (ds->tag_protocol) { 346 switch (ds->tag_protocol) {
310#ifdef CONFIG_NET_DSA_TAG_DSA 347#ifdef CONFIG_NET_DSA_TAG_DSA
311 case htons(ETH_P_DSA): 348 case htons(ETH_P_DSA):
312 slave_dev->hard_start_xmit = dsa_xmit; 349 slave_dev->netdev_ops = &dsa_netdev_ops;
313 break; 350 break;
314#endif 351#endif
315#ifdef CONFIG_NET_DSA_TAG_EDSA 352#ifdef CONFIG_NET_DSA_TAG_EDSA
316 case htons(ETH_P_EDSA): 353 case htons(ETH_P_EDSA):
317 slave_dev->hard_start_xmit = edsa_xmit; 354 slave_dev->netdev_ops = &edsa_netdev_ops;
318 break; 355 break;
319#endif 356#endif
320#ifdef CONFIG_NET_DSA_TAG_TRAILER 357#ifdef CONFIG_NET_DSA_TAG_TRAILER
321 case htons(ETH_P_TRAILER): 358 case htons(ETH_P_TRAILER):
322 slave_dev->hard_start_xmit = trailer_xmit; 359 slave_dev->netdev_ops = &trailer_netdev_ops;
323 break; 360 break;
324#endif 361#endif
325 default: 362 default:
326 BUG(); 363 BUG();
327 } 364 }
328 slave_dev->open = dsa_slave_open; 365
329 slave_dev->stop = dsa_slave_close;
330 slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
331 slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
332 slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
333 slave_dev->set_mac_address = dsa_slave_set_mac_address;
334 slave_dev->do_ioctl = dsa_slave_ioctl;
335 SET_NETDEV_DEV(slave_dev, parent); 366 SET_NETDEV_DEV(slave_dev, parent);
336 slave_dev->vlan_features = master->vlan_features; 367 slave_dev->vlan_features = master->vlan_features;
337 368
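
The dsa/slave.c change above is the standard net_device_ops conversion: the per-field function pointers that used to live on struct net_device (dev->open, dev->stop, dev->hard_start_xmit, ...) move into one shared, const ops table, selected here per tagging protocol. A hedged generic sketch of the pattern (the my_* names are placeholders):

	static int my_open(struct net_device *dev);
	static int my_stop(struct net_device *dev);
	static int my_xmit(struct sk_buff *skb, struct net_device *dev);

	static const struct net_device_ops my_netdev_ops = {
		.ndo_open	= my_open,
		.ndo_stop	= my_stop,
		.ndo_start_xmit	= my_xmit,
	};

	/* old style: dev->open = my_open; etc.   new style: */
	dev->netdev_ops = &my_netdev_ops;

One const table per driver replaces several writable pointers per device, shrinking struct net_device and letting the ops live in read-only memory.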
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index e52799047a5f..6bb2635b5ded 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -38,6 +38,7 @@
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/jhash.h> 40#include <linux/jhash.h>
41#include <linux/audit.h>
41#include <net/ip.h> 42#include <net/ip.h>
42#include <net/icmp.h> 43#include <net/icmp.h>
43#include <net/tcp.h> 44#include <net/tcp.h>
@@ -449,6 +450,7 @@ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
449/** 450/**
450 * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine 451 * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine
451 * @doi_def: the DOI structure 452 * @doi_def: the DOI structure
453 * @audit_info: NetLabel audit information
452 * 454 *
453 * Description: 455 * Description:
454 * The caller defines a new DOI for use by the CIPSO engine and calls this 456 * The caller defines a new DOI for use by the CIPSO engine and calls this
@@ -458,50 +460,78 @@ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
458 * zero on success and non-zero on failure. 460 * zero on success and non-zero on failure.
459 * 461 *
460 */ 462 */
461int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) 463int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
464 struct netlbl_audit *audit_info)
462{ 465{
466 int ret_val = -EINVAL;
463 u32 iter; 467 u32 iter;
468 u32 doi;
469 u32 doi_type;
470 struct audit_buffer *audit_buf;
471
472 doi = doi_def->doi;
473 doi_type = doi_def->type;
464 474
465 if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) 475 if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
466 return -EINVAL; 476 goto doi_add_return;
467 for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { 477 for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
468 switch (doi_def->tags[iter]) { 478 switch (doi_def->tags[iter]) {
469 case CIPSO_V4_TAG_RBITMAP: 479 case CIPSO_V4_TAG_RBITMAP:
470 break; 480 break;
471 case CIPSO_V4_TAG_RANGE: 481 case CIPSO_V4_TAG_RANGE:
472 if (doi_def->type != CIPSO_V4_MAP_PASS)
473 return -EINVAL;
474 break;
475 case CIPSO_V4_TAG_INVALID:
476 if (iter == 0)
477 return -EINVAL;
478 break;
479 case CIPSO_V4_TAG_ENUM: 482 case CIPSO_V4_TAG_ENUM:
480 if (doi_def->type != CIPSO_V4_MAP_PASS) 483 if (doi_def->type != CIPSO_V4_MAP_PASS)
481 return -EINVAL; 484 goto doi_add_return;
482 break; 485 break;
483 case CIPSO_V4_TAG_LOCAL: 486 case CIPSO_V4_TAG_LOCAL:
484 if (doi_def->type != CIPSO_V4_MAP_LOCAL) 487 if (doi_def->type != CIPSO_V4_MAP_LOCAL)
485 return -EINVAL; 488 goto doi_add_return;
489 break;
490 case CIPSO_V4_TAG_INVALID:
491 if (iter == 0)
492 goto doi_add_return;
486 break; 493 break;
487 default: 494 default:
488 return -EINVAL; 495 goto doi_add_return;
489 } 496 }
490 } 497 }
491 498
492 atomic_set(&doi_def->refcount, 1); 499 atomic_set(&doi_def->refcount, 1);
493 500
494 spin_lock(&cipso_v4_doi_list_lock); 501 spin_lock(&cipso_v4_doi_list_lock);
495 if (cipso_v4_doi_search(doi_def->doi) != NULL) 502 if (cipso_v4_doi_search(doi_def->doi) != NULL) {
496 goto doi_add_failure; 503 spin_unlock(&cipso_v4_doi_list_lock);
504 ret_val = -EEXIST;
505 goto doi_add_return;
506 }
497 list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); 507 list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
498 spin_unlock(&cipso_v4_doi_list_lock); 508 spin_unlock(&cipso_v4_doi_list_lock);
509 ret_val = 0;
499 510
500 return 0; 511doi_add_return:
512 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
513 if (audit_buf != NULL) {
514 const char *type_str;
515 switch (doi_type) {
516 case CIPSO_V4_MAP_TRANS:
517 type_str = "trans";
518 break;
519 case CIPSO_V4_MAP_PASS:
520 type_str = "pass";
521 break;
522 case CIPSO_V4_MAP_LOCAL:
523 type_str = "local";
524 break;
525 default:
526 type_str = "(unknown)";
527 }
528 audit_log_format(audit_buf,
529 " cipso_doi=%u cipso_type=%s res=%u",
530 doi, type_str, ret_val == 0 ? 1 : 0);
531 audit_log_end(audit_buf);
532 }
501 533
502doi_add_failure: 534 return ret_val;
503 spin_unlock(&cipso_v4_doi_list_lock);
504 return -EEXIST;
505} 535}
506 536
507/** 537/**
@@ -559,25 +589,39 @@ static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
559 */ 589 */
560int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) 590int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
561{ 591{
592 int ret_val;
562 struct cipso_v4_doi *doi_def; 593 struct cipso_v4_doi *doi_def;
594 struct audit_buffer *audit_buf;
563 595
564 spin_lock(&cipso_v4_doi_list_lock); 596 spin_lock(&cipso_v4_doi_list_lock);
565 doi_def = cipso_v4_doi_search(doi); 597 doi_def = cipso_v4_doi_search(doi);
566 if (doi_def == NULL) { 598 if (doi_def == NULL) {
567 spin_unlock(&cipso_v4_doi_list_lock); 599 spin_unlock(&cipso_v4_doi_list_lock);
568 return -ENOENT; 600 ret_val = -ENOENT;
601 goto doi_remove_return;
569 } 602 }
570 if (!atomic_dec_and_test(&doi_def->refcount)) { 603 if (!atomic_dec_and_test(&doi_def->refcount)) {
571 spin_unlock(&cipso_v4_doi_list_lock); 604 spin_unlock(&cipso_v4_doi_list_lock);
572 return -EBUSY; 605 ret_val = -EBUSY;
606 goto doi_remove_return;
573 } 607 }
574 list_del_rcu(&doi_def->list); 608 list_del_rcu(&doi_def->list);
575 spin_unlock(&cipso_v4_doi_list_lock); 609 spin_unlock(&cipso_v4_doi_list_lock);
576 610
577 cipso_v4_cache_invalidate(); 611 cipso_v4_cache_invalidate();
578 call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); 612 call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
613 ret_val = 0;
614
615doi_remove_return:
616 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
617 if (audit_buf != NULL) {
618 audit_log_format(audit_buf,
619 " cipso_doi=%u res=%u",
620 doi, ret_val == 0 ? 1 : 0);
621 audit_log_end(audit_buf);
622 }
579 623
580 return 0; 624 return ret_val;
581} 625}
582 626
583/** 627/**
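
Both CIPSO hunks above apply the same single-exit refactor: every failure path funnels to one label, where an audit record is emitted with the DOI and a boolean result, so add and remove operations are logged whether they succeed or fail. A hedged skeleton of the pattern (valid_input is a hypothetical stand-in for the real checks; netlbl_audit_start(), audit_log_format() and audit_log_end() are the APIs used in the hunks):

	static int doi_op_sketch(u32 doi, struct netlbl_audit *audit_info,
				 bool valid_input)
	{
		int ret_val = -EINVAL;
		struct audit_buffer *audit_buf;

		if (!valid_input)
			goto doi_return;
		ret_val = 0;		/* the real add/remove work happens here */

	doi_return:
		audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
		if (audit_buf != NULL) {
			audit_log_format(audit_buf, " cipso_doi=%u res=%u",
					 doi, ret_val == 0 ? 1 : 0);
			audit_log_end(audit_buf);
		}
		return ret_val;
	}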
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c7cda1ca8e65..f26ab38680de 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -633,8 +633,6 @@ void inet_csk_listen_stop(struct sock *sk)
633 633
634 acc_req = req->dl_next; 634 acc_req = req->dl_next;
635 635
636 percpu_counter_inc(sk->sk_prot->orphan_count);
637
638 local_bh_disable(); 636 local_bh_disable();
639 bh_lock_sock(child); 637 bh_lock_sock(child);
640 WARN_ON(sock_owned_by_user(child)); 638 WARN_ON(sock_owned_by_user(child));
@@ -644,6 +642,8 @@ void inet_csk_listen_stop(struct sock *sk)
644 642
645 sock_orphan(child); 643 sock_orphan(child);
646 644
645 percpu_counter_inc(sk->sk_prot->orphan_count);
646
647 inet_csk_destroy_sock(child); 647 inet_csk_destroy_sock(child);
648 648
649 bh_unlock_sock(child); 649 bh_unlock_sock(child);
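
This inet_connection_sock.c hunk, like the tcp.c and dccp/proto.c hunks nearby, moves the orphan-count increment to after bh_lock_sock(), i.e. inside the bottom-half-disabled region. The point is lock consistency: the percpu counter is then only ever touched with BHs off, so its internal spinlock can also be taken from softirq context without risk of deadlock. A hedged sketch of the resulting ordering on the destroy path:

	/* Sketch; child is a socket being orphaned, as in the hunk above. */
	local_bh_disable();
	bh_lock_sock(child);
	WARN_ON(sock_owned_by_user(child));
	sock_orphan(child);
	percpu_counter_inc(child->sk_prot->orphan_count);	/* BHs are off here */
	inet_csk_destroy_sock(child);
	bh_unlock_sock(child);
	local_bh_enable();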
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 313ebf00ee36..6ba5c557690c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -291,7 +291,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
291 if (*pos == 0) 291 if (*pos == 0)
292 return SEQ_START_TOKEN; 292 return SEQ_START_TOKEN;
293 293
294 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { 294 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
295 if (!cpu_possible(cpu)) 295 if (!cpu_possible(cpu))
296 continue; 296 continue;
297 *pos = cpu+1; 297 *pos = cpu+1;
@@ -306,7 +306,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
306 struct net *net = seq_file_net(seq); 306 struct net *net = seq_file_net(seq);
307 int cpu; 307 int cpu;
308 308
309 for (cpu = *pos; cpu < NR_CPUS; ++cpu) { 309 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
310 if (!cpu_possible(cpu)) 310 if (!cpu_possible(cpu))
311 continue; 311 continue;
312 *pos = cpu+1; 312 *pos = cpu+1;
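
Here and in the route.c hunk below, the iteration bound changes from NR_CPUS (the compile-time maximum) to nr_cpu_ids (the runtime number of CPU ids actually populated), so the per-CPU /proc walkers stop at the last possible CPU instead of scanning to the config ceiling. The seq_file code keeps an explicit index because it must resume from *pos across reads; where no position has to be kept, the idiomatic form is the cpumask iterator, sketched here:

	/* Hedged sketch; for_each_possible_cpu() comes from <linux/cpumask.h>. */
	unsigned int cpu, entries = 0;

	for_each_possible_cpu(cpu)
		entries++;	/* visits exactly the possible CPUs, no explicit bound */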
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 614958b7c276..eb62e58bff79 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -38,6 +38,7 @@
38#include <net/tcp.h> 38#include <net/tcp.h>
39#include <net/udp.h> 39#include <net/udp.h>
40#include <net/udplite.h> 40#include <net/udplite.h>
41#include <linux/bottom_half.h>
41#include <linux/inetdevice.h> 42#include <linux/inetdevice.h>
42#include <linux/proc_fs.h> 43#include <linux/proc_fs.h>
43#include <linux/seq_file.h> 44#include <linux/seq_file.h>
@@ -50,13 +51,17 @@
50static int sockstat_seq_show(struct seq_file *seq, void *v) 51static int sockstat_seq_show(struct seq_file *seq, void *v)
51{ 52{
52 struct net *net = seq->private; 53 struct net *net = seq->private;
54 int orphans, sockets;
55
56 local_bh_disable();
57 orphans = percpu_counter_sum_positive(&tcp_orphan_count),
58 sockets = percpu_counter_sum_positive(&tcp_sockets_allocated),
59 local_bh_enable();
53 60
54 socket_seq_show(seq); 61 socket_seq_show(seq);
55 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
56 sock_prot_inuse_get(net, &tcp_prot), 63 sock_prot_inuse_get(net, &tcp_prot), orphans,
57 (int)percpu_counter_sum_positive(&tcp_orphan_count), 64 tcp_death_row.tw_count, sockets,
58 tcp_death_row.tw_count,
59 (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
60 atomic_read(&tcp_memory_allocated)); 65 atomic_read(&tcp_memory_allocated));
61 seq_printf(seq, "UDP: inuse %d mem %d\n", 66 seq_printf(seq, "UDP: inuse %d mem %d\n",
62 sock_prot_inuse_get(net, &udp_prot), 67 sock_prot_inuse_get(net, &udp_prot),
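
The sockstat change above pairs with the orphan-count moves elsewhere in this merge: tcp_orphan_count and tcp_sockets_allocated are now updated from BH context, so summing them must also run with bottom halves disabled, or a softirq on the same CPU could contend on the counter's internal spinlock. (The patch chains the two sums and local_bh_enable() with comma operators, which is valid C and equivalent to semicolons.) A hedged sketch of the read side:

	int orphans, sockets;

	local_bh_disable();	/* keep softirqs from taking the counter lock */
	orphans = percpu_counter_sum_positive(&tcp_orphan_count);
	sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
	local_bh_enable();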
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 77bfba975959..97f71153584f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
429 if (*pos == 0) 429 if (*pos == 0)
430 return SEQ_START_TOKEN; 430 return SEQ_START_TOKEN;
431 431
432 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { 432 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
433 if (!cpu_possible(cpu)) 433 if (!cpu_possible(cpu))
434 continue; 434 continue;
435 *pos = cpu+1; 435 *pos = cpu+1;
@@ -442,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
442{ 442{
443 int cpu; 443 int cpu;
444 444
445 for (cpu = *pos; cpu < NR_CPUS; ++cpu) { 445 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
446 if (!cpu_possible(cpu)) 446 if (!cpu_possible(cpu))
447 continue; 447 continue;
448 *pos = cpu+1; 448 *pos = cpu+1;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1f3d52946b3b..ce572f9dff02 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -580,10 +580,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 		else if (!ret) {
 			if (spliced)
 				break;
-			if (flags & SPLICE_F_NONBLOCK) {
-				ret = -EAGAIN;
-				break;
-			}
 			if (sock_flag(sk, SOCK_DONE))
 				break;
 			if (sk->sk_err) {
@@ -1317,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if ((available < target) &&
 	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency &&
-	    __get_cpu_var(softnet_data).net_dma) {
+	    dma_find_channel(DMA_MEMCPY)) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list =
 			dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1527,7 +1523,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1632,7 +1628,6 @@ skip_copy:
 
 		/* Safe to free early-copied skbs now */
 		__skb_queue_purge(&sk->sk_async_wait_queue);
-		dma_chan_put(tp->ucopy.dma_chan);
 		tp->ucopy.dma_chan = NULL;
 	}
 	if (tp->ucopy.pinned_list) {
@@ -1836,7 +1831,6 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/* It is the last release_sock in its life. It will remove backlog. */
 	release_sock(sk);
@@ -1849,6 +1843,8 @@ adjudge_to_death:
 	bh_lock_sock(sk);
 	WARN_ON(sock_owned_by_user(sk));
 
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -2518,9 +2514,7 @@ found:
 	flush |= memcmp(th + 1, th2 + 1, thlen - sizeof(*th));
 
 	total = p->len;
-	mss = total;
-	if (skb_shinfo(p)->frag_list)
-		mss = skb_shinfo(p)->frag_list->len;
+	mss = skb_shinfo(p)->gso_size;
 
 	flush |= skb->len > mss || skb->len <= 0;
 	flush |= ntohl(th2->seq) + total != ntohl(th->seq);
@@ -2547,6 +2541,7 @@ out:
 
 	return pp;
 }
+EXPORT_SYMBOL(tcp_gro_receive);
 
 int tcp_gro_complete(struct sk_buff *skb)
 {
@@ -2556,7 +2551,6 @@ int tcp_gro_complete(struct sk_buff *skb)
 	skb->csum_offset = offsetof(struct tcphdr, check);
 	skb->ip_summed = CHECKSUM_PARTIAL;
 
-	skb_shinfo(skb)->gso_size = skb_shinfo(skb)->frag_list->len;
 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
 
 	if (th->cwr)
@@ -2564,6 +2558,7 @@ int tcp_gro_complete(struct sk_buff *skb)
 
 	return 0;
 }
+EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
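
The recurring edit in these tcp.c hunks replaces the old NET_DMA helper get_softnet_dma(), which took a reference on a per-softnet-queue channel and therefore needed the dma_chan_put() that disappears above, with the dmaengine core's dma_find_channel(). A minimal sketch of the resulting consumer side, assuming CONFIG_NET_DMA and the 2.6.29-era dmaengine API:

	#include <linux/dmaengine.h>

	/* Sketch only: grab a memcpy-capable channel for opportunistic
	 * receive offload.  dma_find_channel() hands back a channel that
	 * the dmaengine core owns, so there is no put on the release
	 * path -- callers simply forget the pointer, as in
	 * tp->ucopy.dma_chan = NULL above. */
	static struct dma_chan *net_dma_channel(void)
	{
		return dma_find_channel(DMA_MEMCPY);
	}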
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe8893..a6961d75c7ea 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 10172487921b..19d7b429a262 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -51,6 +51,7 @@
  */
 
 
+#include <linux/bottom_half.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
@@ -1593,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
@@ -1797,7 +1798,9 @@ static int tcp_v4_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+	local_bh_disable();
 	percpu_counter_inc(&tcp_sockets_allocated);
+	local_bh_enable();
 
 	return 0;
 }
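
percpu_counter_inc() is not safe against a softirq updating the same counter on the same CPU, and tcp_sockets_allocated is also touched from BH context; hence the new local_bh_disable()/local_bh_enable() bracket and the matching bottom_half.h include. The same reasoning moved the orphan_count increment in tcp_close() under bh_lock_sock() in the tcp.c hunks earlier. The pattern, reduced to a sketch under the same assumption (a counter shared with softirq paths):

	#include <linux/bottom_half.h>
	#include <linux/percpu_counter.h>

	static void count_socket(struct percpu_counter *ctr)
	{
		local_bh_disable();	/* keep softirq updates off this CPU... */
		percpu_counter_inc(ctr);
		local_bh_enable();	/* ...while the non-atomic fast path runs */
	}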
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750b98fd..94f74f5b0cbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
 
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
-static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
-						    int proto)
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
 {
 	struct inet6_protocol *ops = NULL;
 
@@ -704,7 +703,7 @@ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
 		__skb_pull(skb, len);
 	}
 
-	return ops;
+	return proto;
 }
 
 static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_send_check)) {
 		skb_reset_transport_header(skb);
 		err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_segment)) {
 		skb_reset_transport_header(skb);
 		segs = ops->gso_segment(skb, features);
@@ -777,11 +780,105 @@ out:
 	return segs;
 }
 
+struct ipv6_gro_cb {
+	struct napi_gro_cb napi;
+	int proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct sk_buff **pp = NULL;
+	struct sk_buff *p;
+	struct ipv6hdr *iph;
+	unsigned int nlen;
+	int flush = 1;
+	int proto;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*iph));
+
+	flush += ntohs(iph->payload_len) != skb->len;
+
+	rcu_read_lock();
+	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+	IPV6_GRO_CB(skb)->proto = proto;
+	ops = rcu_dereference(inet6_protos[proto]);
+	if (!ops || !ops->gro_receive)
+		goto out_unlock;
+
+	flush--;
+	skb_reset_transport_header(skb);
+	nlen = skb_network_header_len(skb);
+
+	for (p = *head; p; p = p->next) {
+		struct ipv6hdr *iph2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		iph2 = ipv6_hdr(p);
+
+		/* All fields must match except length. */
+		if (nlen != skb_network_header_len(p) ||
+		    memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		NAPI_GRO_CB(p)->flush |= flush;
+	}
+
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	pp = ops->gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	int err = -ENOSYS;
+
+	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+				 sizeof(*iph));
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+	if (WARN_ON(!ops || !ops->gro_complete))
+		goto out_unlock;
+
+	err = ops->gro_complete(skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
 	.func = ipv6_rcv,
 	.gso_send_check = ipv6_gso_send_check,
 	.gso_segment = ipv6_gso_segment,
+	.gro_receive = ipv6_gro_receive,
+	.gro_complete = ipv6_gro_complete,
 };
 
 static int __init ipv6_packet_init(void)
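
The new ipv6_gro_receive()/ipv6_gro_complete() pair shows GRO's division of labour: gro_receive runs once per incoming segment and decides whether it can merge with a packet already held on the list (stashing the transport protocol number in skb->cb for later), while gro_complete runs once when the merged super-packet is flushed and must repair the headers. A bare-bones skeleton of such a pair for a hypothetical protocol, just to show the contract:

	/* Hypothetical "foo" protocol handlers; illustrative only. */
	static struct sk_buff **foo_gro_receive(struct sk_buff **head,
						struct sk_buff *skb)
	{
		/* A real handler compares skb against each held packet,
		 * clears NAPI_GRO_CB(p)->same_flow on mismatches, and sets
		 * ->flush when a segment cannot be merged.  This stub
		 * simply refuses to merge anything. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	static int foo_gro_complete(struct sk_buff *skb)
	{
		/* Fix up lengths/checksums of the merged packet here,
		 * before it enters the normal receive path. */
		return 0;
	}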
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index eeeaad2e8b5c..40f324655e24 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -404,7 +404,7 @@ sticky_done:
 		else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
 			goto e_inval;
 
-		if (copy_from_user(&pkt, optval, optlen)) {
+		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
 			retv = -EFAULT;
 			break;
 		}
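
This one-liner closes a stack overwrite: optlen is user-controlled and was only checked for a lower bound, so an oversized value would copy past the on-stack struct in6_pktinfo. The safe shape bounds the copy by the destination's size, never by the caller's length; a minimal sketch:

	struct in6_pktinfo pkt;	/* fixed-size kernel destination */

	if (optlen < sizeof(pkt) || optval == NULL)
		return -EINVAL;
	/* Copy exactly sizeof(pkt); any user-supplied excess is ignored. */
	if (copy_from_user(&pkt, optval, sizeof(pkt)))
		return -EFAULT;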
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 18c486cf4987..c4a59824ac2c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -627,6 +627,9 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 	rt = ip6_rt_copy(ort);
 
 	if (rt) {
+		struct neighbour *neigh;
+		int attempts = !in_softirq();
+
 		if (!(rt->rt6i_flags&RTF_GATEWAY)) {
 			if (rt->rt6i_dst.plen != 128 &&
 			    ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
@@ -646,7 +649,35 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 		}
 #endif
 
-		rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+	retry:
+		neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+		if (IS_ERR(neigh)) {
+			struct net *net = dev_net(rt->rt6i_dev);
+			int saved_rt_min_interval =
+				net->ipv6.sysctl.ip6_rt_gc_min_interval;
+			int saved_rt_elasticity =
+				net->ipv6.sysctl.ip6_rt_gc_elasticity;
+
+			if (attempts-- > 0) {
+				net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
+				net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
+
+				ip6_dst_gc(net->ipv6.ip6_dst_ops);
+
+				net->ipv6.sysctl.ip6_rt_gc_elasticity =
+					saved_rt_elasticity;
+				net->ipv6.sysctl.ip6_rt_gc_min_interval =
+					saved_rt_min_interval;
+				goto retry;
+			}
+
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "Neighbour table overflow.\n");
+			dst_free(&rt->u.dst);
+			return NULL;
+		}
+		rt->rt6i_nexthop = neigh;
 
 	}
 
@@ -945,8 +976,11 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	dev_hold(dev);
 	if (neigh)
 		neigh_hold(neigh);
-	else
+	else {
 		neigh = ndisc_get_neigh(dev, addr);
+		if (IS_ERR(neigh))
+			neigh = NULL;
+	}
 
 	rt->rt6i_dev = dev;
 	rt->rt6i_idev = idev;
@@ -1887,6 +1921,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
+	struct neighbour *neigh;
 
 	if (rt == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1909,11 +1944,18 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 		rt->rt6i_flags |= RTF_ANYCAST;
 	else
 		rt->rt6i_flags |= RTF_LOCAL;
-	rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
-	if (rt->rt6i_nexthop == NULL) {
+	neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+	if (IS_ERR(neigh)) {
 		dst_free(&rt->u.dst);
-		return ERR_PTR(-ENOMEM);
+
+		/* We are casting this because that is the return
+		 * value type.  But an errno encoded pointer is the
+		 * same regardless of the underlying pointer type,
+		 * and that's what we are returning.  So this is OK.
+		 */
+		return (struct rt6_info *) neigh;
 	}
+	rt->rt6i_nexthop = neigh;
 
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
@@ -2710,7 +2752,7 @@ int __init ip6_route_init(void)
 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!ip6_dst_ops_template.kmem_cachep)
-		goto out;;
+		goto out;
 
 	ret = register_pernet_subsys(&ip6_route_net_ops);
 	if (ret)
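
These route.c changes all follow from ndisc_get_neigh() now returning an ERR_PTR-encoded errno rather than NULL, which lets callers distinguish "neighbour table full, worth forcing GC and retrying" from a plain allocation failure. The idiom in isolation (some_lookup() is a hypothetical stand-in):

	#include <linux/err.h>

	static struct neighbour *lookup_nexthop(void)
	{
		struct neighbour *n = some_lookup();	/* hypothetical helper */

		if (IS_ERR(n)) {
			/* PTR_ERR() recovers the negative errno encoded in
			 * the pointer; callers can branch on it. */
			printk(KERN_WARNING "lookup failed: %ld\n", PTR_ERR(n));
			return NULL;
		}
		return n;	/* valid pointer from here on */
	}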
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7e7ea7..a031034720b4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header;
 
 int ipv6_sysctl_register(void)
 {
-	int err = -ENOMEM;;
+	int err = -ENOMEM;
 
 	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
 	if (ip6_header == NULL)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8702b06cb60a..e5b85d45bee8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -23,6 +23,7 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#include <linux/bottom_half.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -100,7 +101,7 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
 				       struct in6_addr *saddr,
 				       struct in6_addr *daddr,
 				       __wsum base)
@@ -500,7 +501,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
 	if (skb) {
 		struct tcphdr *th = tcp_hdr(skb);
 
-		th->check = tcp_v6_check(th, skb->len,
+		th->check = tcp_v6_check(skb->len,
 					 &treq->loc_addr, &treq->rmt_addr,
 					 csum_partial(th, skb->len, skb->csum));
 
@@ -941,6 +942,41 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	return 0;
 }
 
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+
+		/* fall through */
+	case CHECKSUM_NONE:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+	return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 ts, struct tcp_md5sig_key *key, int rst)
 {
@@ -1428,14 +1464,14 @@ out:
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
 		}
 	}
 
-	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
 					      &ipv6_hdr(skb)->saddr,
 					      &ipv6_hdr(skb)->daddr, 0));
 
@@ -1639,7 +1675,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
@@ -1830,7 +1866,9 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+	local_bh_disable();
 	percpu_counter_inc(&tcp_sockets_allocated);
+	local_bh_enable();
 
 	return 0;
 }
@@ -2059,6 +2097,8 @@ static struct inet6_protocol tcpv6_protocol = {
 	.err_handler = tcp_v6_err,
 	.gso_send_check = tcp_v6_gso_send_check,
 	.gso_segment = tcp_tso_segment,
+	.gro_receive = tcp6_gro_receive,
+	.gro_complete = tcp6_gro_complete,
 	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
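
tcp6_gro_receive() refuses to aggregate anything it cannot prove checksum-correct: a CHECKSUM_COMPLETE sum is verified against the pseudo-header (and downgraded to CHECKSUM_UNNECESSARY on success), while an unverifiable or absent sum falls through to a flush. The gate, reduced to its shape (csum_ok() is a hypothetical stand-in for the pseudo-header check):

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (csum_ok(skb)) {			/* hypothetical check */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;				/* verified: may merge */
		}
		/* fall through: an unverifiable sum is as bad as none */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;		/* never merge this one */
		return NULL;
	}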
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index e4e2caeb9d82..086d5ef098fd 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -371,9 +371,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
 	line = tty->index;
-	if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) {
+	if (line >= IRCOMM_TTY_PORTS)
 		return -ENODEV;
-	}
 
 	/* Check if instance already exists */
 	self = hashbin_lock_find(ircomm_tty, line, NULL);
@@ -405,6 +404,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
 	 * Force TTY into raw mode by default which is usually what
 	 * we want for IrCOMM and IrLPT. This way applications will
 	 * not have to twiddle with printcap etc.
+	 *
+	 * Note this is completely unsafe and doesn't work properly
 	 */
 	tty->termios->c_iflag = 0;
 	tty->termios->c_oflag = 0;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index af3192d2a5a3..eb8a2a0b6eb7 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -494,7 +494,21 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 	if (err) {
 		iucv_path_free(iucv->path);
 		iucv->path = NULL;
-		err = -ECONNREFUSED;
+		switch (err) {
+		case 0x0b:	/* Target communicator is not logged on */
+			err = -ENETUNREACH;
+			break;
+		case 0x0d:	/* Max connections for this guest exceeded */
+		case 0x0e:	/* Max connections for target guest exceeded */
+			err = -EAGAIN;
+			break;
+		case 0x0f:	/* Missing IUCV authorization */
+			err = -EACCES;
+			break;
+		default:
+			err = -ECONNREFUSED;
+			break;
+		}
 		goto done;
 	}
 
@@ -507,6 +521,13 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 		release_sock(sk);
 		return -ECONNREFUSED;
 	}
+
+	if (err) {
+		iucv_path_sever(iucv->path, NULL);
+		iucv_path_free(iucv->path);
+		iucv->path = NULL;
+	}
+
 done:
 	release_sock(sk);
 	return err;
@@ -1021,12 +1042,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	ASCEBC(user_data, sizeof(user_data));
 	if (sk->sk_state != IUCV_LISTEN) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
 		goto fail;
 	}
 
 	/* Check for backlog size */
 	if (sk_acceptq_is_full(sk)) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
 		goto fail;
 	}
 
@@ -1034,6 +1057,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
 	if (!nsk) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
 		goto fail;
 	}
 
@@ -1057,6 +1081,8 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
 	if (err) {
 		err = iucv_path_sever(path, user_data);
+		iucv_path_free(path);
+		iucv_sock_kill(nsk);
 		goto fail;
 	}
 
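
Two themes in the af_iucv hunks: the raw IUCV connect status codes are translated into conventional errnos instead of a blanket -ECONNREFUSED, and every refused incoming path is now freed, not merely severed. The sever-then-free pairing repeats four times; a tiny helper expressing that invariant (a sketch, not something the patch adds):

	/* Hypothetical helper: a path we reject must be both severed
	 * (terminated on the z/VM side) and freed (kernel memory). */
	static void iucv_path_reject(struct iucv_path *path, u8 *user_data)
	{
		iucv_path_sever(path, user_data);
		iucv_path_free(path);
	}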
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f57d4f4328a..a35240f61ec3 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -50,7 +50,6 @@
50#include <asm/ebcdic.h> 50#include <asm/ebcdic.h>
51#include <asm/io.h> 51#include <asm/io.h>
52#include <asm/s390_ext.h> 52#include <asm/s390_ext.h>
53#include <asm/s390_rdev.h>
54#include <asm/smp.h> 53#include <asm/smp.h>
55 54
56/* 55/*
@@ -517,6 +516,7 @@ static int iucv_enable(void)
517 size_t alloc_size; 516 size_t alloc_size;
518 int cpu, rc; 517 int cpu, rc;
519 518
519 get_online_cpus();
520 rc = -ENOMEM; 520 rc = -ENOMEM;
521 alloc_size = iucv_max_pathid * sizeof(struct iucv_path); 521 alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
522 iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); 522 iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
@@ -524,19 +524,17 @@ static int iucv_enable(void)
524 goto out; 524 goto out;
525 /* Declare per cpu buffers. */ 525 /* Declare per cpu buffers. */
526 rc = -EIO; 526 rc = -EIO;
527 get_online_cpus();
528 for_each_online_cpu(cpu) 527 for_each_online_cpu(cpu)
529 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 528 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
530 if (cpus_empty(iucv_buffer_cpumask)) 529 if (cpus_empty(iucv_buffer_cpumask))
531 /* No cpu could declare an iucv buffer. */ 530 /* No cpu could declare an iucv buffer. */
532 goto out_path; 531 goto out;
533 put_online_cpus(); 532 put_online_cpus();
534 return 0; 533 return 0;
535
536out_path:
537 put_online_cpus();
538 kfree(iucv_path_table);
539out: 534out:
535 kfree(iucv_path_table);
536 iucv_path_table = NULL;
537 put_online_cpus();
540 return rc; 538 return rc;
541} 539}
542 540
@@ -551,8 +549,9 @@ static void iucv_disable(void)
551{ 549{
552 get_online_cpus(); 550 get_online_cpus();
553 on_each_cpu(iucv_retrieve_cpu, NULL, 1); 551 on_each_cpu(iucv_retrieve_cpu, NULL, 1);
554 put_online_cpus();
555 kfree(iucv_path_table); 552 kfree(iucv_path_table);
553 iucv_path_table = NULL;
554 put_online_cpus();
556} 555}
557 556
558static int __cpuinit iucv_cpu_notify(struct notifier_block *self, 557static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
@@ -589,10 +588,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
589 case CPU_ONLINE_FROZEN: 588 case CPU_ONLINE_FROZEN:
590 case CPU_DOWN_FAILED: 589 case CPU_DOWN_FAILED:
591 case CPU_DOWN_FAILED_FROZEN: 590 case CPU_DOWN_FAILED_FROZEN:
591 if (!iucv_path_table)
592 break;
592 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 593 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
593 break; 594 break;
594 case CPU_DOWN_PREPARE: 595 case CPU_DOWN_PREPARE:
595 case CPU_DOWN_PREPARE_FROZEN: 596 case CPU_DOWN_PREPARE_FROZEN:
597 if (!iucv_path_table)
598 break;
596 cpumask = iucv_buffer_cpumask; 599 cpumask = iucv_buffer_cpumask;
597 cpu_clear(cpu, cpumask); 600 cpu_clear(cpu, cpumask);
598 if (cpus_empty(cpumask)) 601 if (cpus_empty(cpumask))
@@ -1692,7 +1695,7 @@ static int __init iucv_init(void)
1692 rc = register_external_interrupt(0x4000, iucv_external_interrupt); 1695 rc = register_external_interrupt(0x4000, iucv_external_interrupt);
1693 if (rc) 1696 if (rc)
1694 goto out; 1697 goto out;
1695 iucv_root = s390_root_dev_register("iucv"); 1698 iucv_root = root_device_register("iucv");
1696 if (IS_ERR(iucv_root)) { 1699 if (IS_ERR(iucv_root)) {
1697 rc = PTR_ERR(iucv_root); 1700 rc = PTR_ERR(iucv_root);
1698 goto out_int; 1701 goto out_int;
@@ -1736,7 +1739,7 @@ out_free:
1736 kfree(iucv_irq_data[cpu]); 1739 kfree(iucv_irq_data[cpu]);
1737 iucv_irq_data[cpu] = NULL; 1740 iucv_irq_data[cpu] = NULL;
1738 } 1741 }
1739 s390_root_dev_unregister(iucv_root); 1742 root_device_unregister(iucv_root);
1740out_int: 1743out_int:
1741 unregister_external_interrupt(0x4000, iucv_external_interrupt); 1744 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1742out: 1745out:
@@ -1766,7 +1769,7 @@ static void __exit iucv_exit(void)
1766 kfree(iucv_irq_data[cpu]); 1769 kfree(iucv_irq_data[cpu]);
1767 iucv_irq_data[cpu] = NULL; 1770 iucv_irq_data[cpu] = NULL;
1768 } 1771 }
1769 s390_root_dev_unregister(iucv_root); 1772 root_device_unregister(iucv_root);
1770 bus_unregister(&iucv_bus); 1773 bus_unregister(&iucv_bus);
1771 unregister_external_interrupt(0x4000, iucv_external_interrupt); 1774 unregister_external_interrupt(0x4000, iucv_external_interrupt);
1772} 1775}
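
The iucv_enable()/iucv_disable() reshuffle widens the CPU-hotplug read lock to cover both allocating iucv_path_table and tearing it down, and the notifier now tests the pointer before touching per-cpu buffers; together these close the window where a CPU could come online against a half-built table. The generic shape, under the same assumptions (alloc_state()/setup_cpu() are placeholders):

	#include <linux/cpu.h>

	static void *table;	/* also inspected by the hotplug notifier */

	static int subsys_enable(void)
	{
		get_online_cpus();	/* no CPUs may come or go below */
		table = alloc_state();	/* hypothetical */
		if (table) {
			int cpu;
			for_each_online_cpu(cpu)
				setup_cpu(cpu, table);	/* hypothetical */
		}
		put_online_cpus();
		return table ? 0 : -ENOMEM;
	}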
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9394f539966a..3eb5e2660c49 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -507,7 +507,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* No cache entry or it is invalid, time to schedule */
 	dest = __ip_vs_lblc_schedule(svc);
 	if (!dest) {
-		IP_VS_DBG(1, "no destination available\n");
+		IP_VS_ERR_RL("LBLC: no destination available\n");
 		return NULL;
 	}
 
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 92dc76a6842c..c04ce56c7f0f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -690,7 +690,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* The cache entry is invalid, time to schedule */
 	dest = __ip_vs_lblcr_schedule(svc);
 	if (!dest) {
-		IP_VS_DBG(1, "no destination available\n");
+		IP_VS_ERR_RL("LBLCR: no destination available\n");
 		read_unlock(&svc->sched_lock);
 		return NULL;
 	}
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 51912cab777b..d0dadc8a65fd 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -66,11 +66,15 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 	}
 
-	if (least)
-		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
-			      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
-			      atomic_read(&least->activeconns),
-			      atomic_read(&least->inactconns));
+	if (!least)
+		IP_VS_ERR_RL("LC: no destination available\n");
+	else
+		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
+			      "inactconns %d\n",
+			      IP_VS_DBG_ADDR(svc->af, &least->addr),
+			      ntohs(least->port),
+			      atomic_read(&least->activeconns),
+			      atomic_read(&least->inactconns));
 
 	return least;
 }
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index 6758ad2ceaaf..694952db5026 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -95,8 +95,10 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 	}
 
-	if (!least)
+	if (!least) {
+		IP_VS_ERR_RL("NQ: no destination available\n");
 		return NULL;
+	}
 
   out:
 	IP_VS_DBG_BUF(6, "NQ: server %s:%u "
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 8fb51c169eb8..2d16ab7f8c1e 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -69,6 +69,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		q = q->next;
 	} while (q != p);
 	write_unlock(&svc->sched_lock);
+	IP_VS_ERR_RL("RR: no destination available\n");
 	return NULL;
 
   out:
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 691a6a0086e1..20e4657d2f3b 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -84,6 +84,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
+	IP_VS_ERR_RL("SED: no destination available\n");
 	return NULL;
 
 	/*
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 0e53955ef139..75709ebeb630 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -219,6 +219,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	    || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
 	    || atomic_read(&dest->weight) <= 0
 	    || is_overloaded(dest)) {
+		IP_VS_ERR_RL("SH: no destination available\n");
 		return NULL;
 	}
 
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 57b452bbb4ea..8e942565b47d 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -72,6 +72,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
+	IP_VS_ERR_RL("WLC: no destination available\n");
 	return NULL;
 
 	/*
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 2f618dc29c5b..f7d74ef1ecf9 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -155,6 +155,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
 	if (mark->cl == mark->cl->next) {
 		/* no dest entry */
+		IP_VS_ERR_RL("WRR: no destination available: "
+			     "no destinations present\n");
 		dest = NULL;
 		goto out;
 	}
@@ -168,8 +170,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			 */
 			if (mark->cw == 0) {
 				mark->cl = &svc->destinations;
-				IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
-					     "no available servers\n");
+				IP_VS_ERR_RL("WRR: no destination "
+					     "available\n");
 				dest = NULL;
 				goto out;
 			}
@@ -191,6 +193,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			/* back to the start, and no dest is found.
 			   It is only possible when all dests are OVERLOADED */
 			dest = NULL;
+			IP_VS_ERR_RL("WRR: no destination available: "
+				     "all destinations are overloaded\n");
 			goto out;
 		}
 	}
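
All of the IPVS schedulers above now emit a uniform, rate-limited "no destination available" diagnostic instead of (or alongside) debug-only output, so an empty or fully overloaded pool becomes visible in production logs without flooding them. IP_VS_ERR_RL wraps the kernel's standard rate-limit guard; the underlying idiom:

	/* Generic rate-limited warning (the pattern IP_VS_ERR_RL builds on). */
	if (net_ratelimit())
		printk(KERN_ERR "IPVS: scheduler found no destination\n");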
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f37b9b74c6a8..4da54b0b9233 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -200,7 +200,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu + 1;
@@ -215,7 +215,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct net *net = seq_file_net(seq);
 	int cpu;
 
-	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
 		*pos = cpu + 1;
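
nr_cpu_ids is the runtime number of valid CPU IDs, while NR_CPUS is the compile-time maximum; bounding the seq_file walk by the former skips a potentially large tail of IDs that can never be possible on the running machine. The resulting per-cpu iteration shape:

	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		/* ... visit this CPU's per-cpu statistics ... */
	}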
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index fff32b70efa9..bf1ab1a6790d 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -130,6 +130,7 @@ static int netlbl_cipsov4_add_common(struct genl_info *info,
 /**
  * netlbl_cipsov4_add_std - Adds a CIPSO V4 DOI definition
  * @info: the Generic NETLINK info block
+ * @audit_info: NetLabel audit information
  *
  * Description:
  * Create a new CIPSO_V4_MAP_TRANS DOI definition based on the given ADD
@@ -137,7 +138,8 @@ static int netlbl_cipsov4_add_common(struct genl_info *info,
  * non-zero on error.
  *
  */
-static int netlbl_cipsov4_add_std(struct genl_info *info)
+static int netlbl_cipsov4_add_std(struct genl_info *info,
+				  struct netlbl_audit *audit_info)
 {
 	int ret_val = -EINVAL;
 	struct cipso_v4_doi *doi_def = NULL;
@@ -316,7 +318,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info)
 		}
 	}
 
-	ret_val = cipso_v4_doi_add(doi_def);
+	ret_val = cipso_v4_doi_add(doi_def, audit_info);
 	if (ret_val != 0)
 		goto add_std_failure;
 	return 0;
@@ -330,6 +332,7 @@ add_std_failure:
 /**
  * netlbl_cipsov4_add_pass - Adds a CIPSO V4 DOI definition
  * @info: the Generic NETLINK info block
+ * @audit_info: NetLabel audit information
  *
  * Description:
  * Create a new CIPSO_V4_MAP_PASS DOI definition based on the given ADD message
@@ -337,7 +340,8 @@ add_std_failure:
  * error.
  *
  */
-static int netlbl_cipsov4_add_pass(struct genl_info *info)
+static int netlbl_cipsov4_add_pass(struct genl_info *info,
+				   struct netlbl_audit *audit_info)
 {
 	int ret_val;
 	struct cipso_v4_doi *doi_def = NULL;
@@ -354,7 +358,7 @@ static int netlbl_cipsov4_add_pass(struct genl_info *info)
 	if (ret_val != 0)
 		goto add_pass_failure;
 
-	ret_val = cipso_v4_doi_add(doi_def);
+	ret_val = cipso_v4_doi_add(doi_def, audit_info);
 	if (ret_val != 0)
 		goto add_pass_failure;
 	return 0;
@@ -367,6 +371,7 @@ add_pass_failure:
 /**
  * netlbl_cipsov4_add_local - Adds a CIPSO V4 DOI definition
  * @info: the Generic NETLINK info block
+ * @audit_info: NetLabel audit information
  *
  * Description:
  * Create a new CIPSO_V4_MAP_LOCAL DOI definition based on the given ADD
@@ -374,7 +379,8 @@ add_pass_failure:
  * non-zero on error.
  *
  */
-static int netlbl_cipsov4_add_local(struct genl_info *info)
+static int netlbl_cipsov4_add_local(struct genl_info *info,
+				    struct netlbl_audit *audit_info)
 {
 	int ret_val;
 	struct cipso_v4_doi *doi_def = NULL;
@@ -391,7 +397,7 @@ static int netlbl_cipsov4_add_local(struct genl_info *info)
 	if (ret_val != 0)
 		goto add_local_failure;
 
-	ret_val = cipso_v4_doi_add(doi_def);
+	ret_val = cipso_v4_doi_add(doi_def, audit_info);
 	if (ret_val != 0)
 		goto add_local_failure;
 	return 0;
@@ -415,48 +421,31 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
 
 {
 	int ret_val = -EINVAL;
-	u32 type;
-	u32 doi;
 	const char *type_str = "(unknown)";
-	struct audit_buffer *audit_buf;
 	struct netlbl_audit audit_info;
 
 	if (!info->attrs[NLBL_CIPSOV4_A_DOI] ||
 	    !info->attrs[NLBL_CIPSOV4_A_MTYPE])
 		return -EINVAL;
 
-	doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
 	netlbl_netlink_auditinfo(skb, &audit_info);
-
-	type = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE]);
-	switch (type) {
+	switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
 	case CIPSO_V4_MAP_TRANS:
 		type_str = "trans";
-		ret_val = netlbl_cipsov4_add_std(info);
+		ret_val = netlbl_cipsov4_add_std(info, &audit_info);
 		break;
 	case CIPSO_V4_MAP_PASS:
 		type_str = "pass";
-		ret_val = netlbl_cipsov4_add_pass(info);
+		ret_val = netlbl_cipsov4_add_pass(info, &audit_info);
 		break;
 	case CIPSO_V4_MAP_LOCAL:
 		type_str = "local";
-		ret_val = netlbl_cipsov4_add_local(info);
+		ret_val = netlbl_cipsov4_add_local(info, &audit_info);
 		break;
 	}
 	if (ret_val == 0)
 		atomic_inc(&netlabel_mgmt_protocount);
 
-	audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD,
-					      &audit_info);
-	if (audit_buf != NULL) {
-		audit_log_format(audit_buf,
-				 " cipso_doi=%u cipso_type=%s res=%u",
-				 doi,
-				 type_str,
-				 ret_val == 0 ? 1 : 0);
-		audit_log_end(audit_buf);
-	}
-
 	return ret_val;
 }
 
@@ -725,9 +714,7 @@ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg)
 static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
 {
 	int ret_val = -EINVAL;
-	u32 doi = 0;
 	struct netlbl_domhsh_walk_arg cb_arg;
-	struct audit_buffer *audit_buf;
 	struct netlbl_audit audit_info;
 	u32 skip_bkt = 0;
 	u32 skip_chain = 0;
@@ -735,29 +722,17 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
 	if (!info->attrs[NLBL_CIPSOV4_A_DOI])
 		return -EINVAL;
 
-	doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
 	netlbl_netlink_auditinfo(skb, &audit_info);
-
-	cb_arg.doi = doi;
+	cb_arg.doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
 	cb_arg.audit_info = &audit_info;
 	ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain,
 				     netlbl_cipsov4_remove_cb, &cb_arg);
 	if (ret_val == 0 || ret_val == -ENOENT) {
-		ret_val = cipso_v4_doi_remove(doi, &audit_info);
+		ret_val = cipso_v4_doi_remove(cb_arg.doi, &audit_info);
 		if (ret_val == 0)
 			atomic_dec(&netlabel_mgmt_protocount);
 	}
 
-	audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL,
-					      &audit_info);
-	if (audit_buf != NULL) {
-		audit_log_format(audit_buf,
-				 " cipso_doi=%u res=%u",
-				 doi,
-				 ret_val == 0 ? 1 : 0);
-		audit_log_end(audit_buf);
-	}
-
 	return ret_val;
 }
 
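
The pattern across these hunks: instead of each generic-netlink handler composing its own AUDIT_MAC_CIPSOV4_ADD/DEL record, the caller's audit context is threaded down into cipso_v4_doi_add()/cipso_v4_doi_remove(), which, by implication of this change, now emit the record at the point the DOI actually changes. The caller side reduces to a sketch like:

	struct netlbl_audit audit_info;

	/* Capture who issued the netlink request... */
	netlbl_netlink_auditinfo(skb, &audit_info);
	/* ...and let the DOI operation do its own audit logging. */
	ret_val = cipso_v4_doi_add(doi_def, &audit_info);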
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 5fadf10e5ddf..7a10bbe02c13 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -483,6 +483,73 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 }
 
 /**
+ * netlbl_domhsh_remove_af4 - Removes an address selector entry
+ * @domain: the domain
+ * @addr: IPv4 address
+ * @mask: IPv4 address mask
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Removes an individual address selector from a domain mapping and potentially
+ * the entire mapping if it is empty.  Returns zero on success, negative values
+ * on failure.
+ *
+ */
+int netlbl_domhsh_remove_af4(const char *domain,
+			     const struct in_addr *addr,
+			     const struct in_addr *mask,
+			     struct netlbl_audit *audit_info)
+{
+	struct netlbl_dom_map *entry_map;
+	struct netlbl_af4list *entry_addr;
+	struct netlbl_af4list *iter4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct netlbl_af6list *iter6;
+#endif /* IPv6 */
+	struct netlbl_domaddr4_map *entry;
+
+	rcu_read_lock();
+
+	if (domain)
+		entry_map = netlbl_domhsh_search(domain);
+	else
+		entry_map = netlbl_domhsh_search_def(domain);
+	if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT)
+		goto remove_af4_failure;
+
+	spin_lock(&netlbl_domhsh_lock);
+	entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
+					   &entry_map->type_def.addrsel->list4);
+	spin_unlock(&netlbl_domhsh_lock);
+
+	if (entry_addr == NULL)
+		goto remove_af4_failure;
+	netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
+		goto remove_af4_single_addr;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
+		goto remove_af4_single_addr;
+#endif /* IPv6 */
+	/* the domain mapping is empty so remove it from the mapping table */
+	netlbl_domhsh_remove_entry(entry_map, audit_info);
+
+remove_af4_single_addr:
+	rcu_read_unlock();
+	/* yick, we can't use call_rcu here because we don't have a rcu head
+	 * pointer but hopefully this should be a rare case so the pause
+	 * shouldn't be a problem */
+	synchronize_rcu();
+	entry = netlbl_domhsh_addr4_entry(entry_addr);
+	cipso_v4_doi_putdef(entry->type_def.cipsov4);
+	kfree(entry);
+	return 0;
+
+remove_af4_failure:
+	rcu_read_unlock();
+	return -ENOENT;
+}
+
+/**
  * netlbl_domhsh_remove - Removes an entry from the domain hash table
  * @domain: the domain to remove
  * @audit_info: NetLabel audit information
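
The "yick" comment above flags the trade-off: without an rcu_head embedded in the address entry, the function must block in synchronize_rcu() before it can kfree(). Had the structure carried one, the wait could be deferred; roughly, assuming a hypothetical 'rcu' member in struct netlbl_domaddr4_map:

	static void domaddr4_free_rcu(struct rcu_head *head)
	{
		struct netlbl_domaddr4_map *entry =
			container_of(head, struct netlbl_domaddr4_map, rcu);

		cipso_v4_doi_putdef(entry->type_def.cipsov4);
		kfree(entry);
	}

	/* ...then, instead of synchronize_rcu() + kfree():
	 *	call_rcu(&entry->rcu, domaddr4_free_rcu);
	 * and the caller never blocks. */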
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index bfcb6763a1a1..0261dda3f2d2 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -90,6 +90,10 @@ int netlbl_domhsh_add_default(struct netlbl_dom_map *entry,
 			      struct netlbl_audit *audit_info);
 int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 			       struct netlbl_audit *audit_info);
+int netlbl_domhsh_remove_af4(const char *domain,
+			     const struct in_addr *addr,
+			     const struct in_addr *mask,
+			     struct netlbl_audit *audit_info);
int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
 int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
 struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index b32eceb3ab0d..fd9229db075c 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -31,7 +31,10 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/audit.h>
+#include <linux/in.h>
+#include <linux/in6.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 #include <net/netlabel.h>
 #include <net/cipso_ipv4.h>
 #include <asm/bug.h>
@@ -42,6 +45,7 @@
 #include "netlabel_cipso_v4.h"
 #include "netlabel_user.h"
 #include "netlabel_mgmt.h"
+#include "netlabel_addrlist.h"
 
 /*
  * Configuration Functions
@@ -50,6 +54,9 @@
 /**
  * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping
  * @domain: the domain mapping to remove
+ * @family: address family
+ * @addr: IP address
+ * @mask: IP address mask
  * @audit_info: NetLabel audit information
  *
  * Description:
@@ -58,14 +65,32 @@
  * values on failure.
  *
 */
-int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info)
+int netlbl_cfg_map_del(const char *domain,
+		       u16 family,
+		       const void *addr,
+		       const void *mask,
+		       struct netlbl_audit *audit_info)
 {
-	return netlbl_domhsh_remove(domain, audit_info);
+	if (addr == NULL && mask == NULL) {
+		return netlbl_domhsh_remove(domain, audit_info);
+	} else if (addr != NULL && mask != NULL) {
+		switch (family) {
+		case AF_INET:
+			return netlbl_domhsh_remove_af4(domain, addr, mask,
+							audit_info);
+		default:
+			return -EPFNOSUPPORT;
+		}
+	} else
+		return -EINVAL;
 }
 
 /**
- * netlbl_cfg_unlbl_add_map - Add an unlabeled NetLabel/LSM domain mapping
+ * netlbl_cfg_unlbl_map_add - Add a new unlabeled mapping
  * @domain: the domain mapping to add
+ * @family: address family
+ * @addr: IP address
+ * @mask: IP address mask
  * @audit_info: NetLabel audit information
 *
 * Description:
@@ -74,11 +99,19 @@ int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info)
 * negative values on failure.
 *
 */
-int netlbl_cfg_unlbl_add_map(const char *domain,
+int netlbl_cfg_unlbl_map_add(const char *domain,
+			     u16 family,
+			     const void *addr,
+			     const void *mask,
 			     struct netlbl_audit *audit_info)
 {
 	int ret_val = -ENOMEM;
 	struct netlbl_dom_map *entry;
+	struct netlbl_domaddr_map *addrmap = NULL;
+	struct netlbl_domaddr4_map *map4 = NULL;
+	struct netlbl_domaddr6_map *map6 = NULL;
+	const struct in_addr *addr4, *mask4;
+	const struct in6_addr *addr6, *mask6;
 
 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (entry == NULL)
@@ -86,49 +119,225 @@ int netlbl_cfg_unlbl_add_map(const char *domain,
 	if (domain != NULL) {
 		entry->domain = kstrdup(domain, GFP_ATOMIC);
 		if (entry->domain == NULL)
-			goto cfg_unlbl_add_map_failure;
+			goto cfg_unlbl_map_add_failure;
+	}
+
+	if (addr == NULL && mask == NULL)
+		entry->type = NETLBL_NLTYPE_UNLABELED;
+	else if (addr != NULL && mask != NULL) {
+		addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
+		if (addrmap == NULL)
+			goto cfg_unlbl_map_add_failure;
+		INIT_LIST_HEAD(&addrmap->list4);
+		INIT_LIST_HEAD(&addrmap->list6);
+
+		switch (family) {
+		case AF_INET:
+			addr4 = addr;
+			mask4 = mask;
+			map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
+			if (map4 == NULL)
+				goto cfg_unlbl_map_add_failure;
+			map4->type = NETLBL_NLTYPE_UNLABELED;
+			map4->list.addr = addr4->s_addr & mask4->s_addr;
+			map4->list.mask = mask4->s_addr;
+			map4->list.valid = 1;
+			ret_val = netlbl_af4list_add(&map4->list,
+						     &addrmap->list4);
+			if (ret_val != 0)
+				goto cfg_unlbl_map_add_failure;
+			break;
+		case AF_INET6:
+			addr6 = addr;
+			mask6 = mask;
+			map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
+			if (map6 == NULL)
+				goto cfg_unlbl_map_add_failure;
+			map6->type = NETLBL_NLTYPE_UNLABELED;
+			ipv6_addr_copy(&map6->list.addr, addr6);
+			map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
+			map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
+			map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
+			map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
+			ipv6_addr_copy(&map6->list.mask, mask6);
+			map6->list.valid = 1;
+			ret_val = netlbl_af6list_add(&map6->list,
+						     &addrmap->list6);
+			if (ret_val != 0)
+				goto cfg_unlbl_map_add_failure;
+			break;
+		default:
+			goto cfg_unlbl_map_add_failure;
+			break;
+		}
+
+		entry->type_def.addrsel = addrmap;
+		entry->type = NETLBL_NLTYPE_ADDRSELECT;
+	} else {
+		ret_val = -EINVAL;
+		goto cfg_unlbl_map_add_failure;
 	}
-	entry->type = NETLBL_NLTYPE_UNLABELED;
 
 	ret_val = netlbl_domhsh_add(entry, audit_info);
 	if (ret_val != 0)
-		goto cfg_unlbl_add_map_failure;
+		goto cfg_unlbl_map_add_failure;
 
 	return 0;
 
-cfg_unlbl_add_map_failure:
+cfg_unlbl_map_add_failure:
 	if (entry != NULL)
 		kfree(entry->domain);
 	kfree(entry);
+	kfree(addrmap);
+	kfree(map4);
+	kfree(map6);
 	return ret_val;
 }
 
+
+/**
+ * netlbl_cfg_unlbl_static_add - Adds a new static label
+ * @net: network namespace
+ * @dev_name: interface name
+ * @addr: IP address in network byte order (struct in[6]_addr)
+ * @mask: address mask in network byte order (struct in[6]_addr)
+ * @family: address family
+ * @secid: LSM secid value for the entry
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Adds a new NetLabel static label to be used when protocol provided labels
+ * are not present on incoming traffic.  If @dev_name is NULL then the default
+ * interface will be used.  Returns zero on success, negative values on failure.
+ *
+ */
+int netlbl_cfg_unlbl_static_add(struct net *net,
+				const char *dev_name,
+				const void *addr,
+				const void *mask,
+				u16 family,
+				u32 secid,
+				struct netlbl_audit *audit_info)
+{
+	u32 addr_len;
+
+	switch (family) {
+	case AF_INET:
+		addr_len = sizeof(struct in_addr);
+		break;
+	case AF_INET6:
+		addr_len = sizeof(struct in6_addr);
+		break;
+	default:
+		return -EPFNOSUPPORT;
+	}
+
+	return netlbl_unlhsh_add(net,
+				 dev_name, addr, mask, addr_len,
+				 secid, audit_info);
+}
+
+/**
+ * netlbl_cfg_unlbl_static_del - Removes an existing static label
+ * @net: network namespace
+ * @dev_name: interface name
+ * @addr: IP address in network byte order (struct in[6]_addr)
+ * @mask: address mask in network byte order (struct in[6]_addr)
+ * @family: address family
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Removes an existing NetLabel static label used when protocol provided labels
+ * are not present on incoming traffic.  If @dev_name is NULL then the default
+ * interface will be used.  Returns zero on success, negative values on failure.
+ *
+ */
+int netlbl_cfg_unlbl_static_del(struct net *net,
+				const char *dev_name,
+				const void *addr,
+				const void *mask,
+				u16 family,
+				struct netlbl_audit *audit_info)
+{
+	u32 addr_len;
+
+	switch (family) {
+	case AF_INET:
+		addr_len = sizeof(struct in_addr);
+		break;
+	case AF_INET6:
+		addr_len = sizeof(struct in6_addr);
+		break;
+	default:
+		return -EPFNOSUPPORT;
+	}
+
+	return netlbl_unlhsh_remove(net,
+				    dev_name, addr, mask, addr_len,
+				    audit_info);
+}
+
+/**
+ * netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition
+ * @doi_def: CIPSO DOI definition
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Add a new CIPSO DOI definition as defined by @doi_def.  Returns zero on
+ * success and negative values on failure.
+ *
+ */
+int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
+			   struct netlbl_audit *audit_info)
+{
+	return cipso_v4_doi_add(doi_def, audit_info);
+}
+
+/**
+ * netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition
+ * @doi: CIPSO DOI
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Remove an existing CIPSO DOI definition matching @doi.  Returns zero on
+ * success and negative values on failure.
+ *
+ */
+void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info)
+{
+	cipso_v4_doi_remove(doi, audit_info);
+}
+
 /**
- * netlbl_cfg_cipsov4_add_map - Add a new CIPSOv4 DOI definition and mapping
- * @doi_def: the DOI definition
+ * netlbl_cfg_cipsov4_map_add - Add a new CIPSOv4 DOI mapping
+ * @doi: the CIPSO DOI
  * @domain: the domain mapping to add
+ * @addr: IP address
+ * @mask: IP address mask
  * @audit_info: NetLabel audit information
 *
 * Description:
- * Add a new CIPSOv4 DOI definition and NetLabel/LSM domain mapping for this
- * new DOI definition to the NetLabel subsystem.  A @domain value of NULL adds
- * a new default domain mapping.  Returns zero on success, negative values on
- * failure.
+ * Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the NetLabel
+ * subsystem.  A @domain value of NULL adds a new default domain mapping.
+ * Returns zero on success, negative values on failure.
 *
 */
-int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def,
+int netlbl_cfg_cipsov4_map_add(u32 doi,
 			       const char *domain,
+			       const struct in_addr *addr,
+			       const struct in_addr *mask,
 			       struct netlbl_audit *audit_info)
 {
 	int ret_val = -ENOMEM;
-	u32 doi;
-	u32 doi_type;
+	struct cipso_v4_doi *doi_def;
 	struct netlbl_dom_map *entry;
-	const char *type_str;
-	struct audit_buffer *audit_buf;
+	struct netlbl_domaddr_map *addrmap = NULL;
+	struct netlbl_domaddr4_map *addrinfo = NULL;
 
-	doi = doi_def->doi;
-	doi_type = doi_def->type;
+	doi_def = cipso_v4_doi_getdef(doi);
+	if (doi_def == NULL)
+		return -ENOENT;
 
 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (entry == NULL)
@@ -136,56 +345,52 @@ int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def,
136 if (domain != NULL) { 345 if (domain != NULL) {
137 entry->domain = kstrdup(domain, GFP_ATOMIC); 346 entry->domain = kstrdup(domain, GFP_ATOMIC);
138 if (entry->domain == NULL) 347 if (entry->domain == NULL)
139 goto cfg_cipsov4_add_map_failure; 348 goto cfg_cipsov4_map_add_failure;
140 } 349 }
141 350
142 ret_val = cipso_v4_doi_add(doi_def); 351 if (addr == NULL && mask == NULL) {
143 if (ret_val != 0) 352 entry->type_def.cipsov4 = doi_def;
144 goto cfg_cipsov4_add_map_failure_remove_doi; 353 entry->type = NETLBL_NLTYPE_CIPSOV4;
145 entry->type = NETLBL_NLTYPE_CIPSOV4; 354 } else if (addr != NULL && mask != NULL) {
146 entry->type_def.cipsov4 = cipso_v4_doi_getdef(doi); 355 addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
147 if (entry->type_def.cipsov4 == NULL) { 356 if (addrmap == NULL)
148 ret_val = -ENOENT; 357 goto cfg_cipsov4_map_add_failure;
149 goto cfg_cipsov4_add_map_failure_remove_doi; 358 INIT_LIST_HEAD(&addrmap->list4);
359 INIT_LIST_HEAD(&addrmap->list6);
360
361 addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
362 if (addrinfo == NULL)
363 goto cfg_cipsov4_map_add_failure;
364 addrinfo->type_def.cipsov4 = doi_def;
365 addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
366 addrinfo->list.addr = addr->s_addr & mask->s_addr;
367 addrinfo->list.mask = mask->s_addr;
368 addrinfo->list.valid = 1;
369 ret_val = netlbl_af4list_add(&addrinfo->list, &addrmap->list4);
370 if (ret_val != 0)
371 goto cfg_cipsov4_map_add_failure;
372
373 entry->type_def.addrsel = addrmap;
374 entry->type = NETLBL_NLTYPE_ADDRSELECT;
375 } else {
376 ret_val = -EINVAL;
377 goto cfg_cipsov4_map_add_failure;
150 } 378 }
379
151 ret_val = netlbl_domhsh_add(entry, audit_info); 380 ret_val = netlbl_domhsh_add(entry, audit_info);
152 if (ret_val != 0) 381 if (ret_val != 0)
153 goto cfg_cipsov4_add_map_failure_release_doi; 382 goto cfg_cipsov4_map_add_failure;
154
155cfg_cipsov4_add_map_return:
156 audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD,
157 audit_info);
158 if (audit_buf != NULL) {
159 switch (doi_type) {
160 case CIPSO_V4_MAP_TRANS:
161 type_str = "trans";
162 break;
163 case CIPSO_V4_MAP_PASS:
164 type_str = "pass";
165 break;
166 case CIPSO_V4_MAP_LOCAL:
167 type_str = "local";
168 break;
169 default:
170 type_str = "(unknown)";
171 }
172 audit_log_format(audit_buf,
173 " cipso_doi=%u cipso_type=%s res=%u",
174 doi, type_str, ret_val == 0 ? 1 : 0);
175 audit_log_end(audit_buf);
176 }
177 383
178 return ret_val; 384 return 0;
179 385
180cfg_cipsov4_add_map_failure_release_doi: 386cfg_cipsov4_map_add_failure:
181 cipso_v4_doi_putdef(doi_def); 387 cipso_v4_doi_putdef(doi_def);
182cfg_cipsov4_add_map_failure_remove_doi:
183 cipso_v4_doi_remove(doi, audit_info);
184cfg_cipsov4_add_map_failure:
185 if (entry != NULL) 388 if (entry != NULL)
186 kfree(entry->domain); 389 kfree(entry->domain);
187 kfree(entry); 390 kfree(entry);
188 goto cfg_cipsov4_add_map_return; 391 kfree(addrmap);
392 kfree(addrinfo);
393 return ret_val;
189} 394}
190 395
191/* 396/*
@@ -691,6 +896,28 @@ int netlbl_cache_add(const struct sk_buff *skb,
691} 896}
692 897
693/* 898/*
899 * Protocol Engine Functions
900 */
901
902/**
903 * netlbl_audit_start - Start an audit message
904 * @type: audit message type
905 * @audit_info: NetLabel audit information
906 *
907 * Description:
908 * Start an audit message using the type specified in @type and fill the audit
909 * message with some fields common to all NetLabel audit messages. This
910 * function should only be used by protocol engines, not LSMs. Returns a
911 * pointer to the audit buffer on success, NULL on failure.
912 *
913 */
914struct audit_buffer *netlbl_audit_start(int type,
915 struct netlbl_audit *audit_info)
916{
917 return netlbl_audit_start_common(type, audit_info);
918}
919
920/*
694 * Setup Functions 921 * Setup Functions
695 */ 922 */
696 923
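
The netlbl_cfg_unlbl_static_add()/_del() wrappers and netlbl_cfg_cipsov4_map_add() above give LSMs a direct, in-kernel configuration path into NetLabel. A minimal sketch of an LSM-side caller, assuming a secid and audit info are already in hand; the helper name, device, addresses, and DOI value are hypothetical:

#include <linux/in.h>
#include <net/net_namespace.h>
#include <net/netlabel.h>

/* Fallback-label unlabeled IPv4 traffic from 10.0.0.0/8 on eth0 with
 * @secid, then map the "example.com" domain to CIPSO DOI 3; the DOI
 * must already have been added via netlbl_cfg_cipsov4_add(), and the
 * NULL addr/mask pair selects the plain (non-address) mapping. */
static int example_netlbl_setup(u32 secid, struct netlbl_audit *audit_info)
{
	struct in_addr addr = { .s_addr = htonl(0x0a000000) };	/* 10.0.0.0 */
	struct in_addr mask = { .s_addr = htonl(0xff000000) };	/* /8 */
	int ret;

	ret = netlbl_cfg_unlbl_static_add(&init_net, "eth0", &addr, &mask,
					  AF_INET, secid, audit_info);
	if (ret != 0)
		return ret;
	return netlbl_cfg_cipsov4_map_add(3, "example.com", NULL, NULL,
					  audit_info);
}
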
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 8c0308032178..f3c5c68c6848 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -450,13 +450,13 @@ add_iface_failure:
450 * success, negative values on failure. 450 * success, negative values on failure.
451 * 451 *
452 */ 452 */
453static int netlbl_unlhsh_add(struct net *net, 453int netlbl_unlhsh_add(struct net *net,
454 const char *dev_name, 454 const char *dev_name,
455 const void *addr, 455 const void *addr,
456 const void *mask, 456 const void *mask,
457 u32 addr_len, 457 u32 addr_len,
458 u32 secid, 458 u32 secid,
459 struct netlbl_audit *audit_info) 459 struct netlbl_audit *audit_info)
460{ 460{
461 int ret_val; 461 int ret_val;
462 int ifindex; 462 int ifindex;
@@ -720,12 +720,12 @@ unlhsh_condremove_failure:
720 * Returns zero on success, negative values on failure. 720 * Returns zero on success, negative values on failure.
721 * 721 *
722 */ 722 */
723static int netlbl_unlhsh_remove(struct net *net, 723int netlbl_unlhsh_remove(struct net *net,
724 const char *dev_name, 724 const char *dev_name,
725 const void *addr, 725 const void *addr,
726 const void *mask, 726 const void *mask,
727 u32 addr_len, 727 u32 addr_len,
728 struct netlbl_audit *audit_info) 728 struct netlbl_audit *audit_info)
729{ 729{
730 int ret_val; 730 int ret_val;
731 struct net_device *dev; 731 struct net_device *dev;
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
index 06b1301ac072..7aba63595137 100644
--- a/net/netlabel/netlabel_unlabeled.h
+++ b/net/netlabel/netlabel_unlabeled.h
@@ -221,6 +221,21 @@ int netlbl_unlabel_genl_init(void);
221/* General Unlabeled init function */ 221/* General Unlabeled init function */
222int netlbl_unlabel_init(u32 size); 222int netlbl_unlabel_init(u32 size);
223 223
224/* Static/Fallback label management functions */
225int netlbl_unlhsh_add(struct net *net,
226 const char *dev_name,
227 const void *addr,
228 const void *mask,
229 u32 addr_len,
230 u32 secid,
231 struct netlbl_audit *audit_info);
232int netlbl_unlhsh_remove(struct net *net,
233 const char *dev_name,
234 const void *addr,
235 const void *mask,
236 u32 addr_len,
237 struct netlbl_audit *audit_info);
238
224/* Process Unlabeled incoming network packets */ 239/* Process Unlabeled incoming network packets */
225int netlbl_unlabel_getattr(const struct sk_buff *skb, 240int netlbl_unlabel_getattr(const struct sk_buff *skb,
226 u16 family, 241 u16 family,
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 3e1191cecaf0..1d3dd30099df 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -225,6 +225,7 @@ void genl_unregister_mc_group(struct genl_family *family,
225 __genl_unregister_mc_group(family, grp); 225 __genl_unregister_mc_group(family, grp);
226 genl_unlock(); 226 genl_unlock();
227} 227}
228EXPORT_SYMBOL(genl_unregister_mc_group);
228 229
229static void genl_unregister_mc_groups(struct genl_family *family) 230static void genl_unregister_mc_groups(struct genl_family *family)
230{ 231{
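
Exporting genl_unregister_mc_group() lets a module drop one multicast group without tearing down its whole generic netlink family. A sketch of the matching register/unregister pair; the family and group names are hypothetical:

#include <linux/module.h>
#include <net/genetlink.h>

static struct genl_family example_family = {
	.id      = GENL_ID_GENERATE,
	.name    = "example",
	.version = 1,
};

static struct genl_multicast_group example_grp = {
	.name = "example_mc",
};

static int __init example_init(void)
{
	int err = genl_register_family(&example_family);

	if (err)
		return err;
	err = genl_register_mc_group(&example_family, &example_grp);
	if (err)
		genl_unregister_family(&example_family);
	return err;
}

static void __exit example_exit(void)
{
	/* possible from a module now that the symbol is exported */
	genl_unregister_mc_group(&example_family, &example_grp);
	genl_unregister_family(&example_family);
}

module_init(example_init);
module_exit(example_exit);
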
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2d6cd1..6a91a32a80c1 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@ static int gprs_set_mtu(struct net_device *dev, int new_mtu)
227 return 0; 227 return 0;
228} 228}
229 229
230static const struct net_device_ops gprs_netdev_ops = {
231 .ndo_open = gprs_open,
232 .ndo_stop = gprs_close,
233 .ndo_start_xmit = gprs_xmit,
234 .ndo_change_mtu = gprs_set_mtu,
235};
236
230static void gprs_setup(struct net_device *dev) 237static void gprs_setup(struct net_device *dev)
231{ 238{
232 dev->features = NETIF_F_FRAGLIST; 239 dev->features = NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@ static void gprs_setup(struct net_device *dev)
237 dev->addr_len = 0; 244 dev->addr_len = 0;
238 dev->tx_queue_len = 10; 245 dev->tx_queue_len = 10;
239 246
247 dev->netdev_ops = &gprs_netdev_ops;
240 dev->destructor = free_netdev; 248 dev->destructor = free_netdev;
241 dev->open = gprs_open;
242 dev->stop = gprs_close;
243 dev->hard_start_xmit = gprs_xmit; /* mandatory */
244 dev->change_mtu = gprs_set_mtu;
245} 249}
246 250
247/* 251/*
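
This hunk is part of the tree-wide conversion from writable per-device callback pointers to a single shared, const struct net_device_ops (sch_teql.c below gets the same treatment). The pattern in isolation, with all foo_* names hypothetical:

#include <linux/netdevice.h>

static int foo_open(struct net_device *dev)  { return 0; }
static int foo_close(struct net_device *dev) { return 0; }

static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* a real driver would queue it */
	return 0;			/* NETDEV_TX_OK */
}

/* one read-only table shared by every device instance */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open       = foo_open,
	.ndo_stop       = foo_close,
	.ndo_start_xmit = foo_xmit,
};

static void foo_setup(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;	/* replaces dev->open etc. */
}
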
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 3c94f76d5525..3eaa39403c13 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -54,10 +54,10 @@ static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
54static bool rfkill_epo_lock_active; 54static bool rfkill_epo_lock_active;
55 55
56 56
57#ifdef CONFIG_RFKILL_LEDS
57static void rfkill_led_trigger(struct rfkill *rfkill, 58static void rfkill_led_trigger(struct rfkill *rfkill,
58 enum rfkill_state state) 59 enum rfkill_state state)
59{ 60{
60#ifdef CONFIG_RFKILL_LEDS
61 struct led_trigger *led = &rfkill->led_trigger; 61 struct led_trigger *led = &rfkill->led_trigger;
62 62
63 if (!led->name) 63 if (!led->name)
@@ -66,10 +66,8 @@ static void rfkill_led_trigger(struct rfkill *rfkill,
66 led_trigger_event(led, LED_OFF); 66 led_trigger_event(led, LED_OFF);
67 else 67 else
68 led_trigger_event(led, LED_FULL); 68 led_trigger_event(led, LED_FULL);
69#endif /* CONFIG_RFKILL_LEDS */
70} 69}
71 70
72#ifdef CONFIG_RFKILL_LEDS
73static void rfkill_led_trigger_activate(struct led_classdev *led) 71static void rfkill_led_trigger_activate(struct led_classdev *led)
74{ 72{
75 struct rfkill *rfkill = container_of(led->trigger, 73 struct rfkill *rfkill = container_of(led->trigger,
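
The rfkill hunk hoists the #ifdef so the whole helper is compiled out when CONFIG_RFKILL_LEDS is unset, rather than leaving an empty function body behind. The general form of that pattern, shown here with a no-op stub for callers that are not themselves conditional; the example_* names and config symbol are hypothetical:

#include <linux/leds.h>

struct example_dev {
	struct led_trigger led_trigger;
};

#ifdef CONFIG_EXAMPLE_LEDS
static void example_led_event(struct example_dev *dev, bool on)
{
	led_trigger_event(&dev->led_trigger, on ? LED_FULL : LED_OFF);
}
#else
static inline void example_led_event(struct example_dev *dev, bool on)
{
	/* stub keeps call sites free of #ifdefs */
}
#endif
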
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 4f7ef0db302b..929218a47620 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -335,9 +335,6 @@ config NET_CLS_CGROUP
335 Say Y here if you want to classify packets based on the control 335 Say Y here if you want to classify packets based on the control
336 cgroup of their process. 336 cgroup of their process.
337 337
338 To compile this code as a module, choose M here: the
339 module will be called cls_cgroup.
340
341config NET_EMATCH 338config NET_EMATCH
342 bool "Extended Matches" 339 bool "Extended Matches"
343 select NET_CLS 340 select NET_CLS
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 0d68b1975983..91a3db4a76f8 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -24,10 +24,16 @@ struct cgroup_cls_state
24 u32 classid; 24 u32 classid;
25}; 25};
26 26
27static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp) 27static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
28{ 28{
29 return (struct cgroup_cls_state *) 29 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
30 cgroup_subsys_state(cgrp, net_cls_subsys_id); 30 struct cgroup_cls_state, css);
31}
32
33static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
34{
35 return container_of(task_subsys_state(p, net_cls_subsys_id),
36 struct cgroup_cls_state, css);
31} 37}
32 38
33static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 39static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
@@ -39,19 +45,19 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
39 return ERR_PTR(-ENOMEM); 45 return ERR_PTR(-ENOMEM);
40 46
41 if (cgrp->parent) 47 if (cgrp->parent)
42 cs->classid = net_cls_state(cgrp->parent)->classid; 48 cs->classid = cgrp_cls_state(cgrp->parent)->classid;
43 49
44 return &cs->css; 50 return &cs->css;
45} 51}
46 52
47static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) 53static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
48{ 54{
49 kfree(ss); 55 kfree(cgrp_cls_state(cgrp));
50} 56}
51 57
52static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) 58static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
53{ 59{
54 return net_cls_state(cgrp)->classid; 60 return cgrp_cls_state(cgrp)->classid;
55} 61}
56 62
57static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) 63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
@@ -59,7 +65,7 @@ static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
59 if (!cgroup_lock_live_group(cgrp)) 65 if (!cgroup_lock_live_group(cgrp))
60 return -ENODEV; 66 return -ENODEV;
61 67
62 net_cls_state(cgrp)->classid = (u32) value; 68 cgrp_cls_state(cgrp)->classid = (u32) value;
63 69
64 cgroup_unlock(); 70 cgroup_unlock();
65 71
@@ -115,8 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
115 return -1; 121 return -1;
116 122
117 rcu_read_lock(); 123 rcu_read_lock();
118 cs = (struct cgroup_cls_state *) task_subsys_state(current, 124 cs = task_cls_state(current);
119 net_cls_subsys_id);
120 if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { 125 if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
121 res->classid = cs->classid; 126 res->classid = cs->classid;
122 res->class = 0; 127 res->class = 0;
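
Two fixes land in cls_cgroup.c: the accessors now use container_of() instead of a raw cast, and cgrp_destroy() frees the per-cgroup state rather than the subsystem pointer it was mistakenly handed. The container_of() idiom in isolation, with a hypothetical layout:

#include <linux/kernel.h>
#include <linux/list.h>

struct example_state {
	int cookie;
	struct list_head node;		/* embedded member, not first */
};

static inline struct example_state *node_to_state(struct list_head *node)
{
	/* correct for any member offset, unlike a plain cast */
	return container_of(node, struct example_state, node);
}
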
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 05d178008cbc..07372f60bee3 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -638,8 +638,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
638 break; 638 break;
639 639
640 n->next = *ins; 640 n->next = *ins;
641 wmb(); 641 tcf_tree_lock(tp);
642 *ins = n; 642 *ins = n;
643 tcf_tree_unlock(tp);
643 644
644 *arg = (unsigned long)n; 645 *arg = (unsigned long)n;
645 return 0; 646 return 0;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df00559..33133d27b539 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
435 int i; 435 int i;
436 436
437 q->perturb_timer.function = sfq_perturbation; 437 q->perturb_timer.function = sfq_perturbation;
438 q->perturb_timer.data = (unsigned long)sch;; 438 q->perturb_timer.data = (unsigned long)sch;
439 init_timer_deferrable(&q->perturb_timer); 439 init_timer_deferrable(&q->perturb_timer);
440 440
441 for (i = 0; i < SFQ_HASH_DIVISOR; i++) 441 for (i = 0; i < SFQ_HASH_DIVISOR; i++)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7caba62..ec697cebb63b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@ restart:
289 289
290 do { 290 do {
291 struct net_device *slave = qdisc_dev(q); 291 struct net_device *slave = qdisc_dev(q);
292 struct netdev_queue *slave_txq; 292 struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
293 const struct net_device_ops *slave_ops = slave->netdev_ops;
293 294
294 slave_txq = netdev_get_tx_queue(slave, 0);
295 if (slave_txq->qdisc_sleeping != q) 295 if (slave_txq->qdisc_sleeping != q)
296 continue; 296 continue;
297 if (__netif_subqueue_stopped(slave, subq) || 297 if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@ restart:
305 if (__netif_tx_trylock(slave_txq)) { 305 if (__netif_tx_trylock(slave_txq)) {
306 if (!netif_tx_queue_stopped(slave_txq) && 306 if (!netif_tx_queue_stopped(slave_txq) &&
307 !netif_tx_queue_frozen(slave_txq) && 307 !netif_tx_queue_frozen(slave_txq) &&
308 slave->hard_start_xmit(skb, slave) == 0) { 308 slave_ops->ndo_start_xmit(skb, slave) == 0) {
309 __netif_tx_unlock(slave_txq); 309 __netif_tx_unlock(slave_txq);
310 master->slaves = NEXT_SLAVE(q); 310 master->slaves = NEXT_SLAVE(q);
311 netif_wake_queue(dev); 311 netif_wake_queue(dev);
@@ -420,6 +420,14 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
420 return 0; 420 return 0;
421} 421}
422 422
423static const struct net_device_ops teql_netdev_ops = {
424 .ndo_open = teql_master_open,
425 .ndo_stop = teql_master_close,
426 .ndo_start_xmit = teql_master_xmit,
427 .ndo_get_stats = teql_master_stats,
428 .ndo_change_mtu = teql_master_mtu,
429};
430
423static __init void teql_master_setup(struct net_device *dev) 431static __init void teql_master_setup(struct net_device *dev)
424{ 432{
425 struct teql_master *master = netdev_priv(dev); 433 struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@ static __init void teql_master_setup(struct net_device *dev)
436 ops->destroy = teql_destroy; 444 ops->destroy = teql_destroy;
437 ops->owner = THIS_MODULE; 445 ops->owner = THIS_MODULE;
438 446
439 dev->open = teql_master_open; 447 dev->netdev_ops = &teql_netdev_ops;
440 dev->hard_start_xmit = teql_master_xmit;
441 dev->stop = teql_master_close;
442 dev->get_stats = teql_master_stats;
443 dev->change_mtu = teql_master_mtu;
444 dev->type = ARPHRD_VOID; 448 dev->type = ARPHRD_VOID;
445 dev->mtu = 1500; 449 dev->mtu = 1500;
446 dev->tx_queue_len = 100; 450 dev->tx_queue_len = 100;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 52db5f60daa0..56935bbc1496 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -141,8 +141,8 @@ void sctp_auth_destroy_keys(struct list_head *keys)
141/* Compare two byte vectors as numbers. Return values 141/* Compare two byte vectors as numbers. Return values
142 * are: 142 * are:
143 * 0 - vectors are equal 143 * 0 - vectors are equal
144 * < 0 - vector 1 is smaller then vector2 144 * < 0 - vector 1 is smaller than vector 2
145 * > 0 - vector 1 is greater then vector2 145 * > 0 - vector 1 is greater than vector 2
146 * 146 *
147 * Algorithm is: 147 * Algorithm is:
148 * This is performed by selecting the numerically smaller key vector... 148 * This is performed by selecting the numerically smaller key vector...
@@ -489,7 +489,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
489 return 0; 489 return 0;
490 490
491out_err: 491out_err:
492 /* Clean up any successfull allocations */ 492 /* Clean up any successful allocations */
493 sctp_auth_destroy_hmacs(ep->auth_hmacs); 493 sctp_auth_destroy_hmacs(ep->auth_hmacs);
494 return -ENOMEM; 494 return -ENOMEM;
495} 495}
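
The byte-vector comparison that comment describes, written out as a standalone illustration (not the SCTP implementation): treat both vectors as big-endian numbers, with the shorter one implicitly zero-padded on the left.

static int vector_cmp(const unsigned char *a, int alen,
		      const unsigned char *b, int blen)
{
	int i = 0, j = 0;

	/* a nonzero byte in the longer vector's prefix decides it */
	for (; i < alen - blen; i++)
		if (a[i])
			return 1;
	for (; j < blen - alen; j++)
		if (b[j])
			return -1;
	/* compare the overlapping bytes, most significant first */
	for (; i < alen; i++, j++)
		if (a[i] != b[j])
			return a[i] < b[j] ? -1 : 1;
	return 0;
}
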
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1c4e5d6c29c0..3a0cd075914f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4268,9 +4268,9 @@ nomem:
4268 4268
4269/* 4269/*
4270 * Handle a protocol violation when the chunk length is invalid. 4270 * Handle a protocol violation when the chunk length is invalid.
4271 * "Invalid" length is identified as smaller then the minimal length a 4271 * "Invalid" length is identified as smaller than the minimal length a
4272 * given chunk can be. For example, a SACK chunk has invalid length 4272 * given chunk can be. For example, a SACK chunk has invalid length
4273 * if it's length is set to be smaller then the size of sctp_sack_chunk_t. 4273 * if its length is set to be smaller than the size of sctp_sack_chunk_t.
4274 * 4274 *
4275 * We inform the other end by sending an ABORT with a Protocol Violation 4275 * We inform the other end by sending an ABORT with a Protocol Violation
4276 * error code. 4276 * error code.
@@ -4300,7 +4300,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4300 4300
4301/* 4301/*
4302 * Handle a protocol violation when the parameter length is invalid. 4302 * Handle a protocol violation when the parameter length is invalid.
4303 * "Invalid" length is identified as smaller then the minimal length a 4303 * "Invalid" length is identified as smaller than the minimal length a
4304 * given parameter can be. 4304 * given parameter can be.
4305 */ 4305 */
4306static sctp_disposition_t sctp_sf_violation_paramlen( 4306static sctp_disposition_t sctp_sf_violation_paramlen(
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b14a8f33e42d..ff0a8f88de04 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2717,7 +2717,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o
2717 paths++; 2717 paths++;
2718 } 2718 }
2719 2719
2720 /* Only validate asocmaxrxt if we have more then 2720 /* Only validate asocmaxrxt if we have more than
2721 * one path/transport. We do this because path 2721 * one path/transport. We do this because path
2722 * retransmissions are only counted when we have more 2722 * retransmissions are only counted when we have more
2723 * than one path. 2723 * than one path.
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 35c73e82553a..9bd64565021a 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -227,7 +227,7 @@ void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)
227 */ 227 */
228 bitmap_zero(map->tsn_map, map->len); 228 bitmap_zero(map->tsn_map, map->len);
229 } else { 229 } else {
230 /* If the gap is smaller then the map size, 230 /* If the gap is smaller than the map size,
231 * shift the map by 'gap' bits and update further. 231 * shift the map by 'gap' bits and update further.
232 */ 232 */
233 bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); 233 bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len);
diff --git a/net/socket.c b/net/socket.c
index 2c730fc718ab..06603d73c411 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1313,13 +1313,7 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
1313 goto out_fd1; 1313 goto out_fd1;
1314 } 1314 }
1315 1315
1316 err = audit_fd_pair(fd1, fd2); 1316 audit_fd_pair(fd1, fd2);
1317 if (err < 0) {
1318 fput(newfile1);
1319 fput(newfile2);
1320 goto out_fd;
1321 }
1322
1323 fd_install(fd1, newfile1); 1317 fd_install(fd1, newfile1);
1324 fd_install(fd2, newfile2); 1318 fd_install(fd2, newfile2);
1325 /* fd1 and fd2 may already be other descriptors. 1319 /* fd1 and fd2 may already be other descriptors.
@@ -1349,7 +1343,6 @@ out_fd2:
1349out_fd1: 1343out_fd1:
1350 put_filp(newfile2); 1344 put_filp(newfile2);
1351 sock_release(sock2); 1345 sock_release(sock2);
1352out_fd:
1353 put_unused_fd(fd1); 1346 put_unused_fd(fd1);
1354 put_unused_fd(fd2); 1347 put_unused_fd(fd2);
1355 goto out; 1348 goto out;
@@ -2065,9 +2058,7 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args)
2065 if (copy_from_user(a, args, nargs[call])) 2058 if (copy_from_user(a, args, nargs[call]))
2066 return -EFAULT; 2059 return -EFAULT;
2067 2060
2068 err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); 2061 audit_socketcall(nargs[call] / sizeof(unsigned long), a);
2069 if (err)
2070 return err;
2071 2062
2072 a0 = a[0]; 2063 a0 = a[0];
2073 a1 = a[1]; 2064 a1 = a[1];
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 0443f8349458..0c431c277af5 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -234,7 +234,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
234 list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { 234 list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) {
235 235
236 /* Enforce a 60 second garbage collection moratorium */ 236 /* Enforce a 60 second garbage collection moratorium */
237 if (time_in_range(cred->cr_expire, expired, jiffies) && 237 if (time_in_range_open(cred->cr_expire, expired, jiffies) &&
238 test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) 238 test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
239 continue; 239 continue;
240 240
@@ -515,7 +515,7 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
515 if (cred->cr_ops->crwrap_req) 515 if (cred->cr_ops->crwrap_req)
516 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); 516 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
517 /* By default, we encode the arguments normally. */ 517 /* By default, we encode the arguments normally. */
518 return rpc_call_xdrproc(encode, rqstp, data, obj); 518 return encode(rqstp, data, obj);
519} 519}
520 520
521int 521int
@@ -530,7 +530,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
530 return cred->cr_ops->crunwrap_resp(task, decode, rqstp, 530 return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
531 data, obj); 531 data, obj);
532 /* By default, we decode the arguments normally. */ 532 /* By default, we decode the arguments normally. */
533 return rpc_call_xdrproc(decode, rqstp, data, obj); 533 return decode(rqstp, data, obj);
534} 534}
535 535
536int 536int
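
With the rpc_call_xdrproc() wrapper gone, the XDR callbacks are invoked as plain function pointers. A minimal sketch of the calling convention; the encoder body is hypothetical:

#include <linux/sunrpc/xdr.h>

static int example_encode(void *rqstp, __be32 *data, void *obj)
{
	*data = htonl(42);		/* encode a single word */
	return 0;
}

static int example_wrap(kxdrproc_t encode, void *rqstp,
			__be32 *data, void *obj)
{
	return encode(rqstp, data, obj);	/* direct call, no macro */
}
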
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 853a4142cea1..e630b38a6047 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -72,11 +72,25 @@ struct gss_auth {
72 struct gss_api_mech *mech; 72 struct gss_api_mech *mech;
73 enum rpc_gss_svc service; 73 enum rpc_gss_svc service;
74 struct rpc_clnt *client; 74 struct rpc_clnt *client;
75 struct dentry *dentry; 75 /*
76 * There are two upcall pipes; dentry[1], named "gssd", is used
77 * for the new text-based upcall; dentry[0] is named after the
78 * mechanism (for example, "krb5") and exists for
79 * backwards-compatibility with older gssd's.
80 */
81 struct dentry *dentry[2];
76}; 82};
77 83
84/* pipe_version >= 0 if and only if someone has a pipe open. */
85static int pipe_version = -1;
86static atomic_t pipe_users = ATOMIC_INIT(0);
87static DEFINE_SPINLOCK(pipe_version_lock);
88static struct rpc_wait_queue pipe_version_rpc_waitqueue;
89static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
90
78static void gss_free_ctx(struct gss_cl_ctx *); 91static void gss_free_ctx(struct gss_cl_ctx *);
79static struct rpc_pipe_ops gss_upcall_ops; 92static struct rpc_pipe_ops gss_upcall_ops_v0;
93static struct rpc_pipe_ops gss_upcall_ops_v1;
80 94
81static inline struct gss_cl_ctx * 95static inline struct gss_cl_ctx *
82gss_get_ctx(struct gss_cl_ctx *ctx) 96gss_get_ctx(struct gss_cl_ctx *ctx)
@@ -220,6 +234,7 @@ err:
220 return p; 234 return p;
221} 235}
222 236
237#define UPCALL_BUF_LEN 128
223 238
224struct gss_upcall_msg { 239struct gss_upcall_msg {
225 atomic_t count; 240 atomic_t count;
@@ -227,16 +242,41 @@ struct gss_upcall_msg {
227 struct rpc_pipe_msg msg; 242 struct rpc_pipe_msg msg;
228 struct list_head list; 243 struct list_head list;
229 struct gss_auth *auth; 244 struct gss_auth *auth;
245 struct rpc_inode *inode;
230 struct rpc_wait_queue rpc_waitqueue; 246 struct rpc_wait_queue rpc_waitqueue;
231 wait_queue_head_t waitqueue; 247 wait_queue_head_t waitqueue;
232 struct gss_cl_ctx *ctx; 248 struct gss_cl_ctx *ctx;
249 char databuf[UPCALL_BUF_LEN];
233}; 250};
234 251
252static int get_pipe_version(void)
253{
254 int ret;
255
256 spin_lock(&pipe_version_lock);
257 if (pipe_version >= 0) {
258 atomic_inc(&pipe_users);
259 ret = pipe_version;
260 } else
261 ret = -EAGAIN;
262 spin_unlock(&pipe_version_lock);
263 return ret;
264}
265
266static void put_pipe_version(void)
267{
268 if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
269 pipe_version = -1;
270 spin_unlock(&pipe_version_lock);
271 }
272}
273
235static void 274static void
236gss_release_msg(struct gss_upcall_msg *gss_msg) 275gss_release_msg(struct gss_upcall_msg *gss_msg)
237{ 276{
238 if (!atomic_dec_and_test(&gss_msg->count)) 277 if (!atomic_dec_and_test(&gss_msg->count))
239 return; 278 return;
279 put_pipe_version();
240 BUG_ON(!list_empty(&gss_msg->list)); 280 BUG_ON(!list_empty(&gss_msg->list));
241 if (gss_msg->ctx != NULL) 281 if (gss_msg->ctx != NULL)
242 gss_put_ctx(gss_msg->ctx); 282 gss_put_ctx(gss_msg->ctx);
@@ -266,8 +306,8 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
266static inline struct gss_upcall_msg * 306static inline struct gss_upcall_msg *
267gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) 307gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
268{ 308{
269 struct inode *inode = gss_auth->dentry->d_inode; 309 struct rpc_inode *rpci = gss_msg->inode;
270 struct rpc_inode *rpci = RPC_I(inode); 310 struct inode *inode = &rpci->vfs_inode;
271 struct gss_upcall_msg *old; 311 struct gss_upcall_msg *old;
272 312
273 spin_lock(&inode->i_lock); 313 spin_lock(&inode->i_lock);
@@ -293,8 +333,7 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
293static void 333static void
294gss_unhash_msg(struct gss_upcall_msg *gss_msg) 334gss_unhash_msg(struct gss_upcall_msg *gss_msg)
295{ 335{
296 struct gss_auth *gss_auth = gss_msg->auth; 336 struct inode *inode = &gss_msg->inode->vfs_inode;
297 struct inode *inode = gss_auth->dentry->d_inode;
298 337
299 if (list_empty(&gss_msg->list)) 338 if (list_empty(&gss_msg->list))
300 return; 339 return;
@@ -310,7 +349,7 @@ gss_upcall_callback(struct rpc_task *task)
310 struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, 349 struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
311 struct gss_cred, gc_base); 350 struct gss_cred, gc_base);
312 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; 351 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
313 struct inode *inode = gss_msg->auth->dentry->d_inode; 352 struct inode *inode = &gss_msg->inode->vfs_inode;
314 353
315 spin_lock(&inode->i_lock); 354 spin_lock(&inode->i_lock);
316 if (gss_msg->ctx) 355 if (gss_msg->ctx)
@@ -323,22 +362,75 @@ gss_upcall_callback(struct rpc_task *task)
323 gss_release_msg(gss_msg); 362 gss_release_msg(gss_msg);
324} 363}
325 364
365static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
366{
367 gss_msg->msg.data = &gss_msg->uid;
368 gss_msg->msg.len = sizeof(gss_msg->uid);
369}
370
371static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
372 struct rpc_clnt *clnt, int machine_cred)
373{
374 char *p = gss_msg->databuf;
375 int len = 0;
376
377 gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
378 gss_msg->auth->mech->gm_name,
379 gss_msg->uid);
380 p += gss_msg->msg.len;
381 if (clnt->cl_principal) {
382 len = sprintf(p, "target=%s ", clnt->cl_principal);
383 p += len;
384 gss_msg->msg.len += len;
385 }
386 if (machine_cred) {
387 len = sprintf(p, "service=* ");
388 p += len;
389 gss_msg->msg.len += len;
390 } else if (!strcmp(clnt->cl_program->name, "nfs4_cb")) {
391 len = sprintf(p, "service=nfs ");
392 p += len;
393 gss_msg->msg.len += len;
394 }
395 len = sprintf(p, "\n");
396 gss_msg->msg.len += len;
397
398 gss_msg->msg.data = gss_msg->databuf;
399 BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
400}
401
402static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
403 struct rpc_clnt *clnt, int machine_cred)
404{
405 if (pipe_version == 0)
406 gss_encode_v0_msg(gss_msg);
407 else /* pipe_version == 1 */
408 gss_encode_v1_msg(gss_msg, clnt, machine_cred);
409}
410
326static inline struct gss_upcall_msg * 411static inline struct gss_upcall_msg *
327gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid) 412gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid, struct rpc_clnt *clnt,
413 int machine_cred)
328{ 414{
329 struct gss_upcall_msg *gss_msg; 415 struct gss_upcall_msg *gss_msg;
416 int vers;
330 417
331 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); 418 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
332 if (gss_msg != NULL) { 419 if (gss_msg == NULL)
333 INIT_LIST_HEAD(&gss_msg->list); 420 return ERR_PTR(-ENOMEM);
334 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 421 vers = get_pipe_version();
335 init_waitqueue_head(&gss_msg->waitqueue); 422 if (vers < 0) {
336 atomic_set(&gss_msg->count, 1); 423 kfree(gss_msg);
337 gss_msg->msg.data = &gss_msg->uid; 424 return ERR_PTR(vers);
338 gss_msg->msg.len = sizeof(gss_msg->uid);
339 gss_msg->uid = uid;
340 gss_msg->auth = gss_auth;
341 } 425 }
426 gss_msg->inode = RPC_I(gss_auth->dentry[vers]->d_inode);
427 INIT_LIST_HEAD(&gss_msg->list);
428 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
429 init_waitqueue_head(&gss_msg->waitqueue);
430 atomic_set(&gss_msg->count, 1);
431 gss_msg->uid = uid;
432 gss_msg->auth = gss_auth;
433 gss_encode_msg(gss_msg, clnt, machine_cred);
342 return gss_msg; 434 return gss_msg;
343} 435}
344 436
@@ -350,16 +442,13 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
350 struct gss_upcall_msg *gss_new, *gss_msg; 442 struct gss_upcall_msg *gss_new, *gss_msg;
351 uid_t uid = cred->cr_uid; 443 uid_t uid = cred->cr_uid;
352 444
353 /* Special case: rpc.gssd assumes that uid == 0 implies machine creds */ 445 gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
354 if (gss_cred->gc_machine_cred != 0) 446 if (IS_ERR(gss_new))
355 uid = 0; 447 return gss_new;
356
357 gss_new = gss_alloc_msg(gss_auth, uid);
358 if (gss_new == NULL)
359 return ERR_PTR(-ENOMEM);
360 gss_msg = gss_add_msg(gss_auth, gss_new); 448 gss_msg = gss_add_msg(gss_auth, gss_new);
361 if (gss_msg == gss_new) { 449 if (gss_msg == gss_new) {
362 int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg); 450 struct inode *inode = &gss_new->inode->vfs_inode;
451 int res = rpc_queue_upcall(inode, &gss_new->msg);
363 if (res) { 452 if (res) {
364 gss_unhash_msg(gss_new); 453 gss_unhash_msg(gss_new);
365 gss_msg = ERR_PTR(res); 454 gss_msg = ERR_PTR(res);
@@ -369,6 +458,18 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
369 return gss_msg; 458 return gss_msg;
370} 459}
371 460
461static void warn_gssd(void)
462{
463 static unsigned long ratelimit;
464 unsigned long now = jiffies;
465
466 if (time_after(now, ratelimit)) {
467 printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
468 "Please check user daemon is running.\n");
469 ratelimit = now + 15*HZ;
470 }
471}
472
372static inline int 473static inline int
373gss_refresh_upcall(struct rpc_task *task) 474gss_refresh_upcall(struct rpc_task *task)
374{ 475{
@@ -378,16 +479,25 @@ gss_refresh_upcall(struct rpc_task *task)
378 struct gss_cred *gss_cred = container_of(cred, 479 struct gss_cred *gss_cred = container_of(cred,
379 struct gss_cred, gc_base); 480 struct gss_cred, gc_base);
380 struct gss_upcall_msg *gss_msg; 481 struct gss_upcall_msg *gss_msg;
381 struct inode *inode = gss_auth->dentry->d_inode; 482 struct inode *inode;
382 int err = 0; 483 int err = 0;
383 484
384 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, 485 dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
385 cred->cr_uid); 486 cred->cr_uid);
386 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); 487 gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
488 if (PTR_ERR(gss_msg) == -EAGAIN) {
489 /* XXX: warning on the first, under the assumption we
490 * shouldn't normally hit this case on a refresh. */
491 warn_gssd();
492 task->tk_timeout = 15*HZ;
493 rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
494 return 0;
495 }
387 if (IS_ERR(gss_msg)) { 496 if (IS_ERR(gss_msg)) {
388 err = PTR_ERR(gss_msg); 497 err = PTR_ERR(gss_msg);
389 goto out; 498 goto out;
390 } 499 }
500 inode = &gss_msg->inode->vfs_inode;
391 spin_lock(&inode->i_lock); 501 spin_lock(&inode->i_lock);
392 if (gss_cred->gc_upcall != NULL) 502 if (gss_cred->gc_upcall != NULL)
393 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); 503 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
@@ -414,18 +524,29 @@ out:
414static inline int 524static inline int
415gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 525gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
416{ 526{
417 struct inode *inode = gss_auth->dentry->d_inode; 527 struct inode *inode;
418 struct rpc_cred *cred = &gss_cred->gc_base; 528 struct rpc_cred *cred = &gss_cred->gc_base;
419 struct gss_upcall_msg *gss_msg; 529 struct gss_upcall_msg *gss_msg;
420 DEFINE_WAIT(wait); 530 DEFINE_WAIT(wait);
421 int err = 0; 531 int err = 0;
422 532
423 dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid); 533 dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
534retry:
424 gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); 535 gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
536 if (PTR_ERR(gss_msg) == -EAGAIN) {
537 err = wait_event_interruptible_timeout(pipe_version_waitqueue,
538 pipe_version >= 0, 15*HZ);
539 if (err)
540 goto out;
541 if (pipe_version < 0)
542 warn_gssd();
543 goto retry;
544 }
425 if (IS_ERR(gss_msg)) { 545 if (IS_ERR(gss_msg)) {
426 err = PTR_ERR(gss_msg); 546 err = PTR_ERR(gss_msg);
427 goto out; 547 goto out;
428 } 548 }
549 inode = &gss_msg->inode->vfs_inode;
429 for (;;) { 550 for (;;) {
430 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); 551 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
431 spin_lock(&inode->i_lock); 552 spin_lock(&inode->i_lock);
@@ -543,6 +664,38 @@ out:
543 return err; 664 return err;
544} 665}
545 666
667static int gss_pipe_open(struct inode *inode, int new_version)
668{
669 int ret = 0;
670
671 spin_lock(&pipe_version_lock);
672 if (pipe_version < 0) {
673 /* First open of any gss pipe determines the version: */
674 pipe_version = new_version;
675 rpc_wake_up(&pipe_version_rpc_waitqueue);
676 wake_up(&pipe_version_waitqueue);
677 } else if (pipe_version != new_version) {
678 /* Trying to open a pipe of a different version */
679 ret = -EBUSY;
680 goto out;
681 }
682 atomic_inc(&pipe_users);
683out:
684 spin_unlock(&pipe_version_lock);
685 return ret;
686
687}
688
689static int gss_pipe_open_v0(struct inode *inode)
690{
691 return gss_pipe_open(inode, 0);
692}
693
694static int gss_pipe_open_v1(struct inode *inode)
695{
696 return gss_pipe_open(inode, 1);
697}
698
546static void 699static void
547gss_pipe_release(struct inode *inode) 700gss_pipe_release(struct inode *inode)
548{ 701{
@@ -562,27 +715,22 @@ gss_pipe_release(struct inode *inode)
562 spin_lock(&inode->i_lock); 715 spin_lock(&inode->i_lock);
563 } 716 }
564 spin_unlock(&inode->i_lock); 717 spin_unlock(&inode->i_lock);
718
719 put_pipe_version();
565} 720}
566 721
567static void 722static void
568gss_pipe_destroy_msg(struct rpc_pipe_msg *msg) 723gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
569{ 724{
570 struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg); 725 struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
571 static unsigned long ratelimit;
572 726
573 if (msg->errno < 0) { 727 if (msg->errno < 0) {
574 dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n", 728 dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
575 gss_msg); 729 gss_msg);
576 atomic_inc(&gss_msg->count); 730 atomic_inc(&gss_msg->count);
577 gss_unhash_msg(gss_msg); 731 gss_unhash_msg(gss_msg);
578 if (msg->errno == -ETIMEDOUT) { 732 if (msg->errno == -ETIMEDOUT)
579 unsigned long now = jiffies; 733 warn_gssd();
580 if (time_after(now, ratelimit)) {
581 printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
582 "Please check user daemon is running!\n");
583 ratelimit = now + 15*HZ;
584 }
585 }
586 gss_release_msg(gss_msg); 734 gss_release_msg(gss_msg);
587 } 735 }
588} 736}
@@ -623,20 +771,38 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
623 atomic_set(&auth->au_count, 1); 771 atomic_set(&auth->au_count, 1);
624 kref_init(&gss_auth->kref); 772 kref_init(&gss_auth->kref);
625 773
626 gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, 774 /*
627 clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); 775 * Note: if we created the old pipe first, then someone who
628 if (IS_ERR(gss_auth->dentry)) { 776 * examined the directory at the right moment might conclude
629 err = PTR_ERR(gss_auth->dentry); 777 * that we supported only the old pipe. So we instead create
778 * the new pipe first.
779 */
780 gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry,
781 "gssd",
782 clnt, &gss_upcall_ops_v1,
783 RPC_PIPE_WAIT_FOR_OPEN);
784 if (IS_ERR(gss_auth->dentry[1])) {
785 err = PTR_ERR(gss_auth->dentry[1]);
630 goto err_put_mech; 786 goto err_put_mech;
631 } 787 }
632 788
789 gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry,
790 gss_auth->mech->gm_name,
791 clnt, &gss_upcall_ops_v0,
792 RPC_PIPE_WAIT_FOR_OPEN);
793 if (IS_ERR(gss_auth->dentry[0])) {
794 err = PTR_ERR(gss_auth->dentry[0]);
795 goto err_unlink_pipe_1;
796 }
633 err = rpcauth_init_credcache(auth); 797 err = rpcauth_init_credcache(auth);
634 if (err) 798 if (err)
635 goto err_unlink_pipe; 799 goto err_unlink_pipe_0;
636 800
637 return auth; 801 return auth;
638err_unlink_pipe: 802err_unlink_pipe_0:
639 rpc_unlink(gss_auth->dentry); 803 rpc_unlink(gss_auth->dentry[0]);
804err_unlink_pipe_1:
805 rpc_unlink(gss_auth->dentry[1]);
640err_put_mech: 806err_put_mech:
641 gss_mech_put(gss_auth->mech); 807 gss_mech_put(gss_auth->mech);
642err_free: 808err_free:
@@ -649,8 +815,8 @@ out_dec:
649static void 815static void
650gss_free(struct gss_auth *gss_auth) 816gss_free(struct gss_auth *gss_auth)
651{ 817{
652 rpc_unlink(gss_auth->dentry); 818 rpc_unlink(gss_auth->dentry[1]);
653 gss_auth->dentry = NULL; 819 rpc_unlink(gss_auth->dentry[0]);
654 gss_mech_put(gss_auth->mech); 820 gss_mech_put(gss_auth->mech);
655 821
656 kfree(gss_auth); 822 kfree(gss_auth);
@@ -693,7 +859,7 @@ gss_destroying_context(struct rpc_cred *cred)
693 struct rpc_task *task; 859 struct rpc_task *task;
694 860
695 if (gss_cred->gc_ctx == NULL || 861 if (gss_cred->gc_ctx == NULL ||
696 test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) 862 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
697 return 0; 863 return 0;
698 864
699 gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; 865 gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
@@ -757,14 +923,12 @@ gss_free_cred_callback(struct rcu_head *head)
757} 923}
758 924
759static void 925static void
760gss_destroy_cred(struct rpc_cred *cred) 926gss_destroy_nullcred(struct rpc_cred *cred)
761{ 927{
762 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 928 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
763 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); 929 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
764 struct gss_cl_ctx *ctx = gss_cred->gc_ctx; 930 struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
765 931
766 if (gss_destroying_context(cred))
767 return;
768 rcu_assign_pointer(gss_cred->gc_ctx, NULL); 932 rcu_assign_pointer(gss_cred->gc_ctx, NULL);
769 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 933 call_rcu(&cred->cr_rcu, gss_free_cred_callback);
770 if (ctx) 934 if (ctx)
@@ -772,6 +936,15 @@ gss_destroy_cred(struct rpc_cred *cred)
772 kref_put(&gss_auth->kref, gss_free_callback); 936 kref_put(&gss_auth->kref, gss_free_callback);
773} 937}
774 938
939static void
940gss_destroy_cred(struct rpc_cred *cred)
941{
942
943 if (gss_destroying_context(cred))
944 return;
945 gss_destroy_nullcred(cred);
946}
947
775/* 948/*
776 * Lookup RPCSEC_GSS cred for the current process 949 * Lookup RPCSEC_GSS cred for the current process
777 */ 950 */
@@ -1017,7 +1190,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1017 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1190 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1018 *p++ = htonl(rqstp->rq_seqno); 1191 *p++ = htonl(rqstp->rq_seqno);
1019 1192
1020 status = rpc_call_xdrproc(encode, rqstp, p, obj); 1193 status = encode(rqstp, p, obj);
1021 if (status) 1194 if (status)
1022 return status; 1195 return status;
1023 1196
@@ -1111,7 +1284,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1111 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1284 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1112 *p++ = htonl(rqstp->rq_seqno); 1285 *p++ = htonl(rqstp->rq_seqno);
1113 1286
1114 status = rpc_call_xdrproc(encode, rqstp, p, obj); 1287 status = encode(rqstp, p, obj);
1115 if (status) 1288 if (status)
1116 return status; 1289 return status;
1117 1290
@@ -1170,12 +1343,12 @@ gss_wrap_req(struct rpc_task *task,
1170 /* The spec seems a little ambiguous here, but I think that not 1343 /* The spec seems a little ambiguous here, but I think that not
1171 * wrapping context destruction requests makes the most sense. 1344 * wrapping context destruction requests makes the most sense.
1172 */ 1345 */
1173 status = rpc_call_xdrproc(encode, rqstp, p, obj); 1346 status = encode(rqstp, p, obj);
1174 goto out; 1347 goto out;
1175 } 1348 }
1176 switch (gss_cred->gc_service) { 1349 switch (gss_cred->gc_service) {
1177 case RPC_GSS_SVC_NONE: 1350 case RPC_GSS_SVC_NONE:
1178 status = rpc_call_xdrproc(encode, rqstp, p, obj); 1351 status = encode(rqstp, p, obj);
1179 break; 1352 break;
1180 case RPC_GSS_SVC_INTEGRITY: 1353 case RPC_GSS_SVC_INTEGRITY:
1181 status = gss_wrap_req_integ(cred, ctx, encode, 1354 status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1291,7 +1464,7 @@ gss_unwrap_resp(struct rpc_task *task,
1291 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1464 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
1292 + (savedlen - head->iov_len); 1465 + (savedlen - head->iov_len);
1293out_decode: 1466out_decode:
1294 status = rpc_call_xdrproc(decode, rqstp, p, obj); 1467 status = decode(rqstp, p, obj);
1295out: 1468out:
1296 gss_put_ctx(ctx); 1469 gss_put_ctx(ctx);
1297 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, 1470 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
@@ -1324,7 +1497,7 @@ static const struct rpc_credops gss_credops = {
1324 1497
1325static const struct rpc_credops gss_nullops = { 1498static const struct rpc_credops gss_nullops = {
1326 .cr_name = "AUTH_GSS", 1499 .cr_name = "AUTH_GSS",
1327 .crdestroy = gss_destroy_cred, 1500 .crdestroy = gss_destroy_nullcred,
1328 .crbind = rpcauth_generic_bind_cred, 1501 .crbind = rpcauth_generic_bind_cred,
1329 .crmatch = gss_match, 1502 .crmatch = gss_match,
1330 .crmarshal = gss_marshal, 1503 .crmarshal = gss_marshal,
@@ -1334,10 +1507,19 @@ static const struct rpc_credops gss_nullops = {
1334 .crunwrap_resp = gss_unwrap_resp, 1507 .crunwrap_resp = gss_unwrap_resp,
1335}; 1508};
1336 1509
1337static struct rpc_pipe_ops gss_upcall_ops = { 1510static struct rpc_pipe_ops gss_upcall_ops_v0 = {
1511 .upcall = gss_pipe_upcall,
1512 .downcall = gss_pipe_downcall,
1513 .destroy_msg = gss_pipe_destroy_msg,
1514 .open_pipe = gss_pipe_open_v0,
1515 .release_pipe = gss_pipe_release,
1516};
1517
1518static struct rpc_pipe_ops gss_upcall_ops_v1 = {
1338 .upcall = gss_pipe_upcall, 1519 .upcall = gss_pipe_upcall,
1339 .downcall = gss_pipe_downcall, 1520 .downcall = gss_pipe_downcall,
1340 .destroy_msg = gss_pipe_destroy_msg, 1521 .destroy_msg = gss_pipe_destroy_msg,
1522 .open_pipe = gss_pipe_open_v1,
1341 .release_pipe = gss_pipe_release, 1523 .release_pipe = gss_pipe_release,
1342}; 1524};
1343 1525
@@ -1354,6 +1536,7 @@ static int __init init_rpcsec_gss(void)
1354 err = gss_svc_init(); 1536 err = gss_svc_init();
1355 if (err) 1537 if (err)
1356 goto out_unregister; 1538 goto out_unregister;
1539 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
1357 return 0; 1540 return 0;
1358out_unregister: 1541out_unregister:
1359 rpcauth_unregister(&authgss_ops); 1542 rpcauth_unregister(&authgss_ops);
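
For reference, the other end of the new text-based pipe: gssd reads one line of space-separated key=value pairs per upcall, exactly as built by gss_encode_v1_msg() above, e.g. "mech=krb5 uid=1000 target=nfs@server service=nfs". A user-space sketch of a reader; the rpc_pipefs path is hypothetical (gssd discovers it by scanning clnt* directories):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd = open("/var/lib/nfs/rpc_pipefs/nfs/clnt00/gssd", O_RDWR);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* one upcall per read */
	if (n > 0) {
		buf[n] = '\0';
		printf("upcall: %s", buf);	/* line ends in '\n' */
	}
	close(fd);
	return 0;
}
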
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index d83b881685fe..c0ba39c4f5f2 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -152,7 +152,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
152 return(1 + der_length_size(body_size) + body_size); 152 return(1 + der_length_size(body_size) + body_size);
153} 153}
154 154
155EXPORT_SYMBOL(g_token_size); 155EXPORT_SYMBOL_GPL(g_token_size);
156 156
157/* fills in a buffer with the token header. The buffer is assumed to 157/* fills in a buffer with the token header. The buffer is assumed to
158 be the right size. buf is advanced past the token header */ 158 be the right size. buf is advanced past the token header */
@@ -167,7 +167,7 @@ g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
167 TWRITE_STR(*buf, mech->data, ((int) mech->len)); 167 TWRITE_STR(*buf, mech->data, ((int) mech->len));
168} 168}
169 169
170EXPORT_SYMBOL(g_make_token_header); 170EXPORT_SYMBOL_GPL(g_make_token_header);
171 171
172/* 172/*
173 * Given a buffer containing a token, reads and verifies the token, 173 * Given a buffer containing a token, reads and verifies the token,
@@ -231,5 +231,5 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
231 return(ret); 231 return(ret);
232} 232}
233 233
234EXPORT_SYMBOL(g_verify_token_header); 234EXPORT_SYMBOL_GPL(g_verify_token_header);
235 235
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index bce9d527af08..6efbb0cd3c7c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -117,7 +117,7 @@ gss_mech_register(struct gss_api_mech *gm)
117 return 0; 117 return 0;
118} 118}
119 119
120EXPORT_SYMBOL(gss_mech_register); 120EXPORT_SYMBOL_GPL(gss_mech_register);
121 121
122void 122void
123gss_mech_unregister(struct gss_api_mech *gm) 123gss_mech_unregister(struct gss_api_mech *gm)
@@ -129,7 +129,7 @@ gss_mech_unregister(struct gss_api_mech *gm)
129 gss_mech_free(gm); 129 gss_mech_free(gm);
130} 130}
131 131
132EXPORT_SYMBOL(gss_mech_unregister); 132EXPORT_SYMBOL_GPL(gss_mech_unregister);
133 133
134struct gss_api_mech * 134struct gss_api_mech *
135gss_mech_get(struct gss_api_mech *gm) 135gss_mech_get(struct gss_api_mech *gm)
@@ -138,7 +138,7 @@ gss_mech_get(struct gss_api_mech *gm)
138 return gm; 138 return gm;
139} 139}
140 140
141EXPORT_SYMBOL(gss_mech_get); 141EXPORT_SYMBOL_GPL(gss_mech_get);
142 142
143struct gss_api_mech * 143struct gss_api_mech *
144gss_mech_get_by_name(const char *name) 144gss_mech_get_by_name(const char *name)
@@ -158,7 +158,7 @@ gss_mech_get_by_name(const char *name)
158 158
159} 159}
160 160
161EXPORT_SYMBOL(gss_mech_get_by_name); 161EXPORT_SYMBOL_GPL(gss_mech_get_by_name);
162 162
163static inline int 163static inline int
164mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor) 164mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
@@ -191,7 +191,7 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
191 return gm; 191 return gm;
192} 192}
193 193
194EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor); 194EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
195 195
196u32 196u32
197gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service) 197gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
@@ -205,7 +205,7 @@ gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
205 } 205 }
206 return RPC_AUTH_MAXFLAVOR; /* illegal value */ 206 return RPC_AUTH_MAXFLAVOR; /* illegal value */
207} 207}
208EXPORT_SYMBOL(gss_svc_to_pseudoflavor); 208EXPORT_SYMBOL_GPL(gss_svc_to_pseudoflavor);
209 209
210u32 210u32
211gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor) 211gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
@@ -219,7 +219,7 @@ gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
219 return 0; 219 return 0;
220} 220}
221 221
222EXPORT_SYMBOL(gss_pseudoflavor_to_service); 222EXPORT_SYMBOL_GPL(gss_pseudoflavor_to_service);
223 223
224char * 224char *
225gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service) 225gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
@@ -233,7 +233,7 @@ gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
233 return NULL; 233 return NULL;
234} 234}
235 235
236EXPORT_SYMBOL(gss_service_to_auth_domain_name); 236EXPORT_SYMBOL_GPL(gss_service_to_auth_domain_name);
237 237
238void 238void
239gss_mech_put(struct gss_api_mech * gm) 239gss_mech_put(struct gss_api_mech * gm)
@@ -242,7 +242,7 @@ gss_mech_put(struct gss_api_mech * gm)
242 module_put(gm->gm_owner); 242 module_put(gm->gm_owner);
243} 243}
244 244
245EXPORT_SYMBOL(gss_mech_put); 245EXPORT_SYMBOL_GPL(gss_mech_put);
246 246
247/* The mech could probably be determined from the token instead, but it's just 247/* The mech could probably be determined from the token instead, but it's just
248 * as easy for now to pass it in. */ 248 * as easy for now to pass it in. */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 81ae3d62a0cc..2278a50c6444 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -332,6 +332,7 @@ struct rsc {
332 struct svc_cred cred; 332 struct svc_cred cred;
333 struct gss_svc_seq_data seqdata; 333 struct gss_svc_seq_data seqdata;
334 struct gss_ctx *mechctx; 334 struct gss_ctx *mechctx;
335 char *client_name;
335}; 336};
336 337
337static struct cache_head *rsc_table[RSC_HASHMAX]; 338static struct cache_head *rsc_table[RSC_HASHMAX];
@@ -346,6 +347,7 @@ static void rsc_free(struct rsc *rsci)
346 gss_delete_sec_context(&rsci->mechctx); 347 gss_delete_sec_context(&rsci->mechctx);
347 if (rsci->cred.cr_group_info) 348 if (rsci->cred.cr_group_info)
348 put_group_info(rsci->cred.cr_group_info); 349 put_group_info(rsci->cred.cr_group_info);
350 kfree(rsci->client_name);
349} 351}
350 352
351static void rsc_put(struct kref *ref) 353static void rsc_put(struct kref *ref)
@@ -383,6 +385,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
383 tmp->handle.data = NULL; 385 tmp->handle.data = NULL;
384 new->mechctx = NULL; 386 new->mechctx = NULL;
385 new->cred.cr_group_info = NULL; 387 new->cred.cr_group_info = NULL;
388 new->client_name = NULL;
386} 389}
387 390
388static void 391static void
@@ -397,6 +400,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
397 spin_lock_init(&new->seqdata.sd_lock); 400 spin_lock_init(&new->seqdata.sd_lock);
398 new->cred = tmp->cred; 401 new->cred = tmp->cred;
399 tmp->cred.cr_group_info = NULL; 402 tmp->cred.cr_group_info = NULL;
403 new->client_name = tmp->client_name;
404 tmp->client_name = NULL;
400} 405}
401 406
402static struct cache_head * 407static struct cache_head *
@@ -486,6 +491,15 @@ static int rsc_parse(struct cache_detail *cd,
486 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); 491 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
487 if (status) 492 if (status)
488 goto out; 493 goto out;
494
495 /* get client name */
496 len = qword_get(&mesg, buf, mlen);
497 if (len > 0) {
498 rsci.client_name = kstrdup(buf, GFP_KERNEL);
499 if (!rsci.client_name)
500 goto out;
501 }
502
489 } 503 }
490 rsci.h.expiry_time = expiry; 504 rsci.h.expiry_time = expiry;
491 rscp = rsc_update(&rsci, rscp); 505 rscp = rsc_update(&rsci, rscp);
@@ -746,7 +760,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
746 return gd->pseudoflavor; 760 return gd->pseudoflavor;
747} 761}
748 762
749EXPORT_SYMBOL(svcauth_gss_flavor); 763EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
750 764
751int 765int
752svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) 766svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
@@ -780,7 +794,7 @@ out:
780 return stat; 794 return stat;
781} 795}
782 796
783EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor); 797EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
784 798
785static inline int 799static inline int
786read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj) 800read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
@@ -913,6 +927,16 @@ struct gss_svc_data {
913 struct rsc *rsci; 927 struct rsc *rsci;
914}; 928};
915 929
930char *svc_gss_principal(struct svc_rqst *rqstp)
931{
932 struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
933
934 if (gd && gd->rsci)
935 return gd->rsci->client_name;
936 return NULL;
937}
938EXPORT_SYMBOL_GPL(svc_gss_principal);
939
916static int 940static int
917svcauth_gss_set_client(struct svc_rqst *rqstp) 941svcauth_gss_set_client(struct svc_rqst *rqstp)
918{ 942{
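
The new client_name field and svc_gss_principal() let an RPC service look up the authenticated GSS principal for a request. A sketch of a server-side consumer; the policy check itself is hypothetical:

#include <linux/string.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth_gss.h>

static int example_require_principal(struct svc_rqst *rqstp,
				     const char *expected)
{
	char *principal = svc_gss_principal(rqstp);

	if (principal == NULL)
		return -EPERM;	/* not authenticated via RPCSEC_GSS */
	return strcmp(principal, expected) == 0 ? 0 : -EPERM;
}
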
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c9966713282a..4735caad26ed 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -98,7 +98,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
98 98
99 return new; 99 return new;
100} 100}
101EXPORT_SYMBOL(sunrpc_cache_lookup); 101EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
102 102
103 103
104static void queue_loose(struct cache_detail *detail, struct cache_head *ch); 104static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
@@ -173,7 +173,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
173 cache_put(old, detail); 173 cache_put(old, detail);
174 return tmp; 174 return tmp;
175} 175}
176EXPORT_SYMBOL(sunrpc_cache_update); 176EXPORT_SYMBOL_GPL(sunrpc_cache_update);
177 177
178static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); 178static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
179/* 179/*
@@ -245,7 +245,7 @@ int cache_check(struct cache_detail *detail,
245 cache_put(h, detail); 245 cache_put(h, detail);
246 return rv; 246 return rv;
247} 247}
248EXPORT_SYMBOL(cache_check); 248EXPORT_SYMBOL_GPL(cache_check);
249 249
250/* 250/*
251 * caches need to be periodically cleaned. 251 * caches need to be periodically cleaned.
@@ -373,7 +373,7 @@ int cache_register(struct cache_detail *cd)
373 schedule_delayed_work(&cache_cleaner, 0); 373 schedule_delayed_work(&cache_cleaner, 0);
374 return 0; 374 return 0;
375} 375}
376EXPORT_SYMBOL(cache_register); 376EXPORT_SYMBOL_GPL(cache_register);
377 377
378void cache_unregister(struct cache_detail *cd) 378void cache_unregister(struct cache_detail *cd)
379{ 379{
@@ -399,7 +399,7 @@ void cache_unregister(struct cache_detail *cd)
399out: 399out:
400 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); 400 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
401} 401}
402EXPORT_SYMBOL(cache_unregister); 402EXPORT_SYMBOL_GPL(cache_unregister);
403 403
404/* clean cache tries to find something to clean 404/* clean cache tries to find something to clean
405 * and cleans it. 405 * and cleans it.
@@ -514,7 +514,7 @@ void cache_flush(void)
514 while (cache_clean() != -1) 514 while (cache_clean() != -1)
515 cond_resched(); 515 cond_resched();
516} 516}
517EXPORT_SYMBOL(cache_flush); 517EXPORT_SYMBOL_GPL(cache_flush);
518 518
519void cache_purge(struct cache_detail *detail) 519void cache_purge(struct cache_detail *detail)
520{ 520{
@@ -523,7 +523,7 @@ void cache_purge(struct cache_detail *detail)
523 cache_flush(); 523 cache_flush();
524 detail->flush_time = 1; 524 detail->flush_time = 1;
525} 525}
526EXPORT_SYMBOL(cache_purge); 526EXPORT_SYMBOL_GPL(cache_purge);
527 527
528 528
529/* 529/*
@@ -988,7 +988,7 @@ void qword_add(char **bpp, int *lp, char *str)
988 *bpp = bp; 988 *bpp = bp;
989 *lp = len; 989 *lp = len;
990} 990}
991EXPORT_SYMBOL(qword_add); 991EXPORT_SYMBOL_GPL(qword_add);
992 992
993void qword_addhex(char **bpp, int *lp, char *buf, int blen) 993void qword_addhex(char **bpp, int *lp, char *buf, int blen)
994{ 994{
@@ -1017,7 +1017,7 @@ void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1017 *bpp = bp; 1017 *bpp = bp;
1018 *lp = len; 1018 *lp = len;
1019} 1019}
1020EXPORT_SYMBOL(qword_addhex); 1020EXPORT_SYMBOL_GPL(qword_addhex);
1021 1021
1022static void warn_no_listener(struct cache_detail *detail) 1022static void warn_no_listener(struct cache_detail *detail)
1023{ 1023{
@@ -1140,7 +1140,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
1140 *dest = '\0'; 1140 *dest = '\0';
1141 return len; 1141 return len;
1142} 1142}
1143EXPORT_SYMBOL(qword_get); 1143EXPORT_SYMBOL_GPL(qword_get);
1144 1144
1145 1145
1146/* 1146/*
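
The rsc_parse() hunk earlier consumes an optional trailing word with qword_get(), which decodes one space-separated (possibly %-escaped) word from a cache downcall line and returns the decoded length, or a negative value when the word is absent or malformed. A minimal sketch of that convention; mesg and item are hypothetical stand-ins for the parser's state:

	/* Sketch: reading an optional trailing word from a downcall line,
	 * in the style of the rsc_parse() change above. */
	char word[128];
	int len = qword_get(&mesg, word, sizeof(word));

	if (len > 0) {			/* word present and non-empty */
		item->client_name = kstrdup(word, GFP_KERNEL);
		if (!item->client_name)
			goto out;	/* unwind as the surrounding parser does */
	}
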
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 3ca518386d15..836f15c0c4a3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -197,6 +197,12 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
197 197
198 clnt->cl_rtt = &clnt->cl_rtt_default; 198 clnt->cl_rtt = &clnt->cl_rtt_default;
199 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); 199 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
200 clnt->cl_principal = NULL;
201 if (args->client_name) {
202 clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
203 if (!clnt->cl_principal)
204 goto out_no_principal;
205 }
200 206
201 kref_init(&clnt->cl_kref); 207 kref_init(&clnt->cl_kref);
202 208
@@ -226,6 +232,8 @@ out_no_auth:
226 rpc_put_mount(); 232 rpc_put_mount();
227 } 233 }
228out_no_path: 234out_no_path:
235 kfree(clnt->cl_principal);
236out_no_principal:
229 rpc_free_iostats(clnt->cl_metrics); 237 rpc_free_iostats(clnt->cl_metrics);
230out_no_stats: 238out_no_stats:
231 if (clnt->cl_server != clnt->cl_inline_name) 239 if (clnt->cl_server != clnt->cl_inline_name)
@@ -354,6 +362,11 @@ rpc_clone_client(struct rpc_clnt *clnt)
354 new->cl_metrics = rpc_alloc_iostats(clnt); 362 new->cl_metrics = rpc_alloc_iostats(clnt);
355 if (new->cl_metrics == NULL) 363 if (new->cl_metrics == NULL)
356 goto out_no_stats; 364 goto out_no_stats;
365 if (clnt->cl_principal) {
366 new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
367 if (new->cl_principal == NULL)
368 goto out_no_principal;
369 }
357 kref_init(&new->cl_kref); 370 kref_init(&new->cl_kref);
358 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 371 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
359 if (err != 0) 372 if (err != 0)
@@ -366,6 +379,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
366 rpciod_up(); 379 rpciod_up();
367 return new; 380 return new;
368out_no_path: 381out_no_path:
382 kfree(new->cl_principal);
383out_no_principal:
369 rpc_free_iostats(new->cl_metrics); 384 rpc_free_iostats(new->cl_metrics);
370out_no_stats: 385out_no_stats:
371 kfree(new); 386 kfree(new);
@@ -417,6 +432,7 @@ rpc_free_client(struct kref *kref)
417out_free: 432out_free:
418 rpc_unregister_client(clnt); 433 rpc_unregister_client(clnt);
419 rpc_free_iostats(clnt->cl_metrics); 434 rpc_free_iostats(clnt->cl_metrics);
435 kfree(clnt->cl_principal);
420 clnt->cl_metrics = NULL; 436 clnt->cl_metrics = NULL;
421 xprt_put(clnt->cl_xprt); 437 xprt_put(clnt->cl_xprt);
422 rpciod_down(); 438 rpciod_down();
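
The clnt.c hunks give the new cl_principal string full lifecycle coverage: kstrdup() on create and on clone, kfree() on each error-unwind label, and kfree() again in rpc_free_client(). The ownership pattern in isolation, as a sketch with hypothetical names:

	/* Duplicate-on-copy ownership for an optional string member. */
	struct example_client {
		char *principal;		/* NULL if none was supplied */
	};

	static int example_clone(struct example_client *new,
				 const struct example_client *old)
	{
		new->principal = NULL;
		if (old->principal) {
			new->principal = kstrdup(old->principal, GFP_KERNEL);
			if (!new->principal)
				return -ENOMEM;	/* caller unwinds; kfree(NULL) is safe */
		}
		return 0;
	}
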
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 23a2b8f6dc49..577385a4a5dc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -113,7 +113,7 @@ out:
113 wake_up(&rpci->waitq); 113 wake_up(&rpci->waitq);
114 return res; 114 return res;
115} 115}
116EXPORT_SYMBOL(rpc_queue_upcall); 116EXPORT_SYMBOL_GPL(rpc_queue_upcall);
117 117
118static inline void 118static inline void
119rpc_inode_setowner(struct inode *inode, void *private) 119rpc_inode_setowner(struct inode *inode, void *private)
@@ -126,13 +126,14 @@ rpc_close_pipes(struct inode *inode)
126{ 126{
127 struct rpc_inode *rpci = RPC_I(inode); 127 struct rpc_inode *rpci = RPC_I(inode);
128 struct rpc_pipe_ops *ops; 128 struct rpc_pipe_ops *ops;
129 int need_release;
129 130
130 mutex_lock(&inode->i_mutex); 131 mutex_lock(&inode->i_mutex);
131 ops = rpci->ops; 132 ops = rpci->ops;
132 if (ops != NULL) { 133 if (ops != NULL) {
133 LIST_HEAD(free_list); 134 LIST_HEAD(free_list);
134
135 spin_lock(&inode->i_lock); 135 spin_lock(&inode->i_lock);
136 need_release = rpci->nreaders != 0 || rpci->nwriters != 0;
136 rpci->nreaders = 0; 137 rpci->nreaders = 0;
137 list_splice_init(&rpci->in_upcall, &free_list); 138 list_splice_init(&rpci->in_upcall, &free_list);
138 list_splice_init(&rpci->pipe, &free_list); 139 list_splice_init(&rpci->pipe, &free_list);
@@ -141,7 +142,7 @@ rpc_close_pipes(struct inode *inode)
141 spin_unlock(&inode->i_lock); 142 spin_unlock(&inode->i_lock);
142 rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); 143 rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
143 rpci->nwriters = 0; 144 rpci->nwriters = 0;
144 if (ops->release_pipe) 145 if (need_release && ops->release_pipe)
145 ops->release_pipe(inode); 146 ops->release_pipe(inode);
146 cancel_delayed_work_sync(&rpci->queue_timeout); 147 cancel_delayed_work_sync(&rpci->queue_timeout);
147 } 148 }
@@ -169,16 +170,24 @@ static int
169rpc_pipe_open(struct inode *inode, struct file *filp) 170rpc_pipe_open(struct inode *inode, struct file *filp)
170{ 171{
171 struct rpc_inode *rpci = RPC_I(inode); 172 struct rpc_inode *rpci = RPC_I(inode);
173 int first_open;
172 int res = -ENXIO; 174 int res = -ENXIO;
173 175
174 mutex_lock(&inode->i_mutex); 176 mutex_lock(&inode->i_mutex);
175 if (rpci->ops != NULL) { 177 if (rpci->ops == NULL)
176 if (filp->f_mode & FMODE_READ) 178 goto out;
177 rpci->nreaders ++; 179 first_open = rpci->nreaders == 0 && rpci->nwriters == 0;
178 if (filp->f_mode & FMODE_WRITE) 180 if (first_open && rpci->ops->open_pipe) {
179 rpci->nwriters ++; 181 res = rpci->ops->open_pipe(inode);
180 res = 0; 182 if (res)
183 goto out;
181 } 184 }
185 if (filp->f_mode & FMODE_READ)
186 rpci->nreaders++;
187 if (filp->f_mode & FMODE_WRITE)
188 rpci->nwriters++;
189 res = 0;
190out:
182 mutex_unlock(&inode->i_mutex); 191 mutex_unlock(&inode->i_mutex);
183 return res; 192 return res;
184} 193}
@@ -188,6 +197,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
188{ 197{
189 struct rpc_inode *rpci = RPC_I(inode); 198 struct rpc_inode *rpci = RPC_I(inode);
190 struct rpc_pipe_msg *msg; 199 struct rpc_pipe_msg *msg;
200 int last_close;
191 201
192 mutex_lock(&inode->i_mutex); 202 mutex_lock(&inode->i_mutex);
193 if (rpci->ops == NULL) 203 if (rpci->ops == NULL)
@@ -214,7 +224,8 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
214 rpci->ops->destroy_msg, -EAGAIN); 224 rpci->ops->destroy_msg, -EAGAIN);
215 } 225 }
216 } 226 }
217 if (rpci->ops->release_pipe) 227 last_close = rpci->nwriters == 0 && rpci->nreaders == 0;
228 if (last_close && rpci->ops->release_pipe)
218 rpci->ops->release_pipe(inode); 229 rpci->ops->release_pipe(inode);
219out: 230out:
220 mutex_unlock(&inode->i_mutex); 231 mutex_unlock(&inode->i_mutex);
@@ -396,6 +407,7 @@ enum {
396 RPCAUTH_nfs, 407 RPCAUTH_nfs,
397 RPCAUTH_portmap, 408 RPCAUTH_portmap,
398 RPCAUTH_statd, 409 RPCAUTH_statd,
410 RPCAUTH_nfsd4_cb,
399 RPCAUTH_RootEOF 411 RPCAUTH_RootEOF
400}; 412};
401 413
@@ -429,6 +441,10 @@ static struct rpc_filelist files[] = {
429 .name = "statd", 441 .name = "statd",
430 .mode = S_IFDIR | S_IRUGO | S_IXUGO, 442 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
431 }, 443 },
444 [RPCAUTH_nfsd4_cb] = {
445 .name = "nfsd4_cb",
446 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
447 },
432}; 448};
433 449
434enum { 450enum {
@@ -506,8 +522,6 @@ rpc_get_inode(struct super_block *sb, int mode)
506 if (!inode) 522 if (!inode)
507 return NULL; 523 return NULL;
508 inode->i_mode = mode; 524 inode->i_mode = mode;
509 inode->i_uid = inode->i_gid = 0;
510 inode->i_blocks = 0;
511 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 525 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
512 switch(mode & S_IFMT) { 526 switch(mode & S_IFMT) {
513 case S_IFDIR: 527 case S_IFDIR:
@@ -748,7 +762,7 @@ rpc_rmdir(struct dentry *dentry)
748 * @name: name of pipe 762 * @name: name of pipe
749 * @private: private data to associate with the pipe, for the caller's use 763 * @private: private data to associate with the pipe, for the caller's use
750 * @ops: operations defining the behavior of the pipe: upcall, downcall, 764 * @ops: operations defining the behavior of the pipe: upcall, downcall,
751 * release_pipe, and destroy_msg. 765 * release_pipe, open_pipe, and destroy_msg.
752 * @flags: rpc_inode flags 766 * @flags: rpc_inode flags
753 * 767 *
754 * Data is made available for userspace to read by calls to 768 * Data is made available for userspace to read by calls to
@@ -808,7 +822,7 @@ err_dput:
808 -ENOMEM); 822 -ENOMEM);
809 goto out; 823 goto out;
810} 824}
811EXPORT_SYMBOL(rpc_mkpipe); 825EXPORT_SYMBOL_GPL(rpc_mkpipe);
812 826
813/** 827/**
814 * rpc_unlink - remove a pipe 828 * rpc_unlink - remove a pipe
@@ -839,7 +853,7 @@ rpc_unlink(struct dentry *dentry)
839 dput(parent); 853 dput(parent);
840 return error; 854 return error;
841} 855}
842EXPORT_SYMBOL(rpc_unlink); 856EXPORT_SYMBOL_GPL(rpc_unlink);
843 857
844/* 858/*
845 * populate the filesystem 859 * populate the filesystem
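
The rpc_pipe.c changes call the new ops->open_pipe hook only on the transition from zero readers and writers, and release_pipe only on the matching last close (or, in rpc_close_pipes(), only if someone actually held the pipe open). The gating idiom, sketched with a hypothetical state struct; the real code does this under inode->i_mutex:

	/* Sketch: edge-triggered open/release around reader/writer counts. */
	static int example_pipe_open(struct example_pipe *p, int rd, int wr)
	{
		int first_open = (p->nreaders == 0 && p->nwriters == 0);

		if (first_open && p->ops->open_pipe) {
			int res = p->ops->open_pipe(p);
			if (res)
				return res;	/* counts untouched on failure */
		}
		p->nreaders += rd;
		p->nwriters += wr;
		return 0;
	}
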
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 50b049c6598a..085372ef4feb 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -106,7 +106,7 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
106 seq_putc(seq, '\n'); 106 seq_putc(seq, '\n');
107 } 107 }
108} 108}
109EXPORT_SYMBOL(svc_seq_show); 109EXPORT_SYMBOL_GPL(svc_seq_show);
110 110
111/** 111/**
112 * rpc_alloc_iostats - allocate an rpc_iostats structure 112 * rpc_alloc_iostats - allocate an rpc_iostats structure
@@ -249,14 +249,14 @@ svc_proc_register(struct svc_stat *statp, const struct file_operations *fops)
249{ 249{
250 return do_register(statp->program->pg_name, statp, fops); 250 return do_register(statp->program->pg_name, statp, fops);
251} 251}
252EXPORT_SYMBOL(svc_proc_register); 252EXPORT_SYMBOL_GPL(svc_proc_register);
253 253
254void 254void
255svc_proc_unregister(const char *name) 255svc_proc_unregister(const char *name)
256{ 256{
257 remove_proc_entry(name, proc_net_rpc); 257 remove_proc_entry(name, proc_net_rpc);
258} 258}
259EXPORT_SYMBOL(svc_proc_unregister); 259EXPORT_SYMBOL_GPL(svc_proc_unregister);
260 260
261void 261void
262rpc_proc_init(void) 262rpc_proc_init(void)
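
Most hunks in cache.c, stats.c, svc.c and the files that follow are a mechanical conversion from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL, which keeps the symbols out of reach of modules without a GPL-compatible MODULE_LICENSE(). The difference at a definition site, shown with an illustrative symbol name:

	#include <linux/module.h>

	int sunrpc_example(void)		/* illustrative symbol */
	{
		return 0;
	}
	/* EXPORT_SYMBOL(sunrpc_example);        any module may link against it */
	EXPORT_SYMBOL_GPL(sunrpc_example);	/* GPL-compatible modules only */
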
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 54c98d876847..c51fed4d1af1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -431,7 +431,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize,
431{ 431{
432 return __svc_create(prog, bufsize, /*npools*/1, family, shutdown); 432 return __svc_create(prog, bufsize, /*npools*/1, family, shutdown);
433} 433}
434EXPORT_SYMBOL(svc_create); 434EXPORT_SYMBOL_GPL(svc_create);
435 435
436struct svc_serv * 436struct svc_serv *
437svc_create_pooled(struct svc_program *prog, unsigned int bufsize, 437svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
@@ -450,7 +450,7 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
450 450
451 return serv; 451 return serv;
452} 452}
453EXPORT_SYMBOL(svc_create_pooled); 453EXPORT_SYMBOL_GPL(svc_create_pooled);
454 454
455/* 455/*
456 * Destroy an RPC service. Should be called with appropriate locking to 456 * Destroy an RPC service. Should be called with appropriate locking to
@@ -492,7 +492,7 @@ svc_destroy(struct svc_serv *serv)
492 kfree(serv->sv_pools); 492 kfree(serv->sv_pools);
493 kfree(serv); 493 kfree(serv);
494} 494}
495EXPORT_SYMBOL(svc_destroy); 495EXPORT_SYMBOL_GPL(svc_destroy);
496 496
497/* 497/*
498 * Allocate an RPC server's buffer space. 498 * Allocate an RPC server's buffer space.
@@ -567,7 +567,7 @@ out_thread:
567out_enomem: 567out_enomem:
568 return ERR_PTR(-ENOMEM); 568 return ERR_PTR(-ENOMEM);
569} 569}
570EXPORT_SYMBOL(svc_prepare_thread); 570EXPORT_SYMBOL_GPL(svc_prepare_thread);
571 571
572/* 572/*
573 * Choose a pool in which to create a new thread, for svc_set_num_threads 573 * Choose a pool in which to create a new thread, for svc_set_num_threads
@@ -689,7 +689,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
689 689
690 return error; 690 return error;
691} 691}
692EXPORT_SYMBOL(svc_set_num_threads); 692EXPORT_SYMBOL_GPL(svc_set_num_threads);
693 693
694/* 694/*
695 * Called from a server thread as it's exiting. Caller must hold the BKL or 695 * Called from a server thread as it's exiting. Caller must hold the BKL or
@@ -717,7 +717,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
717 if (serv) 717 if (serv)
718 svc_destroy(serv); 718 svc_destroy(serv);
719} 719}
720EXPORT_SYMBOL(svc_exit_thread); 720EXPORT_SYMBOL_GPL(svc_exit_thread);
721 721
722#ifdef CONFIG_SUNRPC_REGISTER_V4 722#ifdef CONFIG_SUNRPC_REGISTER_V4
723 723
@@ -1231,7 +1231,7 @@ err_bad:
1231 svc_putnl(resv, ntohl(rpc_stat)); 1231 svc_putnl(resv, ntohl(rpc_stat));
1232 goto sendit; 1232 goto sendit;
1233} 1233}
1234EXPORT_SYMBOL(svc_process); 1234EXPORT_SYMBOL_GPL(svc_process);
1235 1235
1236/* 1236/*
1237 * Return (transport-specific) limit on the rpc payload. 1237 * Return (transport-specific) limit on the rpc payload.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bf5b5cdafebf..e588df5d6b34 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -440,7 +440,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
440 svc_xprt_enqueue(xprt); 440 svc_xprt_enqueue(xprt);
441 } 441 }
442} 442}
443EXPORT_SYMBOL(svc_reserve); 443EXPORT_SYMBOL_GPL(svc_reserve);
444 444
445static void svc_xprt_release(struct svc_rqst *rqstp) 445static void svc_xprt_release(struct svc_rqst *rqstp)
446{ 446{
@@ -448,6 +448,9 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
448 448
449 rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); 449 rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
450 450
451 kfree(rqstp->rq_deferred);
452 rqstp->rq_deferred = NULL;
453
451 svc_free_res_pages(rqstp); 454 svc_free_res_pages(rqstp);
452 rqstp->rq_res.page_len = 0; 455 rqstp->rq_res.page_len = 0;
453 rqstp->rq_res.page_base = 0; 456 rqstp->rq_res.page_base = 0;
@@ -498,7 +501,7 @@ void svc_wake_up(struct svc_serv *serv)
498 spin_unlock_bh(&pool->sp_lock); 501 spin_unlock_bh(&pool->sp_lock);
499 } 502 }
500} 503}
501EXPORT_SYMBOL(svc_wake_up); 504EXPORT_SYMBOL_GPL(svc_wake_up);
502 505
503int svc_port_is_privileged(struct sockaddr *sin) 506int svc_port_is_privileged(struct sockaddr *sin)
504{ 507{
@@ -515,8 +518,10 @@ int svc_port_is_privileged(struct sockaddr *sin)
515} 518}
516 519
517/* 520/*
518 * Make sure that we don't have too many active connections. If we 521 * Make sure that we don't have too many active connections. If we do,
519 * have, something must be dropped. 522 * something must be dropped. It's not clear what will happen if we allow
523 * "too many" connections, but when dealing with network-facing software,
524 * we have to code defensively. Here we do that by imposing hard limits.
520 * 525 *
521 * There's no point in trying to do random drop here for DoS 526 * There's no point in trying to do random drop here for DoS
522 * prevention. The NFS client does 1 reconnect in 15 seconds. An 527
@@ -525,19 +530,27 @@ int svc_port_is_privileged(struct sockaddr *sin)
525 * The only somewhat efficient mechanism would be to drop old 530 * The only somewhat efficient mechanism would be to drop old
526 * connections from the same IP first. But right now we don't even 531 * connections from the same IP first. But right now we don't even
527 * record the client IP in svc_sock. 532 * record the client IP in svc_sock.
533 *
534 * Single-threaded services that expect a lot of clients will probably
535 * need to set sv_maxconn to override the default value, which is based
536 * on the number of threads.
528 */ 537 */
529static void svc_check_conn_limits(struct svc_serv *serv) 538static void svc_check_conn_limits(struct svc_serv *serv)
530{ 539{
531 if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { 540 unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
541 (serv->sv_nrthreads+3) * 20;
542
543 if (serv->sv_tmpcnt > limit) {
532 struct svc_xprt *xprt = NULL; 544 struct svc_xprt *xprt = NULL;
533 spin_lock_bh(&serv->sv_lock); 545 spin_lock_bh(&serv->sv_lock);
534 if (!list_empty(&serv->sv_tempsocks)) { 546 if (!list_empty(&serv->sv_tempsocks)) {
535 if (net_ratelimit()) { 547 if (net_ratelimit()) {
536 /* Try to help the admin */ 548 /* Try to help the admin */
537 printk(KERN_NOTICE "%s: too many open " 549 printk(KERN_NOTICE "%s: too many open "
538 "connections, consider increasing the " 550 "connections, consider increasing %s\n",
539 "number of nfsd threads\n", 551 serv->sv_name, serv->sv_maxconn ?
540 serv->sv_name); 552 "the max number of connections." :
553 "the number of threads.");
541 } 554 }
542 /* 555 /*
543 * Always select the oldest connection. It's not fair, 556 * Always select the oldest connection. It's not fair,
@@ -730,7 +743,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
730 serv->sv_stats->netcnt++; 743 serv->sv_stats->netcnt++;
731 return len; 744 return len;
732} 745}
733EXPORT_SYMBOL(svc_recv); 746EXPORT_SYMBOL_GPL(svc_recv);
734 747
735/* 748/*
736 * Drop request 749 * Drop request
@@ -740,7 +753,7 @@ void svc_drop(struct svc_rqst *rqstp)
740 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); 753 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
741 svc_xprt_release(rqstp); 754 svc_xprt_release(rqstp);
742} 755}
743EXPORT_SYMBOL(svc_drop); 756EXPORT_SYMBOL_GPL(svc_drop);
744 757
745/* 758/*
746 * Return reply to client. 759 * Return reply to client.
@@ -837,6 +850,11 @@ static void svc_age_temp_xprts(unsigned long closure)
837void svc_delete_xprt(struct svc_xprt *xprt) 850void svc_delete_xprt(struct svc_xprt *xprt)
838{ 851{
839 struct svc_serv *serv = xprt->xpt_server; 852 struct svc_serv *serv = xprt->xpt_server;
853 struct svc_deferred_req *dr;
854
855 /* Only do this once */
856 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
857 return;
840 858
841 dprintk("svc: svc_delete_xprt(%p)\n", xprt); 859 dprintk("svc: svc_delete_xprt(%p)\n", xprt);
842 xprt->xpt_ops->xpo_detach(xprt); 860 xprt->xpt_ops->xpo_detach(xprt);
@@ -851,12 +869,16 @@ void svc_delete_xprt(struct svc_xprt *xprt)
851 * while still attached to a queue, the queue itself 869 * while still attached to a queue, the queue itself
852 * is about to be destroyed (in svc_destroy). 870 * is about to be destroyed (in svc_destroy).
853 */ 871 */
854 if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) { 872 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
855 BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); 873 serv->sv_tmpcnt--;
856 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 874
857 serv->sv_tmpcnt--; 875 for (dr = svc_deferred_dequeue(xprt); dr;
876 dr = svc_deferred_dequeue(xprt)) {
858 svc_xprt_put(xprt); 877 svc_xprt_put(xprt);
878 kfree(dr);
859 } 879 }
880
881 svc_xprt_put(xprt);
860 spin_unlock_bh(&serv->sv_lock); 882 spin_unlock_bh(&serv->sv_lock);
861} 883}
862 884
@@ -902,17 +924,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
902 container_of(dreq, struct svc_deferred_req, handle); 924 container_of(dreq, struct svc_deferred_req, handle);
903 struct svc_xprt *xprt = dr->xprt; 925 struct svc_xprt *xprt = dr->xprt;
904 926
905 if (too_many) { 927 spin_lock(&xprt->xpt_lock);
928 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
929 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
930 spin_unlock(&xprt->xpt_lock);
931 dprintk("revisit canceled\n");
906 svc_xprt_put(xprt); 932 svc_xprt_put(xprt);
907 kfree(dr); 933 kfree(dr);
908 return; 934 return;
909 } 935 }
910 dprintk("revisit queued\n"); 936 dprintk("revisit queued\n");
911 dr->xprt = NULL; 937 dr->xprt = NULL;
912 spin_lock(&xprt->xpt_lock);
913 list_add(&dr->handle.recent, &xprt->xpt_deferred); 938 list_add(&dr->handle.recent, &xprt->xpt_deferred);
914 spin_unlock(&xprt->xpt_lock); 939 spin_unlock(&xprt->xpt_lock);
915 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
916 svc_xprt_enqueue(xprt); 940 svc_xprt_enqueue(xprt);
917 svc_xprt_put(xprt); 941 svc_xprt_put(xprt);
918} 942}
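
Two related fixes above close a race on transport teardown: svc_delete_xprt() claims XPT_DEAD up front with test_and_set_bit(), so it runs at most once and can safely drain the deferred-request queue, while svc_revisit() now re-checks XPT_DEAD under xpt_lock, so a deferred request is either requeued or freed, never parked on a dying transport. The claim-once idiom in isolation, as a sketch with a hypothetical struct:

	/* Sketch: make a teardown path idempotent with an atomic claim bit. */
	static void example_delete(struct example_xprt *xprt)
	{
		if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
			return;		/* somebody else already tore it down */
		/* ... exactly-once cleanup runs here ... */
	}
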
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 8a73cbb16052..e64109b02aee 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -57,13 +57,13 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
57 rqstp->rq_authop = aops; 57 rqstp->rq_authop = aops;
58 return aops->accept(rqstp, authp); 58 return aops->accept(rqstp, authp);
59} 59}
60EXPORT_SYMBOL(svc_authenticate); 60EXPORT_SYMBOL_GPL(svc_authenticate);
61 61
62int svc_set_client(struct svc_rqst *rqstp) 62int svc_set_client(struct svc_rqst *rqstp)
63{ 63{
64 return rqstp->rq_authop->set_client(rqstp); 64 return rqstp->rq_authop->set_client(rqstp);
65} 65}
66EXPORT_SYMBOL(svc_set_client); 66EXPORT_SYMBOL_GPL(svc_set_client);
67 67
68/* A request, which was authenticated, has now executed. 68/* A request, which was authenticated, has now executed.
69 * Time to finalise the credentials and verifier 69 * Time to finalise the credentials and verifier
@@ -95,7 +95,7 @@ svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops)
95 spin_unlock(&authtab_lock); 95 spin_unlock(&authtab_lock);
96 return rv; 96 return rv;
97} 97}
98EXPORT_SYMBOL(svc_auth_register); 98EXPORT_SYMBOL_GPL(svc_auth_register);
99 99
100void 100void
101svc_auth_unregister(rpc_authflavor_t flavor) 101svc_auth_unregister(rpc_authflavor_t flavor)
@@ -105,7 +105,7 @@ svc_auth_unregister(rpc_authflavor_t flavor)
105 authtab[flavor] = NULL; 105 authtab[flavor] = NULL;
106 spin_unlock(&authtab_lock); 106 spin_unlock(&authtab_lock);
107} 107}
108EXPORT_SYMBOL(svc_auth_unregister); 108EXPORT_SYMBOL_GPL(svc_auth_unregister);
109 109
110/************************************************** 110/**************************************************
111 * 'auth_domains' are stored in a hash table indexed by name. 111 * 'auth_domains' are stored in a hash table indexed by name.
@@ -132,7 +132,7 @@ void auth_domain_put(struct auth_domain *dom)
132 spin_unlock(&auth_domain_lock); 132 spin_unlock(&auth_domain_lock);
133 } 133 }
134} 134}
135EXPORT_SYMBOL(auth_domain_put); 135EXPORT_SYMBOL_GPL(auth_domain_put);
136 136
137struct auth_domain * 137struct auth_domain *
138auth_domain_lookup(char *name, struct auth_domain *new) 138auth_domain_lookup(char *name, struct auth_domain *new)
@@ -157,10 +157,10 @@ auth_domain_lookup(char *name, struct auth_domain *new)
157 spin_unlock(&auth_domain_lock); 157 spin_unlock(&auth_domain_lock);
158 return new; 158 return new;
159} 159}
160EXPORT_SYMBOL(auth_domain_lookup); 160EXPORT_SYMBOL_GPL(auth_domain_lookup);
161 161
162struct auth_domain *auth_domain_find(char *name) 162struct auth_domain *auth_domain_find(char *name)
163{ 163{
164 return auth_domain_lookup(name, NULL); 164 return auth_domain_lookup(name, NULL);
165} 165}
166EXPORT_SYMBOL(auth_domain_find); 166EXPORT_SYMBOL_GPL(auth_domain_find);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 82240e6127b2..5c865e2d299e 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -64,7 +64,7 @@ struct auth_domain *unix_domain_find(char *name)
64 rv = auth_domain_lookup(name, &new->h); 64 rv = auth_domain_lookup(name, &new->h);
65 } 65 }
66} 66}
67EXPORT_SYMBOL(unix_domain_find); 67EXPORT_SYMBOL_GPL(unix_domain_find);
68 68
69static void svcauth_unix_domain_release(struct auth_domain *dom) 69static void svcauth_unix_domain_release(struct auth_domain *dom)
70{ 70{
@@ -358,7 +358,7 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
358 else 358 else
359 return -ENOMEM; 359 return -ENOMEM;
360} 360}
361EXPORT_SYMBOL(auth_unix_add_addr); 361EXPORT_SYMBOL_GPL(auth_unix_add_addr);
362 362
363int auth_unix_forget_old(struct auth_domain *dom) 363int auth_unix_forget_old(struct auth_domain *dom)
364{ 364{
@@ -370,7 +370,7 @@ int auth_unix_forget_old(struct auth_domain *dom)
370 udom->addr_changes++; 370 udom->addr_changes++;
371 return 0; 371 return 0;
372} 372}
373EXPORT_SYMBOL(auth_unix_forget_old); 373EXPORT_SYMBOL_GPL(auth_unix_forget_old);
374 374
375struct auth_domain *auth_unix_lookup(struct in6_addr *addr) 375struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
376{ 376{
@@ -395,13 +395,13 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
395 cache_put(&ipm->h, &ip_map_cache); 395 cache_put(&ipm->h, &ip_map_cache);
396 return rv; 396 return rv;
397} 397}
398EXPORT_SYMBOL(auth_unix_lookup); 398EXPORT_SYMBOL_GPL(auth_unix_lookup);
399 399
400void svcauth_unix_purge(void) 400void svcauth_unix_purge(void)
401{ 401{
402 cache_purge(&ip_map_cache); 402 cache_purge(&ip_map_cache);
403} 403}
404EXPORT_SYMBOL(svcauth_unix_purge); 404EXPORT_SYMBOL_GPL(svcauth_unix_purge);
405 405
406static inline struct ip_map * 406static inline struct ip_map *
407ip_map_cached_get(struct svc_rqst *rqstp) 407ip_map_cached_get(struct svc_rqst *rqstp)
@@ -714,7 +714,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
714 return SVC_OK; 714 return SVC_OK;
715} 715}
716 716
717EXPORT_SYMBOL(svcauth_unix_set_client); 717EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
718 718
719static int 719static int
720svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) 720svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index ef3238d665ee..5763e6460fea 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -59,6 +59,7 @@ static void svc_udp_data_ready(struct sock *, int);
59static int svc_udp_recvfrom(struct svc_rqst *); 59static int svc_udp_recvfrom(struct svc_rqst *);
60static int svc_udp_sendto(struct svc_rqst *); 60static int svc_udp_sendto(struct svc_rqst *);
61static void svc_sock_detach(struct svc_xprt *); 61static void svc_sock_detach(struct svc_xprt *);
62static void svc_tcp_sock_detach(struct svc_xprt *);
62static void svc_sock_free(struct svc_xprt *); 63static void svc_sock_free(struct svc_xprt *);
63 64
64static struct svc_xprt *svc_create_socket(struct svc_serv *, int, 65static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
@@ -102,7 +103,6 @@ static void svc_reclassify_socket(struct socket *sock)
102static void svc_release_skb(struct svc_rqst *rqstp) 103static void svc_release_skb(struct svc_rqst *rqstp)
103{ 104{
104 struct sk_buff *skb = rqstp->rq_xprt_ctxt; 105 struct sk_buff *skb = rqstp->rq_xprt_ctxt;
105 struct svc_deferred_req *dr = rqstp->rq_deferred;
106 106
107 if (skb) { 107 if (skb) {
108 struct svc_sock *svsk = 108 struct svc_sock *svsk =
@@ -112,10 +112,6 @@ static void svc_release_skb(struct svc_rqst *rqstp)
112 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); 112 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
113 skb_free_datagram(svsk->sk_sk, skb); 113 skb_free_datagram(svsk->sk_sk, skb);
114 } 114 }
115 if (dr) {
116 rqstp->rq_deferred = NULL;
117 kfree(dr);
118 }
119} 115}
120 116
121union svc_pktinfo_u { 117union svc_pktinfo_u {
@@ -289,7 +285,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
289 return -ENOENT; 285 return -ENOENT;
290 return len; 286 return len;
291} 287}
292EXPORT_SYMBOL(svc_sock_names); 288EXPORT_SYMBOL_GPL(svc_sock_names);
293 289
294/* 290/*
295 * Check input queue length 291 * Check input queue length
@@ -1017,7 +1013,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
1017 .xpo_recvfrom = svc_tcp_recvfrom, 1013 .xpo_recvfrom = svc_tcp_recvfrom,
1018 .xpo_sendto = svc_tcp_sendto, 1014 .xpo_sendto = svc_tcp_sendto,
1019 .xpo_release_rqst = svc_release_skb, 1015 .xpo_release_rqst = svc_release_skb,
1020 .xpo_detach = svc_sock_detach, 1016 .xpo_detach = svc_tcp_sock_detach,
1021 .xpo_free = svc_sock_free, 1017 .xpo_free = svc_sock_free,
1022 .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr, 1018 .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
1023 .xpo_has_wspace = svc_tcp_has_wspace, 1019 .xpo_has_wspace = svc_tcp_has_wspace,
@@ -1101,7 +1097,7 @@ void svc_sock_update_bufs(struct svc_serv *serv)
1101 } 1097 }
1102 spin_unlock_bh(&serv->sv_lock); 1098 spin_unlock_bh(&serv->sv_lock);
1103} 1099}
1104EXPORT_SYMBOL(svc_sock_update_bufs); 1100EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
1105 1101
1106/* 1102/*
1107 * Initialize socket for RPC use and create svc_sock struct 1103 * Initialize socket for RPC use and create svc_sock struct
@@ -1287,6 +1283,24 @@ static void svc_sock_detach(struct svc_xprt *xprt)
1287 sk->sk_state_change = svsk->sk_ostate; 1283 sk->sk_state_change = svsk->sk_ostate;
1288 sk->sk_data_ready = svsk->sk_odata; 1284 sk->sk_data_ready = svsk->sk_odata;
1289 sk->sk_write_space = svsk->sk_owspace; 1285 sk->sk_write_space = svsk->sk_owspace;
1286
1287 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1288 wake_up_interruptible(sk->sk_sleep);
1289}
1290
1291/*
1292 * Disconnect the socket, and reset the callbacks
1293 */
1294static void svc_tcp_sock_detach(struct svc_xprt *xprt)
1295{
1296 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1297
1298 dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk);
1299
1300 svc_sock_detach(xprt);
1301
1302 if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
1303 kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
1290} 1304}
1291 1305
1292/* 1306/*
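
svcsock.c gives TCP its own detach operation: svc_tcp_sock_detach() performs the generic callback restoration and then, for connected (non-listener) sockets, calls kernel_sock_shutdown() so the peer sees the close promptly instead of waiting for the final socket release. The new helper, restated outside the diff for readability:

	static void svc_tcp_sock_detach(struct svc_xprt *xprt)
	{
		struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

		svc_sock_detach(xprt);			/* restore sk_* callbacks */
		if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
			kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
	}
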
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 79a55d56cc98..406e26de584e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -28,7 +28,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
28 memcpy(p, obj->data, obj->len); 28 memcpy(p, obj->data, obj->len);
29 return p + XDR_QUADLEN(obj->len); 29 return p + XDR_QUADLEN(obj->len);
30} 30}
31EXPORT_SYMBOL(xdr_encode_netobj); 31EXPORT_SYMBOL_GPL(xdr_encode_netobj);
32 32
33__be32 * 33__be32 *
34xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) 34xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
@@ -41,7 +41,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
41 obj->data = (u8 *) p; 41 obj->data = (u8 *) p;
42 return p + XDR_QUADLEN(len); 42 return p + XDR_QUADLEN(len);
43} 43}
44EXPORT_SYMBOL(xdr_decode_netobj); 44EXPORT_SYMBOL_GPL(xdr_decode_netobj);
45 45
46/** 46/**
47 * xdr_encode_opaque_fixed - Encode fixed length opaque data 47 * xdr_encode_opaque_fixed - Encode fixed length opaque data
@@ -71,7 +71,7 @@ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
71 } 71 }
72 return p; 72 return p;
73} 73}
74EXPORT_SYMBOL(xdr_encode_opaque_fixed); 74EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
75 75
76/** 76/**
77 * xdr_encode_opaque - Encode variable length opaque data 77 * xdr_encode_opaque - Encode variable length opaque data
@@ -86,14 +86,14 @@ __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
86 *p++ = htonl(nbytes); 86 *p++ = htonl(nbytes);
87 return xdr_encode_opaque_fixed(p, ptr, nbytes); 87 return xdr_encode_opaque_fixed(p, ptr, nbytes);
88} 88}
89EXPORT_SYMBOL(xdr_encode_opaque); 89EXPORT_SYMBOL_GPL(xdr_encode_opaque);
90 90
91__be32 * 91__be32 *
92xdr_encode_string(__be32 *p, const char *string) 92xdr_encode_string(__be32 *p, const char *string)
93{ 93{
94 return xdr_encode_array(p, string, strlen(string)); 94 return xdr_encode_array(p, string, strlen(string));
95} 95}
96EXPORT_SYMBOL(xdr_encode_string); 96EXPORT_SYMBOL_GPL(xdr_encode_string);
97 97
98__be32 * 98__be32 *
99xdr_decode_string_inplace(__be32 *p, char **sp, 99xdr_decode_string_inplace(__be32 *p, char **sp,
@@ -108,7 +108,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp,
108 *sp = (char *) p; 108 *sp = (char *) p;
109 return p + XDR_QUADLEN(len); 109 return p + XDR_QUADLEN(len);
110} 110}
111EXPORT_SYMBOL(xdr_decode_string_inplace); 111EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
112 112
113void 113void
114xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base, 114xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
@@ -136,7 +136,7 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
136 xdr->buflen += len; 136 xdr->buflen += len;
137 xdr->len += len; 137 xdr->len += len;
138} 138}
139EXPORT_SYMBOL(xdr_encode_pages); 139EXPORT_SYMBOL_GPL(xdr_encode_pages);
140 140
141void 141void
142xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, 142xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
@@ -158,7 +158,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
158 158
159 xdr->buflen += len; 159 xdr->buflen += len;
160} 160}
161EXPORT_SYMBOL(xdr_inline_pages); 161EXPORT_SYMBOL_GPL(xdr_inline_pages);
162 162
163/* 163/*
164 * Helper routines for doing 'memmove' like operations on a struct xdr_buf 164 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
@@ -428,7 +428,7 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
428{ 428{
429 xdr_shrink_bufhead(buf, len); 429 xdr_shrink_bufhead(buf, len);
430} 430}
431EXPORT_SYMBOL(xdr_shift_buf); 431EXPORT_SYMBOL_GPL(xdr_shift_buf);
432 432
433/** 433/**
434 * xdr_init_encode - Initialize a struct xdr_stream for sending data. 434 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
@@ -465,7 +465,7 @@ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
465 iov->iov_len += len; 465 iov->iov_len += len;
466 } 466 }
467} 467}
468EXPORT_SYMBOL(xdr_init_encode); 468EXPORT_SYMBOL_GPL(xdr_init_encode);
469 469
470/** 470/**
471 * xdr_reserve_space - Reserve buffer space for sending 471 * xdr_reserve_space - Reserve buffer space for sending
@@ -492,7 +492,7 @@ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
492 xdr->buf->len += nbytes; 492 xdr->buf->len += nbytes;
493 return p; 493 return p;
494} 494}
495EXPORT_SYMBOL(xdr_reserve_space); 495EXPORT_SYMBOL_GPL(xdr_reserve_space);
496 496
497/** 497/**
498 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending 498 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
@@ -527,7 +527,7 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
527 buf->buflen += len; 527 buf->buflen += len;
528 buf->len += len; 528 buf->len += len;
529} 529}
530EXPORT_SYMBOL(xdr_write_pages); 530EXPORT_SYMBOL_GPL(xdr_write_pages);
531 531
532/** 532/**
533 * xdr_init_decode - Initialize an xdr_stream for decoding data. 533 * xdr_init_decode - Initialize an xdr_stream for decoding data.
@@ -547,7 +547,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
547 xdr->p = p; 547 xdr->p = p;
548 xdr->end = (__be32 *)((char *)iov->iov_base + len); 548 xdr->end = (__be32 *)((char *)iov->iov_base + len);
549} 549}
550EXPORT_SYMBOL(xdr_init_decode); 550EXPORT_SYMBOL_GPL(xdr_init_decode);
551 551
552/** 552/**
553 * xdr_inline_decode - Retrieve non-page XDR data to decode 553 * xdr_inline_decode - Retrieve non-page XDR data to decode
@@ -569,7 +569,7 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
569 xdr->p = q; 569 xdr->p = q;
570 return p; 570 return p;
571} 571}
572EXPORT_SYMBOL(xdr_inline_decode); 572EXPORT_SYMBOL_GPL(xdr_inline_decode);
573 573
574/** 574/**
575 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position 575 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
@@ -613,7 +613,7 @@ void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
613 xdr->p = (__be32 *)((char *)iov->iov_base + padding); 613 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
614 xdr->end = (__be32 *)((char *)iov->iov_base + end); 614 xdr->end = (__be32 *)((char *)iov->iov_base + end);
615} 615}
616EXPORT_SYMBOL(xdr_read_pages); 616EXPORT_SYMBOL_GPL(xdr_read_pages);
617 617
618/** 618/**
619 * xdr_enter_page - decode data from the XDR page 619 * xdr_enter_page - decode data from the XDR page
@@ -638,7 +638,7 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
638 xdr->p = (__be32 *)(kaddr + xdr->buf->page_base); 638 xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
639 xdr->end = (__be32 *)((char *)xdr->p + len); 639 xdr->end = (__be32 *)((char *)xdr->p + len);
640} 640}
641EXPORT_SYMBOL(xdr_enter_page); 641EXPORT_SYMBOL_GPL(xdr_enter_page);
642 642
643static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; 643static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
644 644
@@ -650,7 +650,7 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
650 buf->page_len = 0; 650 buf->page_len = 0;
651 buf->buflen = buf->len = iov->iov_len; 651 buf->buflen = buf->len = iov->iov_len;
652} 652}
653EXPORT_SYMBOL(xdr_buf_from_iov); 653EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
654 654
655/* Sets subbuf to the portion of buf of length len beginning base bytes 655/* Sets subbuf to the portion of buf of length len beginning base bytes
656 * from the start of buf. Returns -1 if base or length are out of bounds. */ 656
@@ -699,7 +699,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
699 return -1; 699 return -1;
700 return 0; 700 return 0;
701} 701}
702EXPORT_SYMBOL(xdr_buf_subsegment); 702EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
703 703
704static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) 704static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
705{ 705{
@@ -730,7 +730,7 @@ int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, u
730 __read_bytes_from_xdr_buf(&subbuf, obj, len); 730 __read_bytes_from_xdr_buf(&subbuf, obj, len);
731 return 0; 731 return 0;
732} 732}
733EXPORT_SYMBOL(read_bytes_from_xdr_buf); 733EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
734 734
735static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) 735static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
736{ 736{
@@ -774,7 +774,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
774 *obj = ntohl(raw); 774 *obj = ntohl(raw);
775 return 0; 775 return 0;
776} 776}
777EXPORT_SYMBOL(xdr_decode_word); 777EXPORT_SYMBOL_GPL(xdr_decode_word);
778 778
779int 779int
780xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) 780xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
@@ -783,7 +783,7 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
783 783
784 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); 784 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
785} 785}
786EXPORT_SYMBOL(xdr_encode_word); 786EXPORT_SYMBOL_GPL(xdr_encode_word);
787 787
788/* If the netobj starting offset bytes from the start of xdr_buf is contained 788/* If the netobj starting offset bytes from the start of xdr_buf is contained
789 * entirely in the head or the tail, set object to point to it; otherwise 789 * entirely in the head or the tail, set object to point to it; otherwise
@@ -821,7 +821,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in
821 __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len); 821 __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
822 return 0; 822 return 0;
823} 823}
824EXPORT_SYMBOL(xdr_buf_read_netobj); 824EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
825 825
826/* Returns 0 on success, or else a negative error code. */ 826/* Returns 0 on success, or else a negative error code. */
827static int 827static int
@@ -1027,7 +1027,7 @@ xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1027 1027
1028 return xdr_xcode_array2(buf, base, desc, 0); 1028 return xdr_xcode_array2(buf, base, desc, 0);
1029} 1029}
1030EXPORT_SYMBOL(xdr_decode_array2); 1030EXPORT_SYMBOL_GPL(xdr_decode_array2);
1031 1031
1032int 1032int
1033xdr_encode_array2(struct xdr_buf *buf, unsigned int base, 1033xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
@@ -1039,7 +1039,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1039 1039
1040 return xdr_xcode_array2(buf, base, desc, 1); 1040 return xdr_xcode_array2(buf, base, desc, 1);
1041} 1041}
1042EXPORT_SYMBOL(xdr_encode_array2); 1042EXPORT_SYMBOL_GPL(xdr_encode_array2);
1043 1043
1044int 1044int
1045xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, 1045xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
@@ -1106,5 +1106,5 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1106out: 1106out:
1107 return ret; 1107 return ret;
1108} 1108}
1109EXPORT_SYMBOL(xdr_process_buf); 1109EXPORT_SYMBOL_GPL(xdr_process_buf);
1110 1110
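
For context on the xdr.c exports converted above: xdr_encode_word() stores a big-endian u32 at a byte offset into an xdr_buf, and xdr_decode_word() reads one back. A hedged round-trip sketch using xdr_buf_from_iov(), also exported here; the function name is illustrative and the buffer is caller-supplied scratch memory:

	static int example_xdr_roundtrip(void *mem, size_t len)
	{
		struct kvec iov = { .iov_base = mem, .iov_len = len };
		struct xdr_buf buf;
		u32 val;

		xdr_buf_from_iov(&iov, &buf);
		if (xdr_encode_word(&buf, 0, 42))	/* htonl(42) at offset 0 */
			return -EIO;
		if (xdr_decode_word(&buf, 0, &val))	/* val becomes 42 */
			return -EIO;
		return val == 42 ? 0 : -EIO;
	}
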
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c6250d0055d2..d1b89820ab4f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -836,7 +836,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
836 err = mnt_want_write(nd.path.mnt); 836 err = mnt_want_write(nd.path.mnt);
837 if (err) 837 if (err)
838 goto out_mknod_dput; 838 goto out_mknod_dput;
839 err = security_path_mknod(&nd.path, dentry, mode, 0);
840 if (err)
841 goto out_mknod_drop_write;
839 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); 842 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
843out_mknod_drop_write:
840 mnt_drop_write(nd.path.mnt); 844 mnt_drop_write(nd.path.mnt);
841 if (err) 845 if (err)
842 goto out_mknod_dput; 846 goto out_mknod_dput;
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
new file mode 100644
index 000000000000..18495cdcd10d
--- /dev/null
+++ b/net/wimax/Kconfig
@@ -0,0 +1,52 @@
1#
2# WiMAX LAN device configuration
3#
4# Note the ugly 'depends on' on WIMAX: it prevents RFKILL from being a
5# module if WIMAX is to be linked in. The WiMAX code is done in such a
6# way that it doesn't require an explicit dependency on RFKILL in
7# case an embedded system wants to rip it out.
8#
9# As well, enablement of the RFKILL code means we need the INPUT layer
10# support to inject events coming from hw rfkill switches. That
11# dependency could be killed if input.h provided appropiate means to
12# work when input is disabled.
13
14comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
15 depends on INPUT = n && RFKILL != n
16
17menuconfig WIMAX
18 tristate "WiMAX Wireless Broadband support"
19 depends on (y && RFKILL != m) || m
20 depends on (INPUT && RFKILL != n) || RFKILL = n
21 help
22
23 Select to configure support for devices that provide
24 wireless broadband connectivity using the WiMAX protocol
25 (IEEE 802.16).
26
27 Please note that most of these devices require signing up
28 for a service plan with a provider.
29
30 The different WiMAX drivers can be enabled in the menu entry
31
32 Device Drivers > Network device support > WiMAX Wireless
33 Broadband devices
34
35 If unsure, it is safe to select M (module).
36
37config WIMAX_DEBUG_LEVEL
38 int "WiMAX debug level"
39 depends on WIMAX
40 default 8
41 help
42
43 Select the maximum debug verbosity level to be compiled into
44 the WiMAX stack code.
45
46 By default, debug messages are disabled at runtime and can
47 be selectively enabled for different parts of the code using
48 the debugfs debug-levels files.
49
50 If set at zero, this will compile out all the debug code.
51
52 It is recommended that it be left at 8.
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
new file mode 100644
index 000000000000..5b80b941c2c9
--- /dev/null
+++ b/net/wimax/Makefile
@@ -0,0 +1,13 @@
1
2obj-$(CONFIG_WIMAX) += wimax.o
3
4wimax-y := \
5 id-table.o \
6 op-msg.o \
7 op-reset.o \
8 op-rfkill.o \
9 stack.o
10
11wimax-$(CONFIG_DEBUG_FS) += debugfs.o
12
13
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h
new file mode 100644
index 000000000000..1c29123a3aa9
--- /dev/null
+++ b/net/wimax/debug-levels.h
@@ -0,0 +1,42 @@
1/*
2 * Linux WiMAX Stack
3 * Debug levels control file for the wimax module
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23#ifndef __debug_levels__h__
24#define __debug_levels__h__
25
26/* Maximum compile and run time debug level for all submodules */
27#define D_MODULENAME wimax
28#define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL
29
30#include <linux/wimax/debug.h>
31
32/* List of all the enabled modules */
33enum d_module {
34 D_SUBMODULE_DECLARE(debugfs),
35 D_SUBMODULE_DECLARE(id_table),
36 D_SUBMODULE_DECLARE(op_msg),
37 D_SUBMODULE_DECLARE(op_reset),
38 D_SUBMODULE_DECLARE(op_rfkill),
39 D_SUBMODULE_DECLARE(stack),
40};
41
42#endif /* #ifndef __debug_levels__h__ */
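
Each compilation unit opts into this framework by defining D_SUBMODULE to one of the enum d_module entries before including debug-levels.h, exactly as debugfs.c does next. A minimal sketch of the pattern; the function body is hypothetical, and d_fnstart()/d_fnend()/d_printf() come from linux/wimax/debug.h:

	#define D_SUBMODULE op_msg	/* must appear in enum d_module */
	#include "debug-levels.h"

	static void example(struct device *dev)
	{
		d_fnstart(3, dev, "(dev %p)\n", dev);
		d_printf(1, dev, "something noteworthy happened\n");
		d_fnend(3, dev, "(dev %p) = void\n", dev);
	}
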
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
new file mode 100644
index 000000000000..87cf4430079c
--- /dev/null
+++ b/net/wimax/debugfs.c
@@ -0,0 +1,90 @@
1/*
2 * Linux WiMAX
3 * Debugfs support
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 */
23#include <linux/debugfs.h>
24#include <linux/wimax.h>
25#include "wimax-internal.h"
26
27#define D_SUBMODULE debugfs
28#include "debug-levels.h"
29
30
31/* Debug framework control of debug levels */
32struct d_level D_LEVEL[] = {
33 D_SUBMODULE_DEFINE(debugfs),
34 D_SUBMODULE_DEFINE(id_table),
35 D_SUBMODULE_DEFINE(op_msg),
36 D_SUBMODULE_DEFINE(op_reset),
37 D_SUBMODULE_DEFINE(op_rfkill),
38 D_SUBMODULE_DEFINE(stack),
39};
40size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
41
42#define __debugfs_register(prefix, name, parent) \
43do { \
44 result = d_level_register_debugfs(prefix, name, parent); \
45 if (result < 0) \
46 goto error; \
47} while (0)
48
49
50int wimax_debugfs_add(struct wimax_dev *wimax_dev)
51{
52 int result;
53 struct net_device *net_dev = wimax_dev->net_dev;
54 struct device *dev = net_dev->dev.parent;
55 struct dentry *dentry;
56 char buf[128];
57
58 snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
59 dentry = debugfs_create_dir(buf, NULL);
60 result = PTR_ERR(dentry);
61 if (IS_ERR(dentry)) {
62 if (result == -ENODEV)
63 result = 0; /* No debugfs support */
64 else
65 dev_err(dev, "Can't create debugfs dentry: %d\n",
66 result);
67 goto out;
68 }
69 wimax_dev->debugfs_dentry = dentry;
70 __debugfs_register("wimax_dl_", debugfs, dentry);
71 __debugfs_register("wimax_dl_", id_table, dentry);
72 __debugfs_register("wimax_dl_", op_msg, dentry);
73 __debugfs_register("wimax_dl_", op_reset, dentry);
74 __debugfs_register("wimax_dl_", op_rfkill, dentry);
75 __debugfs_register("wimax_dl_", stack, dentry);
76 result = 0;
77out:
78 return result;
79
80error:
81 debugfs_remove_recursive(wimax_dev->debugfs_dentry);
82 return result;
83}
84
85void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
86{
87 debugfs_remove_recursive(wimax_dev->debugfs_dentry);
88}
89
90
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
new file mode 100644
index 000000000000..5e685f7eda90
--- /dev/null
+++ b/net/wimax/id-table.c
@@ -0,0 +1,144 @@
1/*
2 * Linux WiMAX
3 * Mapping of generic netlink family IDs to net devices
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * We assign a single generic netlink family ID to each device (to
25 * simplify lookup).
26 *
27 * We need a way to map family ID to a wimax_dev pointer.
28 *
29 * The idea is to use a very simple lookup. Using a netlink attribute
30 * with (for example) the interface name implies a heavier search over
31 * all the network devices; seemed kind of a waste given that we know
32 * we are looking for a WiMAX device and that most systems will have
33 * just a single WiMAX adapter.
34 *
35 * We put all the WiMAX devices in the system in a linked list and
36 * match the generic netlink family ID against the list.
37 *
38 * By using a linked list, the case of a single adapter in the system
39 * becomes (almost) no overhead, while still working for many more. If
40 * it ever goes beyond two, I'll be surprised.
41 */
42#include <linux/device.h>
43#include <net/genetlink.h>
44#include <linux/netdevice.h>
45#include <linux/list.h>
46#include <linux/wimax.h>
47#include "wimax-internal.h"
48
49
50#define D_SUBMODULE id_table
51#include "debug-levels.h"
52
53
54static DEFINE_SPINLOCK(wimax_id_table_lock);
55static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table);
56
57
58/*
59 * wimax_id_table_add - add a generic netlink family ID / wimax_dev mapping
60 *
61 * @wimax_dev: WiMAX device descriptor to associate to the Generic
62 * Netlink family ID.
63 *
64 * The table is a simple linked list, so adding a mapping is just
65 * inserting the device at the head under the table lock.
66 */
67void wimax_id_table_add(struct wimax_dev *wimax_dev)
68{
69 d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
70 spin_lock(&wimax_id_table_lock);
71 list_add(&wimax_dev->id_table_node, &wimax_id_table);
72 spin_unlock(&wimax_id_table_lock);
73 d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev);
74}
75
76
77/*
78 * wimax_dev_get_by_genl_info - look up a wimax_dev from the genl info
79 *
80 * The generic netlink family ID has been filled out in the
81 * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in
82 * the mapping table and reference the wimax_dev.
83 *
84 * When done, the reference should be dropped with
85 * 'dev_put(wimax_dev->net_dev)'.
86 */
87struct wimax_dev *wimax_dev_get_by_genl_info(
88 struct genl_info *info, int ifindex)
89{
90 struct wimax_dev *wimax_dev = NULL;
91
92 d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
93 spin_lock(&wimax_id_table_lock);
94 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node)
95 if (wimax_dev->net_dev->ifindex == ifindex) {
96 dev_hold(wimax_dev->net_dev);
97 goto found;
98 }
99 wimax_dev = NULL; /* loop ran off the list: no match */
100 d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
101 ifindex);
102found:
103 spin_unlock(&wimax_id_table_lock);
104 d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
105 info, ifindex, wimax_dev);
106 return wimax_dev;
107}
108
109
110/*
111 * wimax_id_table_rm - Remove a generic netlink family ID / wimax_dev mapping
112 *
113 * @wimax_dev: WiMAX device whose mapping is to be removed from the table
114 */
115void wimax_id_table_rm(struct wimax_dev *wimax_dev)
116{
117 spin_lock(&wimax_id_table_lock);
118 list_del_init(&wimax_dev->id_table_node);
119 spin_unlock(&wimax_id_table_lock);
120}
121
122
123/*
124 * Release the generic netlink family ID / wimax_dev mapping table
125 *
126 * On release, verify that the table is empty. We want the check always
127 * compiled, to ensure it doesn't bit-rot; it is effectively compiled
128 * out when CONFIG_BUG is disabled.
129 */
130void wimax_id_table_release(void)
131{
132 struct wimax_dev *wimax_dev;
133
134#ifndef CONFIG_BUG
135 return;
136#endif
137 spin_lock(&wimax_id_table_lock);
138 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
139 printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
140 __func__, wimax_dev, wimax_dev->net_dev->ifindex);
141 WARN_ON(1);
142 }
143 spin_unlock(&wimax_id_table_lock);
144}
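
The lookup comment above states the reference contract: on success the underlying net_device is held and the caller must drop it with dev_put(). A hedged sketch of a caller inside a hypothetical generic netlink handler; attribute-presence checks are elided and the handler name is illustrative:

	static int example_gnl_doit(struct sk_buff *skb, struct genl_info *info)
	{
		int ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
		struct wimax_dev *wimax_dev;

		wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
		if (wimax_dev == NULL)
			return -ENODEV;
		/* ... operate on wimax_dev ... */
		dev_put(wimax_dev->net_dev);	/* drop the lookup reference */
		return 0;
	}
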
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
new file mode 100644
index 000000000000..cb3b4ad53683
--- /dev/null
+++ b/net/wimax/op-msg.c
@@ -0,0 +1,421 @@
1/*
2 * Linux WiMAX
3 * Generic messaging interface between userspace and driver/device
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This implements a direct communication channel between user space and
25 * the driver/device, by which free form messages can be sent back and
26 * forth.
27 *
28 * This is intended for device-specific features, vendor quirks, etc.
29 *
30 * See include/net/wimax.h
31 *
32 * GENERIC NETLINK ENCODING AND CAPACITY
33 *
34 * A destination "pipe name" is added to each message; it is up to the
35 * drivers to assign or use those names (if using them at all).
36 *
37 * Messages are encoded as a binary netlink attribute using nla_put()
38 * using type NLA_UNSPEC (as some versions of libnl still in
39 * deployment don't yet understand NLA_BINARY).
40 *
41 * The maximum capacity of this transport is PAGESIZE per message (so
42 * the actual payload will be a bit smaller depending on the
43 * netlink/generic netlink attributes and headers).
44 *
45 * RECEPTION OF MESSAGES
46 *
47 * When a message is received from user space, it is passed verbatim
48 * to the driver calling wimax_dev->op_msg_from_user(). The return
49 * value from this function is passed back to user space as an ack
50 * over the generic netlink protocol.
51 *
52 * The stack doesn't do any processing or interpretation of these
53 * messages.
54 *
55 * SENDING MESSAGES
56 *
57 * Messages can be sent with wimax_msg().
58 *
59 * If the message delivery needs to happen on a different context to
60 * that of its creation, wimax_msg_alloc() can be used to get a
61 * pointer to the message that can be delivered later on with
62 * wimax_msg_send().
63 *
64 * ROADMAP
65 *
66 * wimax_gnl_doit_msg_from_user() Process a message from user space
67 * wimax_dev_get_by_genl_info()
68 * wimax_dev->op_msg_from_user() Delivery of message to the driver
69 *
70 * wimax_msg() Send a message to user space
71 * wimax_msg_alloc()
72 * wimax_msg_send()
73 */
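
/*
 * Example: a driver-side receive handler (a sketch only; the 'mydrv'
 * names are hypothetical, everything else is this file's API). The
 * stack strips the netlink headers before calling the driver, and the
 * return value travels back to user space as the netlink ack:
 *
 *	static int mydrv_op_msg_from_user(struct wimax_dev *wimax_dev,
 *					  const char *pipe_name,
 *					  const void *buf, size_t size,
 *					  const struct genl_info *info)
 *	{
 *		struct mydrv *drv = container_of(wimax_dev,
 *						 struct mydrv, wimax_dev);
 *		return mydrv_cmd_exec(drv, buf, size);
 *	}
 */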
74#include <linux/device.h>
75#include <net/genetlink.h>
76#include <linux/netdevice.h>
77#include <linux/wimax.h>
78#include <linux/security.h>
79#include "wimax-internal.h"
80
81
82#define D_SUBMODULE op_msg
83#include "debug-levels.h"
84
85
86/**
87 * wimax_msg_alloc - Create a new skb for sending a message to userspace
88 *
89 * @wimax_dev: WiMAX device descriptor
90 * @pipe_name: "named pipe" the message will be sent to
91 * @msg: pointer to the message data to send
92 * @size: size of the message to send (in bytes), including the header.
93 * @gfp_flags: flags for memory allocation.
94 *
95 * Returns: pointer to the message's skb if ok, ERR_PTR() with a negative errno code on error
96 *
97 * Description:
98 *
99 * Allocates an skb that will contain the message to send to user
100 * space over the messaging pipe and initializes it, copying the
101 * payload.
102 *
103 * Once this call is done, you can deliver it with
104 * wimax_msg_send().
105 *
106 * IMPORTANT:
107 *
108 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
109 * wimax_msg_send() depends on skb->data being placed at the
110 * beginning of the user message.
111 */
112struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
113 const char *pipe_name,
114 const void *msg, size_t size,
115 gfp_t gfp_flags)
116{
117 int result;
118 struct device *dev = wimax_dev->net_dev->dev.parent;
119 size_t msg_size;
120 void *genl_msg;
121 struct sk_buff *skb;
122
123 msg_size = nla_total_size(size)
124 + nla_total_size(sizeof(u32))
125 + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0);
126 result = -ENOMEM;
127 skb = genlmsg_new(msg_size, gfp_flags);
128 if (skb == NULL)
129 goto error_new;
130 genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
131 0, WIMAX_GNL_OP_MSG_TO_USER);
132 if (genl_msg == NULL) {
133 dev_err(dev, "no memory to create generic netlink message\n");
134 goto error_genlmsg_put;
135 }
136 result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
137 wimax_dev->net_dev->ifindex);
138 if (result < 0) {
139 dev_err(dev, "no memory to add ifindex attribute\n");
140 goto error_nla_put;
141 }
142 if (pipe_name) {
143 result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
144 pipe_name);
145 if (result < 0) {
146 dev_err(dev, "no memory to add pipe_name attribute\n");
147 goto error_nla_put;
148 }
149 }
150 result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
151 if (result < 0) {
152 dev_err(dev, "no memory to add payload in attribute\n");
153 goto error_nla_put;
154 }
155 genlmsg_end(skb, genl_msg);
156 return skb;
157
158error_nla_put:
159error_genlmsg_put:
160error_new:
161 nlmsg_free(skb);
162 return ERR_PTR(result);
163
164}
165EXPORT_SYMBOL_GPL(wimax_msg_alloc);
166
167
168/**
169 * wimax_msg_data_len - Return a pointer and size of a message's payload
170 *
171 * @msg: Pointer to a message created with wimax_msg_alloc()
172 * @size: Pointer to where to store the message's size
173 *
174 * Returns the pointer to the message data.
175 */
176const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
177{
178 struct nlmsghdr *nlh = (void *) msg->head;
179 struct nlattr *nla;
180
181 nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
182 WIMAX_GNL_MSG_DATA);
183 if (nla == NULL) {
184 printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
185 return NULL;
186 }
187 *size = nla_len(nla);
188 return nla_data(nla);
189}
190EXPORT_SYMBOL_GPL(wimax_msg_data_len);
191
192
193/**
194 * wimax_msg_data - Return a pointer to a message's payload
195 *
196 * @msg: Pointer to a message created with wimax_msg_alloc()
197 */
198const void *wimax_msg_data(struct sk_buff *msg)
199{
200 struct nlmsghdr *nlh = (void *) msg->head;
201 struct nlattr *nla;
202
203 nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
204 WIMAX_GNL_MSG_DATA);
205 if (nla == NULL) {
206 printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
207 return NULL;
208 }
209 return nla_data(nla);
210}
211EXPORT_SYMBOL_GPL(wimax_msg_data);
212
213
214/**
215 * wimax_msg_len - Return a message's payload length
216 *
217 * @msg: Pointer to a message created with wimax_msg_alloc()
218 */
219ssize_t wimax_msg_len(struct sk_buff *msg)
220{
221 struct nlmsghdr *nlh = (void *) msg->head;
222 struct nlattr *nla;
223
224 nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
225 WIMAX_GNL_MSG_DATA);
226 if (nla == NULL) {
227 printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
228 return -EINVAL;
229 }
230 return nla_len(nla);
231}
232EXPORT_SYMBOL_GPL(wimax_msg_len);
233
234
235/**
236 * wimax_msg_send - Send a pre-allocated message to user space
237 *
238 * @wimax_dev: WiMAX device descriptor
239 *
240 * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the
241 * ownership of @skb is transferred to this function.
242 *
243 * Returns: 0 if ok, < 0 errno code on error
244 *
245 * Description:
246 *
247 * Sends a free-form message that was preallocated with
248 * wimax_msg_alloc() and filled up.
249 *
250 * Assumes that once you pass an skb to this function for sending, it
251 * owns it and will release it when done (on success).
252 *
253 * IMPORTANT:
254 *
255 * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
256 * wimax_msg_send() depends on skb->data being placed at the
257 * beginning of the user message.
258 */
259int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
260{
261 int result;
262 struct device *dev = wimax_dev->net_dev->dev.parent;
263 void *msg = skb->data;
264 size_t size = skb->len;
265 might_sleep();
266
267 d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
268 d_dump(2, dev, msg, size);
269 result = genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
270 d_printf(1, dev, "CTX: genl multicast result %d\n", result);
271 if (result == -ESRCH) /* Nobody connected, ignore it */
272 result = 0; /* btw, the skb is freed already */
273 return result;
274}
275EXPORT_SYMBOL_GPL(wimax_msg_send);
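
/*
 * Example (a sketch with hypothetical driver names): split allocation
 * and delivery across contexts. wimax_msg_send() may sleep, so the
 * atomic path allocates with GFP_ATOMIC and delivery happens later
 * from process context (e.g. a workqueue):
 *
 *	skb = wimax_msg_alloc(wimax_dev, "diagnostics", report,
 *			      report_size, GFP_ATOMIC);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	(... queue skb; later, from the work function ...)
 *	result = wimax_msg_send(wimax_dev, skb);
 */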
276
277
278/**
279 * wimax_msg - Send a message to user space
280 *
281 * @wimax_dev: WiMAX device descriptor (properly referenced)
282 * @pipe_name: "named pipe" the message will be sent to
283 * @buf: pointer to the message to send.
284 * @size: size of the buffer pointed to by @buf (in bytes).
285 * @gfp_flags: flags for memory allocation.
286 *
287 * Returns: %0 if ok, negative errno code on error.
288 *
289 * Description:
290 *
291 * Sends a free-form message to user space on the device @wimax_dev.
292 *
293 * NOTES:
294 *
295 * The skb allocated internally is owned by this function, which
296 * will release it when done (unless it returns an error).
297 */
298int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
299 const void *buf, size_t size, gfp_t gfp_flags)
300{
301 int result = -ENOMEM;
302 struct sk_buff *skb;
303
304 skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
305 if (skb == NULL)
306 goto error_msg_new;
307 result = wimax_msg_send(wimax_dev, skb);
308error_msg_new:
309 return result;
310}
311EXPORT_SYMBOL_GPL(wimax_msg);
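
/*
 * Example (sketch): one-shot send from sleepable context; 'event' is
 * a hypothetical driver buffer and NULL selects no pipe name:
 *
 *	result = wimax_msg(wimax_dev, NULL, &event, sizeof(event),
 *			   GFP_KERNEL);
 */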
312
313
314static const
315struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
316 [WIMAX_GNL_MSG_IFIDX] = {
317 .type = NLA_U32,
318 },
319 [WIMAX_GNL_MSG_DATA] = {
320 .type = NLA_UNSPEC, /* libnl doesn't grok BINARY yet */
321 },
322};
323
324
325/*
326 * Relays a message from user space to the driver
327 *
328 * The skb is passed to the driver-specific function with the netlink
329 * and generic netlink headers already stripped.
330 *
331 * This call will block while handling/relaying the message.
332 */
333static
334int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
335{
336 int result, ifindex;
337 struct wimax_dev *wimax_dev;
338 struct device *dev;
339 struct nlmsghdr *nlh = info->nlhdr;
340 char *pipe_name;
341 void *msg_buf;
342 size_t msg_len;
343
344 might_sleep();
345 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
346 result = -ENODEV;
347 if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
348 printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX "
349 "attribute\n");
350 goto error_no_wimax_dev;
351 }
352 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
353 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
354 if (wimax_dev == NULL)
355 goto error_no_wimax_dev;
356 dev = wimax_dev_to_dev(wimax_dev);
357
358 /* Unpack arguments */
359 result = -EINVAL;
360 if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) {
361 dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA "
362 "attribute\n");
363 goto error_no_data;
364 }
365 msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]);
366 msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]);
367
368 if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL)
369 pipe_name = NULL;
370 else {
371 struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME];
372 size_t attr_len = nla_len(attr);
373 /* libnl-1.1 does not yet support NLA_NUL_STRING */
374 result = -ENOMEM;
375 pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL);
376 if (pipe_name == NULL)
377 goto error_alloc;
378 pipe_name[attr_len] = 0;
379 }
380 mutex_lock(&wimax_dev->mutex);
381 result = wimax_dev_is_ready(wimax_dev);
382 if (result < 0)
383 goto error_not_ready;
384 result = -ENOSYS;
385 if (wimax_dev->op_msg_from_user == NULL)
386 goto error_noop;
387
388 d_printf(1, dev,
389 "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n",
390 nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags,
391 nlh->nlmsg_seq, nlh->nlmsg_pid);
392 d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len);
393 d_dump(2, dev, msg_buf, msg_len);
394
395 result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name,
396 msg_buf, msg_len, info);
397error_noop:
398error_not_ready:
399 mutex_unlock(&wimax_dev->mutex);
400error_alloc:
401 kfree(pipe_name);
402error_no_data:
403 dev_put(wimax_dev->net_dev);
404error_no_wimax_dev:
405 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
406 return result;
407}
408
409
410/*
411 * Generic Netlink glue
412 */
413
414struct genl_ops wimax_gnl_msg_from_user = {
415 .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
416 .flags = GENL_ADMIN_PERM,
417 .policy = wimax_gnl_msg_policy,
418 .doit = wimax_gnl_doit_msg_from_user,
419 .dumpit = NULL,
420};
421
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
new file mode 100644
index 000000000000..ca269178c4d4
--- /dev/null
+++ b/net/wimax/op-reset.c
@@ -0,0 +1,143 @@
1/*
2 * Linux WiMAX
3 * Implement and export a method for resetting a WiMAX device
4 *
5 *
6 * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This implements a simple synchronous call to reset a WiMAX device.
25 *
26 * Resets aim at being warm, keeping the device handles active;
27 * however, when that fails, it falls back to a cold reset (that will
28 * disconnect and reconnect the device).
29 */
30
31#include <net/wimax.h>
32#include <net/genetlink.h>
33#include <linux/wimax.h>
34#include <linux/security.h>
35#include "wimax-internal.h"
36
37#define D_SUBMODULE op_reset
38#include "debug-levels.h"
39
40
41/**
42 * wimax_reset - Reset a WiMAX device
43 *
44 * @wimax_dev: WiMAX device descriptor
45 *
46 * Returns:
47 *
48 * %0 if ok and a warm reset was done (the device still exists in
49 * the system).
50 *
51 * -%ENODEV if a cold/bus reset had to be done (device has
52 * disconnected and reconnected, so current handle is not valid
53 * any more).
54 *
55 * -%EINVAL if the device is not even registered.
56 *
57 * Any other negative error code shall be considered as
58 * non-recoverable.
59 *
60 * Description:
61 *
62 * Called when wanting to reset the device for any reason. Device is
63 * taken back to power on status.
64 *
65 * This call blocks; on successful return, the device has completed the
66 * reset process and is ready to operate.
67 */
68int wimax_reset(struct wimax_dev *wimax_dev)
69{
70 int result = -EINVAL;
71 struct device *dev = wimax_dev_to_dev(wimax_dev);
72 enum wimax_st state;
73
74 might_sleep();
75 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
76 mutex_lock(&wimax_dev->mutex);
77 dev_hold(wimax_dev->net_dev);
78 state = wimax_dev->state;
79 mutex_unlock(&wimax_dev->mutex);
80
81 if (state >= WIMAX_ST_DOWN) {
82 mutex_lock(&wimax_dev->mutex_reset);
83 result = wimax_dev->op_reset(wimax_dev);
84 mutex_unlock(&wimax_dev->mutex_reset);
85 }
86 dev_put(wimax_dev->net_dev);
87
88 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
89 return result;
90}
91EXPORT_SYMBOL(wimax_reset);
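
/*
 * Example (sketch of a hypothetical caller) honoring the return
 * contract above -- -ENODEV means the reset went cold and the current
 * handle is gone, not that the operation failed:
 *
 *	result = wimax_reset(wimax_dev);
 *	if (result == -ENODEV)
 *		return 0;	(device disconnected; it will re-probe)
 *	if (result < 0)
 *		return result;	(non-recoverable error)
 */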
92
93
94static const
95struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
96 [WIMAX_GNL_RESET_IFIDX] = {
97 .type = NLA_U32,
98 },
99};
100
101
102/*
103 * Exporting to user space over generic netlink
104 *
105 * Parse the reset command from user space, return error code.
106 *
107 * Only one attribute: the interface index of the device to reset.
108 */
109static
110int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
111{
112 int result, ifindex;
113 struct wimax_dev *wimax_dev;
114 struct device *dev;
115
116 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
117 result = -ENODEV;
118 if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
119 printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX "
120 "attribute\n");
121 goto error_no_wimax_dev;
122 }
123 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
124 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
125 if (wimax_dev == NULL)
126 goto error_no_wimax_dev;
127 dev = wimax_dev_to_dev(wimax_dev);
128 /* Execute the operation and send the result back to user space */
129 result = wimax_reset(wimax_dev);
130 dev_put(wimax_dev->net_dev);
131error_no_wimax_dev:
132 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
133 return result;
134}
135
136
137struct genl_ops wimax_gnl_reset = {
138 .cmd = WIMAX_GNL_OP_RESET,
139 .flags = GENL_ADMIN_PERM,
140 .policy = wimax_gnl_reset_policy,
141 .doit = wimax_gnl_doit_reset,
142 .dumpit = NULL,
143};
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
new file mode 100644
index 000000000000..2b75aee04217
--- /dev/null
+++ b/net/wimax/op-rfkill.c
@@ -0,0 +1,532 @@
1/*
2 * Linux WiMAX
3 * RF-kill framework integration
4 *
5 *
6 * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This integrates into the Linux Kernel rfkill subsystem so that the
25 * drivers just have to do the bare minimal work, which is providing a
26 * method to set the software RF-Kill switch and to report changes in
27 * the software and hardware switch status.
28 *
29 * A non-polled generic rfkill device is embedded into the WiMAX
30 * subsystem's representation of a device.
31 *
32 * FIXME: Need polled support? use a timer or add the implementation
33 * to the stack.
34 *
35 * All device drivers have to do is after wimax_dev_init(), call
36 * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update
37 * initial state and then every time it changes. See wimax.h:struct
38 * wimax_dev for more information.
39 *
40 * ROADMAP
41 *
42 * wimax_gnl_doit_rfkill() User space calling wimax_rfkill()
43 * wimax_rfkill() Kernel calling wimax_rfkill()
44 * __wimax_rf_toggle_radio()
45 *
46 * wimax_rfkill_toggle_radio() RF-Kill subsystem calling
47 * __wimax_rf_toggle_radio()
48 *
49 * __wimax_rf_toggle_radio()
50 * wimax_dev->op_rfkill_sw_toggle() Driver backend
51 * __wimax_state_change()
52 *
53 * wimax_report_rfkill_sw() Driver reports state change
54 * __wimax_state_change()
55 *
56 * wimax_report_rfkill_hw() Driver reports state change
57 * __wimax_state_change()
58 *
59 * wimax_rfkill_add() Initialize/shutdown rfkill support
60 * wimax_rfkill_rm() [called by wimax_dev_add/rm()]
61 */
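
/*
 * Example (hypothetical driver probe code; only the wimax_*() calls
 * below are real): once registered, the driver reads its switches and
 * feeds the stack the initial state; a device without a hardware
 * switch just reports %WIMAX_RF_ON once:
 *
 *	wimax_report_rfkill_hw(wimax_dev, hw_switch_off ?
 *			       WIMAX_RF_OFF : WIMAX_RF_ON);
 *	wimax_report_rfkill_sw(wimax_dev, WIMAX_RF_ON);
 */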
62
63#include <net/wimax.h>
64#include <net/genetlink.h>
65#include <linux/wimax.h>
66#include <linux/security.h>
67#include <linux/rfkill.h>
68#include <linux/input.h>
69#include "wimax-internal.h"
70
71#define D_SUBMODULE op_rfkill
72#include "debug-levels.h"
73
74#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
75
76
77/**
78 * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
79 *
80 * @wimax_dev: WiMAX device descriptor
81 *
82 * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on,
83 * %WIMAX_RF_OFF radio off.
84 *
85 * When the device detects a change in the state of the hardware RF
86 * switch, it must call this function to let the WiMAX kernel stack
87 * know that the state has changed so it can be properly propagated.
88 *
89 * The WiMAX stack caches the state (the driver doesn't need to). As
90 * well, as the change is propagated it will come back as a request to
91 * change the software state to mirror the hardware state.
92 *
93 * If the device doesn't have a hardware kill switch, just report
94 * it on initialization as always on (%WIMAX_RF_ON, radio on).
95 */
96void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
97 enum wimax_rf_state state)
98{
99 int result;
100 struct device *dev = wimax_dev_to_dev(wimax_dev);
101 enum wimax_st wimax_state;
102 enum rfkill_state rfkill_state;
103
104 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
105 BUG_ON(state == WIMAX_RF_QUERY);
106 BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
107
108 mutex_lock(&wimax_dev->mutex);
109 result = wimax_dev_is_ready(wimax_dev);
110 if (result < 0)
111 goto error_not_ready;
112
113 if (state != wimax_dev->rf_hw) {
114 wimax_dev->rf_hw = state;
115 rfkill_state = state == WIMAX_RF_ON ?
116 RFKILL_STATE_OFF : RFKILL_STATE_ON;
117 if (wimax_dev->rf_hw == WIMAX_RF_ON
118 && wimax_dev->rf_sw == WIMAX_RF_ON)
119 wimax_state = WIMAX_ST_READY;
120 else
121 wimax_state = WIMAX_ST_RADIO_OFF;
122 __wimax_state_change(wimax_dev, wimax_state);
123 input_report_key(wimax_dev->rfkill_input, KEY_WIMAX,
124 rfkill_state);
125 }
126error_not_ready:
127 mutex_unlock(&wimax_dev->mutex);
128 d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
129 wimax_dev, state, result);
130}
131EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
132
133
134/**
135 * wimax_report_rfkill_sw - Reports changes in the software RF switch
136 *
137 * @wimax_dev: WiMAX device descriptor
138 *
139 * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
140 * %WIMAX_RF_OFF radio off.
141 *
142 * Reports changes in the software RF switch state to the WiMAX
143 * stack.
144 *
145 * The main use is during initialization, so the driver can query the
146 * device for its current software radio kill switch state and feed it
147 * to the system.
148 *
149 * In theory, the device should not change the software state by
150 * itself; in practice this can happen, as the device might decide
151 * to switch (in software) the radio off for different reasons.
152 */
153void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
154 enum wimax_rf_state state)
155{
156 int result;
157 struct device *dev = wimax_dev_to_dev(wimax_dev);
158 enum wimax_st wimax_state;
159
160 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
161 BUG_ON(state == WIMAX_RF_QUERY);
162 BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
163
164 mutex_lock(&wimax_dev->mutex);
165 result = wimax_dev_is_ready(wimax_dev);
166 if (result < 0)
167 goto error_not_ready;
168
169 if (state != wimax_dev->rf_sw) {
170 wimax_dev->rf_sw = state;
171 if (wimax_dev->rf_hw == WIMAX_RF_ON
172 && wimax_dev->rf_sw == WIMAX_RF_ON)
173 wimax_state = WIMAX_ST_READY;
174 else
175 wimax_state = WIMAX_ST_RADIO_OFF;
176 __wimax_state_change(wimax_dev, wimax_state);
177 }
178error_not_ready:
179 mutex_unlock(&wimax_dev->mutex);
180 d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
181 wimax_dev, state, result);
182}
183EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
184
185
186/*
187 * Callback for the RF Kill toggle operation
188 *
189 * This function is called by:
190 *
191 * - The rfkill subsystem when the RF-Kill key is pressed in the
192 * hardware and the driver notifies through
193 * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back
194 * here so the software RF Kill switch state is changed to reflect
195 * the hardware switch state.
196 *
197 * - When the user sets the state through sysfs' rfkill/state file
198 *
199 * - When the user calls wimax_rfkill().
200 *
201 * This call blocks!
202 *
203 * WARNING! When we call rfkill_unregister(), this will be called with
204 * state 0!
205 *
206 * WARNING: wimax_dev must be locked
207 */
208static
209int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev,
210 enum wimax_rf_state state)
211{
212 int result = 0;
213 struct device *dev = wimax_dev_to_dev(wimax_dev);
214 enum wimax_st wimax_state;
215
216 might_sleep();
217 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
218 if (wimax_dev->rf_sw == state)
219 goto out_no_change;
220 if (wimax_dev->op_rfkill_sw_toggle != NULL)
221 result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state);
222 else if (state == WIMAX_RF_OFF) /* No op? can't turn off */
223 result = -ENXIO;
224 else /* No op? can turn on */
225 result = 0; /* should never happen tho */
226 if (result >= 0) {
227 result = 0;
228 wimax_dev->rf_sw = state;
229 wimax_state = state == WIMAX_RF_ON ?
230 WIMAX_ST_READY : WIMAX_ST_RADIO_OFF;
231 __wimax_state_change(wimax_dev, wimax_state);
232 }
233out_no_change:
234 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
235 wimax_dev, state, result);
236 return result;
237}
238
239
240/*
241 * Translate from rfkill state to wimax state
242 *
243 * NOTE: Special state handling rules here
244 *
245 * Just pretend the call didn't happen if we are in a state where
246 * we know for sure it cannot be handled (WIMAX_ST_DOWN or
247 * __WIMAX_ST_QUIESCING). rfkill() needs it to register and
248 * unregister, as it will run this path.
249 *
250 * NOTE: This call will block until the operation is completed.
251 */
252static
253int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state)
254{
255 int result;
256 struct wimax_dev *wimax_dev = data;
257 struct device *dev = wimax_dev_to_dev(wimax_dev);
258 enum wimax_rf_state rf_state;
259
260 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
261 switch (state) {
262 case RFKILL_STATE_ON:
263 rf_state = WIMAX_RF_OFF;
264 break;
265 case RFKILL_STATE_OFF:
266 rf_state = WIMAX_RF_ON;
267 break;
268 default:
269 BUG();
270 }
271 mutex_lock(&wimax_dev->mutex);
272 if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
273 result = 0; /* just pretend it didn't happen */
274 else
275 result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
276 mutex_unlock(&wimax_dev->mutex);
277 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
278 wimax_dev, state, result);
279 return result;
280}
281
282
283/**
284 * wimax_rfkill - Set the software RF switch state for a WiMAX device
285 *
286 * @wimax_dev: WiMAX device descriptor
287 *
288 * @state: New RF state.
289 *
290 * Returns:
291 *
292 * >= 0 toggle state if ok, < 0 errno code on error. The toggle state
293 * is returned as a bitmap, bit 0 being the hardware RF state, bit 1
294 * the software RF state.
295 *
296 * 0 means the switch is off (%WIMAX_RF_ON, radio on); 1 means the
297 * switch is on (%WIMAX_RF_OFF, radio off).
298 *
299 * Description:
300 *
301 * Called when the caller wants to request the WiMAX radio to be
302 * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With
303 * %WIMAX_RF_QUERY, just the current state is returned.
304 *
305 * NOTE:
306 *
307 * This call will block until the operation is complete.
308 */
309int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
310{
311 int result;
312 struct device *dev = wimax_dev_to_dev(wimax_dev);
313
314 d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
315 mutex_lock(&wimax_dev->mutex);
316 result = wimax_dev_is_ready(wimax_dev);
317 if (result < 0)
318 goto error_not_ready;
319 switch (state) {
320 case WIMAX_RF_ON:
321 case WIMAX_RF_OFF:
322 result = __wimax_rf_toggle_radio(wimax_dev, state);
323 if (result < 0)
324 goto error;
325 break;
326 case WIMAX_RF_QUERY:
327 break;
328 default:
329 result = -EINVAL;
330 goto error;
331 }
332 result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw;
333error:
334error_not_ready:
335 mutex_unlock(&wimax_dev->mutex);
336 d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
337 wimax_dev, state, result);
338 return result;
339}
340EXPORT_SYMBOL(wimax_rfkill);
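
/*
 * Example (sketch): querying and decoding the toggle-state bitmap
 * returned above (bit 0 hardware switch, bit 1 software switch,
 * 1 == radio off):
 *
 *	result = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);
 *	if (result < 0)
 *		return result;
 *	hw_off = result & 0x1;
 *	sw_off = (result >> 1) & 0x1;
 */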
341
342
343/*
344 * Register a new WiMAX device's RF Kill support
345 *
346 * WARNING: wimax_dev->mutex must be unlocked
347 */
348int wimax_rfkill_add(struct wimax_dev *wimax_dev)
349{
350 int result;
351 struct rfkill *rfkill;
352 struct input_dev *input_dev;
353 struct device *dev = wimax_dev_to_dev(wimax_dev);
354
355 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
356 /* Initialize RF Kill */
357 result = -ENOMEM;
358 rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX);
359 if (rfkill == NULL)
360 goto error_rfkill_allocate;
361 wimax_dev->rfkill = rfkill;
362
363 rfkill->name = wimax_dev->name;
364 rfkill->state = RFKILL_STATE_OFF;
365 rfkill->data = wimax_dev;
366 rfkill->toggle_radio = wimax_rfkill_toggle_radio;
367 rfkill->user_claim_unsupported = 1;
368
369 /* Initialize the input device for the hw key */
370 input_dev = input_allocate_device();
371 if (input_dev == NULL)
372 goto error_input_allocate;
373 wimax_dev->rfkill_input = input_dev;
374 d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev);
375
376 input_dev->name = wimax_dev->name;
377 /* FIXME: get a real device bus ID and stuff? do we care? */
378 input_dev->id.bustype = BUS_HOST;
379 input_dev->id.vendor = 0xffff;
380 input_dev->evbit[0] = BIT(EV_KEY);
381 set_bit(KEY_WIMAX, input_dev->keybit);
382
383 /* Register both */
384 result = input_register_device(wimax_dev->rfkill_input);
385 if (result < 0)
386 goto error_input_register;
387 result = rfkill_register(wimax_dev->rfkill);
388 if (result < 0)
389 goto error_rfkill_register;
390
391 /* If there is no SW toggle op, SW RFKill is always on */
392 if (wimax_dev->op_rfkill_sw_toggle == NULL)
393 wimax_dev->rf_sw = WIMAX_RF_ON;
394
395 d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
396 return 0;
397
398	/* if rfkill_register() succeeds, can't use rfkill_free() any
399 * more, only rfkill_unregister() [it owns the refcount]; with
400 * the input device we have the same issue--hence the if. */
401error_rfkill_register:
402 input_unregister_device(wimax_dev->rfkill_input);
403 wimax_dev->rfkill_input = NULL;
404error_input_register:
405 if (wimax_dev->rfkill_input)
406 input_free_device(wimax_dev->rfkill_input);
407error_input_allocate:
408 rfkill_free(wimax_dev->rfkill);
409error_rfkill_allocate:
410 d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
411 return result;
412}
413
414
415/*
416 * Deregister a WiMAX device's RF Kill support
417 *
418 * Ick, we can't call rfkill_free() after rfkill_unregister()...oh
419 * well.
420 *
421 * WARNING: wimax_dev->mutex must be unlocked
422 */
423void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
424{
425 struct device *dev = wimax_dev_to_dev(wimax_dev);
426 d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
427 rfkill_unregister(wimax_dev->rfkill); /* frees */
428 input_unregister_device(wimax_dev->rfkill_input);
429 d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
430}
431
432
433#else /* #ifdef CONFIG_RFKILL */
434
435void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
436 enum wimax_rf_state state)
437{
438}
439EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
440
441void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
442 enum wimax_rf_state state)
443{
444}
445EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
446
447int wimax_rfkill(struct wimax_dev *wimax_dev,
448 enum wimax_rf_state state)
449{
450 return WIMAX_RF_ON << 1 | WIMAX_RF_ON;
451}
452EXPORT_SYMBOL_GPL(wimax_rfkill);
453
454int wimax_rfkill_add(struct wimax_dev *wimax_dev)
455{
456 return 0;
457}
458
459void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
460{
461}
462
463#endif /* #ifdef CONFIG_RFKILL */
464
465
466/*
467 * Exporting to user space over generic netlink
468 *
469 * Parse the rfkill command from user space, return a combination
470 * value that describe the states of the different toggles.
471 *
472 * Only one attribute: the new state requested (on, off or no change,
473 * just query).
474 */
475
476static const
477struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
478 [WIMAX_GNL_RFKILL_IFIDX] = {
479 .type = NLA_U32,
480 },
481 [WIMAX_GNL_RFKILL_STATE] = {
482 .type = NLA_U32 /* enum wimax_rf_state */
483 },
484};
485
486
487static
488int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
489{
490 int result, ifindex;
491 struct wimax_dev *wimax_dev;
492 struct device *dev;
493 enum wimax_rf_state new_state;
494
495 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
496 result = -ENODEV;
497 if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
498 printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX "
499 "attribute\n");
500 goto error_no_wimax_dev;
501 }
502 ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
503 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
504 if (wimax_dev == NULL)
505 goto error_no_wimax_dev;
506 dev = wimax_dev_to_dev(wimax_dev);
507 result = -EINVAL;
508 if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) {
509 dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE "
510 "attribute\n");
511 goto error_no_pid;
512 }
513 new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]);
514
515 /* Execute the operation and send the result back to user space */
516 result = wimax_rfkill(wimax_dev, new_state);
517error_no_pid:
518 dev_put(wimax_dev->net_dev);
519error_no_wimax_dev:
520 d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
521 return result;
522}
523
524
525struct genl_ops wimax_gnl_rfkill = {
526 .cmd = WIMAX_GNL_OP_RFKILL,
527 .flags = GENL_ADMIN_PERM,
528 .policy = wimax_gnl_rfkill_policy,
529 .doit = wimax_gnl_doit_rfkill,
530 .dumpit = NULL,
531};
532
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
new file mode 100644
index 000000000000..d4da92f8981a
--- /dev/null
+++ b/net/wimax/stack.c
@@ -0,0 +1,599 @@
1/*
2 * Linux WiMAX
3 * Initialization, addition and removal of wimax devices
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This implements:
25 *
26 * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
27 * addition/registration initialize all subfields and allocate
28 * generic netlink resources for user space communication. On
29 * removal/unregistration, undo all that.
30 *
31 * - device state machine [wimax_state_change()] and support to send
32 * reports to user space when the state changes
33 * [wimax_gnl_re_state_change*()].
34 *
35 * See include/net/wimax.h for rationales and design.
36 *
37 * ROADMAP
38 *
39 * [__]wimax_state_change() Called by drivers to update device's state
40 * wimax_gnl_re_state_change_alloc()
41 * wimax_gnl_re_state_change_send()
42 *
43 * wimax_dev_init() Init a device
44 * wimax_dev_add() Register
45 * wimax_rfkill_add()
46 * wimax_gnl_add() Register all the generic netlink resources.
47 * wimax_id_table_add()
48 * wimax_dev_rm() Unregister
49 * wimax_id_table_rm()
50 * wimax_gnl_rm()
51 * wimax_rfkill_rm()
52 */
53#include <linux/device.h>
54#include <net/genetlink.h>
55#include <linux/netdevice.h>
56#include <linux/wimax.h>
57#include "wimax-internal.h"
58
59
60#define D_SUBMODULE stack
61#include "debug-levels.h"
62
63/*
64 * Authoritative source for the RE_STATE_CHANGE attribute policy
65 *
66 * We don't really use it here, but /me likes to keep the definition
67 * close to where the data is generated.
68 */
69/*
70static const
71struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
72 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
73 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
74};
75*/
76
77
78/*
79 * Allocate a Report State Change message
80 *
81 * @header: save it, you need it for _send()
82 *
83 * Creates and fills a basic state change message; different code
84 * paths can then add more attributes to the message as needed.
85 *
86 * Use wimax_gnl_re_state_change_send() to send the returned skb.
87 *
88 * Returns: skb with the genl message if ok, IS_ERR() ptr on error
89 * with an errno code.
90 */
91static
92struct sk_buff *wimax_gnl_re_state_change_alloc(
93 struct wimax_dev *wimax_dev,
94 enum wimax_st new_state, enum wimax_st old_state,
95 void **header)
96{
97 int result;
98 struct device *dev = wimax_dev_to_dev(wimax_dev);
99 void *data;
100 struct sk_buff *report_skb;
101
102 d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
103 wimax_dev, new_state, old_state);
104 result = -ENOMEM;
105 report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
106 if (report_skb == NULL) {
107 dev_err(dev, "RE_STCH: can't create message\n");
108 goto error_new;
109 }
110 data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family,
111 0, WIMAX_GNL_RE_STATE_CHANGE);
112 if (data == NULL) {
113 dev_err(dev, "RE_STCH: can't put data into message\n");
114 goto error_put;
115 }
116 *header = data;
117
118 result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
119 if (result < 0) {
120 dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
121 goto error_put;
122 }
123 result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
124 if (result < 0) {
125 dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
126 goto error_put;
127 }
128 result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
129 wimax_dev->net_dev->ifindex);
130 if (result < 0) {
131 dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
132 goto error_put;
133 }
134 d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
135 wimax_dev, new_state, old_state, report_skb);
136 return report_skb;
137
138error_put:
139 nlmsg_free(report_skb);
140error_new:
141 d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
142 wimax_dev, new_state, old_state, result);
143 return ERR_PTR(result);
144}
145
146
147/*
148 * Send a Report State Change message (as created with _alloc).
149 *
150 * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
151 * @header: as returned by wimax_gnl_re_state_change_alloc()
152 *
153 * Returns: 0 if ok, < 0 errno code on error.
154 *
155 * If the message is NULL, pretend it didn't happen.
156 */
157static
158int wimax_gnl_re_state_change_send(
159 struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
160 void *header)
161{
162 int result = 0;
163 struct device *dev = wimax_dev_to_dev(wimax_dev);
164 d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
165 wimax_dev, report_skb);
166 if (report_skb == NULL)
167 goto out;
168 genlmsg_end(report_skb, header);
169 result = genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
170 if (result == -ESRCH) /* Nobody connected, ignore it */
171 result = 0; /* btw, the skb is freed already */
172 if (result < 0) {
173 dev_err(dev, "RE_STCH: Error sending: %d\n", result);
174 nlmsg_free(report_skb);
175 }
176out:
177 d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
178 wimax_dev, report_skb, result);
179 return result;
180}
181
182
183static
184void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
185 unsigned allowed_states_bm)
186{
187 if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
188 printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
189 old_state, new_state);
190 }
191}
192
193
194/*
195 * Set the current state of a WiMAX device [unlocked version of
196 * wimax_state_change()].
197 */
198void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
199{
200 struct device *dev = wimax_dev_to_dev(wimax_dev);
201 enum wimax_st old_state = wimax_dev->state;
202 struct sk_buff *stch_skb;
203 void *header;
204
205 d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
206 wimax_dev, new_state, old_state);
207
208 if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
209 dev_err(dev, "SW BUG: requesting invalid state %u\n",
210 new_state);
211 goto out;
212 }
213 if (old_state == new_state)
214 goto out;
215 header = NULL; /* gcc complains? can't grok why */
216 stch_skb = wimax_gnl_re_state_change_alloc(
217 wimax_dev, new_state, old_state, &header);
218
219 /* Verify the state transition and do exit-from-state actions */
220 switch (old_state) {
221 case __WIMAX_ST_NULL:
222 __check_new_state(old_state, new_state,
223 1 << WIMAX_ST_DOWN);
224 break;
225 case WIMAX_ST_DOWN:
226 __check_new_state(old_state, new_state,
227 1 << __WIMAX_ST_QUIESCING
228 | 1 << WIMAX_ST_UNINITIALIZED
229 | 1 << WIMAX_ST_RADIO_OFF);
230 break;
231 case __WIMAX_ST_QUIESCING:
232 __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
233 break;
234 case WIMAX_ST_UNINITIALIZED:
235 __check_new_state(old_state, new_state,
236 1 << __WIMAX_ST_QUIESCING
237 | 1 << WIMAX_ST_RADIO_OFF);
238 break;
239 case WIMAX_ST_RADIO_OFF:
240 __check_new_state(old_state, new_state,
241 1 << __WIMAX_ST_QUIESCING
242 | 1 << WIMAX_ST_READY);
243 break;
244 case WIMAX_ST_READY:
245 __check_new_state(old_state, new_state,
246 1 << __WIMAX_ST_QUIESCING
247 | 1 << WIMAX_ST_RADIO_OFF
248 | 1 << WIMAX_ST_SCANNING
249 | 1 << WIMAX_ST_CONNECTING
250 | 1 << WIMAX_ST_CONNECTED);
251 break;
252 case WIMAX_ST_SCANNING:
253 __check_new_state(old_state, new_state,
254 1 << __WIMAX_ST_QUIESCING
255 | 1 << WIMAX_ST_RADIO_OFF
256 | 1 << WIMAX_ST_READY
257 | 1 << WIMAX_ST_CONNECTING
258 | 1 << WIMAX_ST_CONNECTED);
259 break;
260 case WIMAX_ST_CONNECTING:
261 __check_new_state(old_state, new_state,
262 1 << __WIMAX_ST_QUIESCING
263 | 1 << WIMAX_ST_RADIO_OFF
264 | 1 << WIMAX_ST_READY
265 | 1 << WIMAX_ST_SCANNING
266 | 1 << WIMAX_ST_CONNECTED);
267 break;
268 case WIMAX_ST_CONNECTED:
269 __check_new_state(old_state, new_state,
270 1 << __WIMAX_ST_QUIESCING
271 | 1 << WIMAX_ST_RADIO_OFF
272 | 1 << WIMAX_ST_READY);
273 netif_tx_disable(wimax_dev->net_dev);
274 netif_carrier_off(wimax_dev->net_dev);
275 break;
276 case __WIMAX_ST_INVALID:
277 default:
278 dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
279 wimax_dev, wimax_dev->state);
280 WARN_ON(1);
281 goto out;
282 }
283
284 /* Execute the actions of entry to the new state */
285 switch (new_state) {
286 case __WIMAX_ST_NULL:
287 dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
288 "from %u\n", wimax_dev, wimax_dev->state);
289 WARN_ON(1); /* Nobody can enter this state */
290 break;
291 case WIMAX_ST_DOWN:
292 break;
293 case __WIMAX_ST_QUIESCING:
294 break;
295 case WIMAX_ST_UNINITIALIZED:
296 break;
297 case WIMAX_ST_RADIO_OFF:
298 break;
299 case WIMAX_ST_READY:
300 break;
301 case WIMAX_ST_SCANNING:
302 break;
303 case WIMAX_ST_CONNECTING:
304 break;
305 case WIMAX_ST_CONNECTED:
306 netif_carrier_on(wimax_dev->net_dev);
307 netif_wake_queue(wimax_dev->net_dev);
308 break;
309 case __WIMAX_ST_INVALID:
310 default:
311 BUG();
312 }
313 __wimax_state_set(wimax_dev, new_state);
314 if (stch_skb)
315 wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
316out:
317 d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
318 wimax_dev, new_state, old_state);
319 return;
320}
321
322
323/**
324 * wimax_state_change - Set the current state of a WiMAX device
325 *
326 * @wimax_dev: WiMAX device descriptor (properly referenced)
327 * @new_state: New state to switch to
328 *
329 * This implements the state changes for the wimax devices. It will
330 *
331 * - verify that the state transition is legal (for now it'll just
332 * print a warning if not) according to the table in
333 * linux/wimax.h's documentation for 'enum wimax_st'.
334 *
335 * - perform the actions needed for leaving the current state and
336 * whichever are needed for entering the new state.
337 *
338 * - issue a report to user space indicating the new state (and an
339 * optional payload with information about the new state).
340 *
341 * NOTE: @wimax_dev must be locked
342 */
343void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
344{
345 mutex_lock(&wimax_dev->mutex);
346 __wimax_state_change(wimax_dev, new_state);
347 mutex_unlock(&wimax_dev->mutex);
348 return;
349}
350EXPORT_SYMBOL_GPL(wimax_state_change);
351
352
353/**
354 * wimax_state_get() - Return the current state of a WiMAX device
355 *
356 * @wimax_dev: WiMAX device descriptor
357 *
358 * Returns: Current state of the device according to its driver.
359 */
360enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
361{
362 enum wimax_st state;
363 mutex_lock(&wimax_dev->mutex);
364 state = wimax_dev->state;
365 mutex_unlock(&wimax_dev->mutex);
366 return state;
367}
368EXPORT_SYMBOL_GPL(wimax_state_get);
369
370
371/**
372 * wimax_dev_init - initialize a newly allocated instance
373 *
374 * @wimax_dev: WiMAX device descriptor to initialize.
375 *
376 * Initializes fields of a freshly allocated @wimax_dev instance. This
377 * function assumes that after allocation, the memory occupied by
378 * @wimax_dev was zeroed.
379 */
380void wimax_dev_init(struct wimax_dev *wimax_dev)
381{
382 INIT_LIST_HEAD(&wimax_dev->id_table_node);
383 __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
384 mutex_init(&wimax_dev->mutex);
385 mutex_init(&wimax_dev->mutex_reset);
386}
387EXPORT_SYMBOL_GPL(wimax_dev_init);
388
389/*
390 * These externs are declared here because it's easier to keep track --
391 * this declaration and the ops array below list the same entries
392 */
393extern struct genl_ops
394 wimax_gnl_msg_from_user,
395 wimax_gnl_reset,
396 wimax_gnl_rfkill;
397
398static
399struct genl_ops *wimax_gnl_ops[] = {
400 &wimax_gnl_msg_from_user,
401 &wimax_gnl_reset,
402 &wimax_gnl_rfkill,
403};
404
405
406static
407size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
408 unsigned char *addr, size_t addr_len)
409{
410 unsigned cnt, total;
411 for (total = cnt = 0; cnt < addr_len; cnt++)
412 total += scnprintf(addr_str + total, addr_str_size - total,
413 "%02x%c", addr[cnt],
414 cnt == addr_len - 1 ? '\0' : ':');
415 return total;
416}
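
/*
 * For a 6-byte hardware address this yields the usual form, e.g.
 * "00:16:ea:00:00:01" (an illustrative value), returning the number
 * of characters written, not counting the trailing '\0'.
 */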
417
418
419/**
420 * wimax_dev_add - Register a new WiMAX device
421 *
422 * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
423 * priv data). You must have called wimax_dev_init() on it before.
424 *
425 * @net_dev: net device the @wimax_dev is associated with. The
426 * function expects SET_NETDEV_DEV() and register_netdev() were
427 * already called on it.
428 *
429 * Registers the new WiMAX device, sets up the user-kernel control
430 * interface (generic netlink) and common WiMAX infrastructure.
431 *
432 * Note that the parts that will allow interaction with user space are
433 * set up at the very end, when the rest is in place, as once that
434 * happens, the driver might get user space control requests via
435 * netlink or from debugfs that might translate into calls into
436 * wimax_dev->op_*().
437 */
438int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
439{
440 int result;
441 struct device *dev = net_dev->dev.parent;
442 char addr_str[32];
443
444 d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
445
446 /* Do the RFKILL setup before locking, as RFKILL will call
447 * into our functions. */
448 wimax_dev->net_dev = net_dev;
449 result = wimax_rfkill_add(wimax_dev);
450 if (result < 0)
451 goto error_rfkill_add;
452
453 /* Set up user-space interaction */
454 mutex_lock(&wimax_dev->mutex);
455 wimax_id_table_add(wimax_dev);
456 result = wimax_debugfs_add(wimax_dev);
457 if (result < 0) {
458 dev_err(dev, "cannot initialize debugfs: %d\n",
459 result);
460 goto error_debugfs_add;
461 }
462
463 __wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
464 mutex_unlock(&wimax_dev->mutex);
465
466 wimax_addr_scnprint(addr_str, sizeof(addr_str),
467 net_dev->dev_addr, net_dev->addr_len);
468 dev_err(dev, "WiMAX interface %s (%s) ready\n",
469 net_dev->name, addr_str);
470 d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
471 return 0;
472
473error_debugfs_add:
474 wimax_id_table_rm(wimax_dev);
475 mutex_unlock(&wimax_dev->mutex);
476 wimax_rfkill_rm(wimax_dev);
477error_rfkill_add:
478 d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
479 wimax_dev, net_dev, result);
480 return result;
481}
482EXPORT_SYMBOL_GPL(wimax_dev_add);
483
484
485/**
486 * wimax_dev_rm - Unregister an existing WiMAX device
487 *
488 * @wimax_dev: WiMAX device descriptor
489 *
490 * Unregisters a WiMAX device previously registered for use with
491 * wimax_dev_add().
492 *
493 * IMPORTANT! Must call before calling unregister_netdev().
494 *
495 * After this function returns, you will not get any more user space
496 * control requests (via netlink or debugfs), and thus no more calls into wimax_dev->op_*().
497 *
498 * Reentrancy control is ensured by setting the state to
499 * %__WIMAX_ST_QUIESCING. rfkill operations coming through
500 * wimax_*rfkill*() will be stopped by the quiescing state; ops coming
501 * from the rfkill subsystem will be stopped by the support being
502 * removed by wimax_rfkill_rm().
503 */
504void wimax_dev_rm(struct wimax_dev *wimax_dev)
505{
506 d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
507
508 mutex_lock(&wimax_dev->mutex);
509 __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
510 wimax_debugfs_rm(wimax_dev);
511 wimax_id_table_rm(wimax_dev);
512 __wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
513 mutex_unlock(&wimax_dev->mutex);
514 wimax_rfkill_rm(wimax_dev);
515 d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
516}
517EXPORT_SYMBOL_GPL(wimax_dev_rm);
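
/*
 * Example (hypothetical driver glue; only the wimax_*() and netdev
 * calls are real) of the registration order wimax_dev_add() expects
 * and the reverse order wimax_dev_rm() requires:
 *
 *	(probe)
 *	wimax_dev_init(&drv->wimax_dev);
 *	drv->wimax_dev.op_msg_from_user = mydrv_op_msg_from_user;
 *	SET_NETDEV_DEV(net_dev, parent_dev);
 *	result = register_netdev(net_dev);
 *	result = wimax_dev_add(&drv->wimax_dev, net_dev);
 *
 *	(remove: wimax_dev_rm() strictly before unregister_netdev())
 *	wimax_dev_rm(&drv->wimax_dev);
 *	unregister_netdev(net_dev);
 */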
518
519struct genl_family wimax_gnl_family = {
520 .id = GENL_ID_GENERATE,
521 .name = "WiMAX",
522 .version = WIMAX_GNL_VERSION,
523 .hdrsize = 0,
524 .maxattr = WIMAX_GNL_ATTR_MAX,
525};
526
527struct genl_multicast_group wimax_gnl_mcg = {
528 .name = "msg",
529};
530
531
532
533/* Setup the wimax stack */
534static
535int __init wimax_subsys_init(void)
536{
537 int result, cnt;
538
539 d_fnstart(4, NULL, "()\n");
540 snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
541 "WiMAX");
542 result = genl_register_family(&wimax_gnl_family);
543 if (unlikely(result < 0)) {
544 printk(KERN_ERR "cannot register generic netlink family: %d\n",
545 result);
546 goto error_register_family;
547 }
548
549 for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) {
550 result = genl_register_ops(&wimax_gnl_family,
551 wimax_gnl_ops[cnt]);
552 d_printf(4, NULL, "registering generic netlink op code "
553 "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result);
554 if (unlikely(result < 0)) {
555 printk(KERN_ERR "cannot register generic netlink op "
556 "code %u: %d\n",
557 wimax_gnl_ops[cnt]->cmd, result);
558 goto error_register_ops;
559 }
560 }
561
562 result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
563 if (result < 0)
564 goto error_mc_group;
565 d_fnend(4, NULL, "() = 0\n");
566 return 0;
567
568error_mc_group:
569error_register_ops:
570 for (cnt--; cnt >= 0; cnt--)
571 genl_unregister_ops(&wimax_gnl_family,
572 wimax_gnl_ops[cnt]);
573 genl_unregister_family(&wimax_gnl_family);
574error_register_family:
575 d_fnend(4, NULL, "() = %d\n", result);
576 return result;
577
578}
579module_init(wimax_subsys_init);
580
581
582/* Shutdown the wimax stack */
583static
584void __exit wimax_subsys_exit(void)
585{
586 int cnt;
587 wimax_id_table_release();
588 genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
589 for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--)
590 genl_unregister_ops(&wimax_gnl_family,
591 wimax_gnl_ops[cnt]);
592 genl_unregister_family(&wimax_gnl_family);
593}
594module_exit(wimax_subsys_exit);
595
596MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
597MODULE_DESCRIPTION("Linux WiMAX stack");
598MODULE_LICENSE("GPL");
599
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
new file mode 100644
index 000000000000..1e743d214856
--- /dev/null
+++ b/net/wimax/wimax-internal.h
@@ -0,0 +1,91 @@
1/*
2 * Linux WiMAX
3 * Internal API for kernel space WiMAX stack
4 *
5 *
6 * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This header file is for declarations and definitions internal to
25 * the WiMAX stack. For public APIs and documentation, see
26 * include/net/wimax.h and include/linux/wimax.h.
27 */
28
29#ifndef __WIMAX_INTERNAL_H__
30#define __WIMAX_INTERNAL_H__
31#ifdef __KERNEL__
32
33#include <linux/device.h>
34#include <net/wimax.h>
35
36
37/*
38 * Decide if a (locked) device is ready for use
39 *
40 * Before using the device structure, it must be locked
41 * (wimax_dev->mutex). As well, most operations need to call this
42 * function to check if the state is the right one.
43 *
44 * An error value will be returned if the state is not the right
45 * one. In that case, the caller should not attempt to use the device
46 * and just unlock it.
47 */
48static inline __must_check
49int wimax_dev_is_ready(struct wimax_dev *wimax_dev)
50{
51 if (wimax_dev->state == __WIMAX_ST_NULL)
52 return -EINVAL; /* Device is not even registered! */
53 if (wimax_dev->state == WIMAX_ST_DOWN)
54 return -ENOMEDIUM;
55 if (wimax_dev->state == __WIMAX_ST_QUIESCING)
56 return -ESHUTDOWN;
57 return 0;
58}
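
/*
 * Typical usage pattern (a sketch mirroring the callers in op-msg.c
 * and op-rfkill.c):
 *
 *	mutex_lock(&wimax_dev->mutex);
 *	result = wimax_dev_is_ready(wimax_dev);
 *	if (result < 0)
 *		goto error_not_ready;
 *	(... operate on the device ...)
 * error_not_ready:
 *	mutex_unlock(&wimax_dev->mutex);
 */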
59
60
61static inline
62void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
63{
64 wimax_dev->state = state;
65}
66extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
67
68#ifdef CONFIG_DEBUG_FS
69extern int wimax_debugfs_add(struct wimax_dev *);
70extern void wimax_debugfs_rm(struct wimax_dev *);
71#else
72static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
73{
74 return 0;
75}
76static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
77#endif
78
79extern void wimax_id_table_add(struct wimax_dev *);
80extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
81extern void wimax_id_table_rm(struct wimax_dev *);
82extern void wimax_id_table_release(void);
83
84extern int wimax_rfkill_add(struct wimax_dev *);
85extern void wimax_rfkill_rm(struct wimax_dev *);
86
87extern struct genl_family wimax_gnl_family;
88extern struct genl_multicast_group wimax_gnl_mcg;
89
90#endif /* #ifdef __KERNEL__ */
91#endif /* #ifndef __WIMAX_INTERNAL_H__ */
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1ef1e4..cb6a5bb85d80 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->do_ioctl)
-		return dev->do_ioctl(dev, ifr, cmd);
+	if (dev->netdev_ops->ndo_do_ioctl)
+		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 284eaef1dbf2..a2adb51849a9 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -44,27 +44,14 @@ static struct snmp_mib xfrm_mib_list[] = {
 	SNMP_MIB_SENTINEL
 };
 
-static unsigned long
-fold_field(void *mib[], int offt)
-{
-	unsigned long res = 0;
-	int i;
-
-	for_each_possible_cpu(i) {
-		res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
-		res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
-	}
-	return res;
-}
-
 static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq->private;
 	int i;
 	for (i=0; xfrm_mib_list[i].name; i++)
 		seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-			   fold_field((void **)net->mib.xfrm_statistics,
+			   snmp_fold_field((void **)net->mib.xfrm_statistics,
 				      xfrm_mib_list[i].entry));
 	return 0;
 }
 