Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c | 1
-rw-r--r--  net/atm/clip.c | 462
-rw-r--r--  net/ax25/af_ax25.c | 93
-rw-r--r--  net/ax25/ax25_addr.c | 9
-rw-r--r--  net/ax25/ax25_ds_timer.c | 3
-rw-r--r--  net/ax25/ax25_iface.c | 13
-rw-r--r--  net/ax25/ax25_ip.c | 3
-rw-r--r--  net/ax25/ax25_out.c | 3
-rw-r--r--  net/ax25/ax25_route.c | 2
-rw-r--r--  net/ax25/ax25_timer.c | 3
-rw-r--r--  net/ax25/ax25_uid.c | 4
-rw-r--r--  net/ax25/sysctl_net_ax25.c | 10
-rw-r--r--  net/bluetooth/sco.c | 2
-rw-r--r--  net/bridge/br.c | 3
-rw-r--r--  net/bridge/br_forward.c | 8
-rw-r--r--  net/bridge/br_if.c | 28
-rw-r--r--  net/bridge/br_input.c | 4
-rw-r--r--  net/bridge/br_netfilter.c | 13
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 2
-rw-r--r--  net/bridge/netfilter/ebtables.c | 38
-rw-r--r--  net/core/dev.c | 127
-rw-r--r--  net/core/dv.c | 5
-rw-r--r--  net/core/filter.c | 5
-rw-r--r--  net/core/flow.c | 8
-rw-r--r--  net/core/gen_estimator.c | 3
-rw-r--r--  net/core/link_watch.c | 10
-rw-r--r--  net/core/neighbour.c | 37
-rw-r--r--  net/core/net-sysfs.c | 51
-rw-r--r--  net/core/request_sock.c | 4
-rw-r--r--  net/core/skbuff.c | 8
-rw-r--r--  net/core/stream.c | 1
-rw-r--r--  net/core/utils.c | 4
-rw-r--r--  net/core/wireless.c | 8
-rw-r--r--  net/dccp/ackvec.c | 1
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/proto.c | 13
-rw-r--r--  net/decnet/dn_neigh.c | 5
-rw-r--r--  net/ethernet/Makefile | 1
-rw-r--r--  net/ethernet/sysctl_net_ether.c | 14
-rw-r--r--  net/ieee80211/ieee80211_crypt_tkip.c | 11
-rw-r--r--  net/ieee80211/ieee80211_rx.c | 18
-rw-r--r--  net/ieee80211/ieee80211_tx.c | 88
-rw-r--r--  net/ieee80211/ieee80211_wx.c | 44
-rw-r--r--  net/ieee80211/softmac/Kconfig | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 132
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 31
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_event.c | 46
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_io.c | 157
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_module.c | 123
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_priv.h | 5
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_scan.c | 10
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 73
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/arp.c | 4
-rw-r--r--  net/ipv4/devinet.c | 1
-rw-r--r--  net/ipv4/fib_frontend.c | 1
-rw-r--r--  net/ipv4/fib_trie.c | 14
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/inet_hashtables.c | 4
-rw-r--r--  net/ipv4/ip_forward.c | 1
-rw-r--r--  net/ipv4/ip_fragment.c | 15
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ip_options.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 8
-rw-r--r--  net/ipv4/ipcomp.c | 15
-rw-r--r--  net/ipv4/ipip.c | 4
-rw-r--r--  net/ipv4/netfilter.c | 50
-rw-r--r--  net/ipv4/netfilter/Kconfig | 7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 6
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c | 19
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323.c | 68
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c | 48
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.h | 98
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_h323_types.h | 938
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_icmp.c | 23
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 11
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 7
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_udp.c | 7
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c | 268
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper_h323.c | 71
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_gre.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_nat_rule.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_snmp_basic.c | 16
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 161
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 31
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 43
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 40
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 21
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 33
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 35
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 224
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 20
-rw-r--r--  net/ipv4/proc.c | 4
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/tcp.c | 13
-rw-r--r--  net/ipv4/tcp_highspeed.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 25
-rw-r--r--  net/ipv4/tunnel4.c | 8
-rw-r--r--  net/ipv4/xfrm4_input.c | 4
-rw-r--r--  net/ipv4/xfrm4_output.c | 2
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv6/exthdrs.c | 16
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/inet6_hashtables.c | 80
-rw-r--r--  net/ipv6/ip6_input.c | 3
-rw-r--r--  net/ipv6/ip6_tunnel.c | 12
-rw-r--r--  net/ipv6/ipcomp6.c | 10
-rw-r--r--  net/ipv6/netfilter.c | 52
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 31
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 19
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 33
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 15
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 179
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 12
-rw-r--r--  net/ipv6/proc.c | 4
-rw-r--r--  net/ipv6/reassembly.c | 18
-rw-r--r--  net/ipv6/route.c | 21
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/ipv6/tunnel6.c | 8
-rw-r--r--  net/ipv6/xfrm6_policy.c | 8
-rw-r--r--  net/ipx/af_ipx.c | 4
-rw-r--r--  net/ipx/ipx_route.c | 2
-rw-r--r--  net/irda/iriap.c | 3
-rw-r--r--  net/irda/irias_object.c | 3
-rw-r--r--  net/irda/irlap.c | 3
-rw-r--r--  net/llc/llc_input.c | 3
-rw-r--r--  net/netfilter/core.c | 51
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_l3proto_generic.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 50
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 50
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 115
-rw-r--r--  net/netfilter/nf_queue.c | 49
-rw-r--r--  net/netfilter/nfnetlink_log.c | 29
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 27
-rw-r--r--  net/netfilter/x_tables.c | 10
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/netrom/af_netrom.c | 18
-rw-r--r--  net/netrom/nr_dev.c | 1
-rw-r--r--  net/rose/af_rose.c | 13
-rw-r--r--  net/rose/rose_dev.c | 1
-rw-r--r--  net/rose/rose_link.c | 6
-rw-r--r--  net/rose/rose_route.c | 7
-rw-r--r--  net/sched/act_ipt.c | 5
-rw-r--r--  net/sched/act_police.c | 8
-rw-r--r--  net/sched/sch_generic.c | 6
-rw-r--r--  net/sched/sch_hfsc.c | 6
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sctp/input.c | 144
-rw-r--r--  net/sctp/inqueue.c | 1
-rw-r--r--  net/sctp/proc.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 16
-rw-r--r--  net/sctp/sm_statefuns.c | 140
-rw-r--r--  net/sctp/sm_statetable.c | 10
-rw-r--r--  net/sctp/socket.c | 29
-rw-r--r--  net/sctp/ulpqueue.c | 27
-rw-r--r--  net/socket.c | 9
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 1
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 11
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 14
-rw-r--r--  net/sunrpc/cache.c | 1
-rw-r--r--  net/sunrpc/stats.c | 3
-rw-r--r--  net/sysctl_net.c | 8
-rw-r--r--  net/tipc/name_distr.c | 3
-rw-r--r--  net/wanrouter/af_wanpipe.c | 2
-rw-r--r--  net/x25/x25_timer.c | 4
-rw-r--r--  net/xfrm/xfrm_input.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 16
-rw-r--r--  net/xfrm/xfrm_state.c | 33
182 files changed, 2334 insertions, 3381 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index afd8385c0c9c..e9dc803f2fe0 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -643,6 +643,5 @@ static int __init rif_init(void)
 
 module_init(rif_init);
 
-EXPORT_SYMBOL(tr_source_route);
 EXPORT_SYMBOL(tr_type_trans);
 EXPORT_SYMBOL(alloc_trdev);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 3ab4e7947bab..72d852982664 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -2,7 +2,6 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5
6#include <linux/config.h> 5#include <linux/config.h>
7#include <linux/string.h> 6#include <linux/string.h>
8#include <linux/errno.h> 7#include <linux/errno.h>
@@ -54,24 +53,24 @@ static struct net_device *clip_devs;
54static struct atm_vcc *atmarpd; 53static struct atm_vcc *atmarpd;
55static struct neigh_table clip_tbl; 54static struct neigh_table clip_tbl;
56static struct timer_list idle_timer; 55static struct timer_list idle_timer;
57static int start_timer = 1;
58
59 56
60static int to_atmarpd(enum atmarp_ctrl_type type,int itf,unsigned long ip) 57static int to_atmarpd(enum atmarp_ctrl_type type, int itf, unsigned long ip)
61{ 58{
62 struct sock *sk; 59 struct sock *sk;
63 struct atmarp_ctrl *ctrl; 60 struct atmarp_ctrl *ctrl;
64 struct sk_buff *skb; 61 struct sk_buff *skb;
65 62
66 DPRINTK("to_atmarpd(%d)\n",type); 63 DPRINTK("to_atmarpd(%d)\n", type);
67 if (!atmarpd) return -EUNATCH; 64 if (!atmarpd)
65 return -EUNATCH;
68 skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC); 66 skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC);
69 if (!skb) return -ENOMEM; 67 if (!skb)
68 return -ENOMEM;
70 ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl)); 69 ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl));
71 ctrl->type = type; 70 ctrl->type = type;
72 ctrl->itf_num = itf; 71 ctrl->itf_num = itf;
73 ctrl->ip = ip; 72 ctrl->ip = ip;
74 atm_force_charge(atmarpd,skb->truesize); 73 atm_force_charge(atmarpd, skb->truesize);
75 74
76 sk = sk_atm(atmarpd); 75 sk = sk_atm(atmarpd);
77 skb_queue_tail(&sk->sk_receive_queue, skb); 76 skb_queue_tail(&sk->sk_receive_queue, skb);
@@ -79,26 +78,24 @@ static int to_atmarpd(enum atmarp_ctrl_type type,int itf,unsigned long ip)
79 return 0; 78 return 0;
80} 79}
81 80
82 81static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
83static void link_vcc(struct clip_vcc *clip_vcc,struct atmarp_entry *entry)
84{ 82{
85 DPRINTK("link_vcc %p to entry %p (neigh %p)\n",clip_vcc,entry, 83 DPRINTK("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry,
86 entry->neigh); 84 entry->neigh);
87 clip_vcc->entry = entry; 85 clip_vcc->entry = entry;
88 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ 86 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
89 clip_vcc->next = entry->vccs; 87 clip_vcc->next = entry->vccs;
90 entry->vccs = clip_vcc; 88 entry->vccs = clip_vcc;
91 entry->neigh->used = jiffies; 89 entry->neigh->used = jiffies;
92} 90}
93 91
94
95static void unlink_clip_vcc(struct clip_vcc *clip_vcc) 92static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
96{ 93{
97 struct atmarp_entry *entry = clip_vcc->entry; 94 struct atmarp_entry *entry = clip_vcc->entry;
98 struct clip_vcc **walk; 95 struct clip_vcc **walk;
99 96
100 if (!entry) { 97 if (!entry) {
101 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc); 98 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
102 return; 99 return;
103 } 100 }
104 spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */ 101 spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */
@@ -107,24 +104,24 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
107 if (*walk == clip_vcc) { 104 if (*walk == clip_vcc) {
108 int error; 105 int error;
109 106
110 *walk = clip_vcc->next; /* atomic */ 107 *walk = clip_vcc->next; /* atomic */
111 clip_vcc->entry = NULL; 108 clip_vcc->entry = NULL;
112 if (clip_vcc->xoff) 109 if (clip_vcc->xoff)
113 netif_wake_queue(entry->neigh->dev); 110 netif_wake_queue(entry->neigh->dev);
114 if (entry->vccs) 111 if (entry->vccs)
115 goto out; 112 goto out;
116 entry->expires = jiffies-1; 113 entry->expires = jiffies - 1;
117 /* force resolution or expiration */ 114 /* force resolution or expiration */
118 error = neigh_update(entry->neigh, NULL, NUD_NONE, 115 error = neigh_update(entry->neigh, NULL, NUD_NONE,
119 NEIGH_UPDATE_F_ADMIN); 116 NEIGH_UPDATE_F_ADMIN);
120 if (error) 117 if (error)
121 printk(KERN_CRIT "unlink_clip_vcc: " 118 printk(KERN_CRIT "unlink_clip_vcc: "
122 "neigh_update failed with %d\n",error); 119 "neigh_update failed with %d\n", error);
123 goto out; 120 goto out;
124 } 121 }
125 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " 122 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
126 "0x%p)\n",entry,clip_vcc); 123 "0x%p)\n", entry, clip_vcc);
127out: 124 out:
128 spin_unlock_bh(&entry->neigh->dev->xmit_lock); 125 spin_unlock_bh(&entry->neigh->dev->xmit_lock);
129} 126}
130 127
@@ -153,13 +150,13 @@ static int neigh_check_cb(struct neighbour *n)
153 DPRINTK("destruction postponed with ref %d\n", 150 DPRINTK("destruction postponed with ref %d\n",
154 atomic_read(&n->refcnt)); 151 atomic_read(&n->refcnt));
155 152
156 while ((skb = skb_dequeue(&n->arp_queue)) != NULL) 153 while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
157 dev_kfree_skb(skb); 154 dev_kfree_skb(skb);
158 155
159 return 0; 156 return 0;
160 } 157 }
161 158
162 DPRINTK("expired neigh %p\n",n); 159 DPRINTK("expired neigh %p\n", n);
163 return 1; 160 return 1;
164} 161}
165 162
@@ -167,7 +164,7 @@ static void idle_timer_check(unsigned long dummy)
167{ 164{
168 write_lock(&clip_tbl.lock); 165 write_lock(&clip_tbl.lock);
169 __neigh_for_each_release(&clip_tbl, neigh_check_cb); 166 __neigh_for_each_release(&clip_tbl, neigh_check_cb);
170 mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ); 167 mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
171 write_unlock(&clip_tbl.lock); 168 write_unlock(&clip_tbl.lock);
172} 169}
173 170
@@ -177,13 +174,13 @@ static int clip_arp_rcv(struct sk_buff *skb)
177 174
178 DPRINTK("clip_arp_rcv\n"); 175 DPRINTK("clip_arp_rcv\n");
179 vcc = ATM_SKB(skb)->vcc; 176 vcc = ATM_SKB(skb)->vcc;
180 if (!vcc || !atm_charge(vcc,skb->truesize)) { 177 if (!vcc || !atm_charge(vcc, skb->truesize)) {
181 dev_kfree_skb_any(skb); 178 dev_kfree_skb_any(skb);
182 return 0; 179 return 0;
183 } 180 }
184 DPRINTK("pushing to %p\n",vcc); 181 DPRINTK("pushing to %p\n", vcc);
185 DPRINTK("using %p\n",CLIP_VCC(vcc)->old_push); 182 DPRINTK("using %p\n", CLIP_VCC(vcc)->old_push);
186 CLIP_VCC(vcc)->old_push(vcc,skb); 183 CLIP_VCC(vcc)->old_push(vcc, skb);
187 return 0; 184 return 0;
188} 185}
189 186
@@ -193,34 +190,38 @@ static const unsigned char llc_oui[] = {
193 0x03, /* Ctrl: Unnumbered Information Command PDU */ 190 0x03, /* Ctrl: Unnumbered Information Command PDU */
194 0x00, /* OUI: EtherType */ 191 0x00, /* OUI: EtherType */
195 0x00, 192 0x00,
196 0x00 }; 193 0x00
194};
197 195
198static void clip_push(struct atm_vcc *vcc,struct sk_buff *skb) 196static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
199{ 197{
200 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 198 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
201 199
202 DPRINTK("clip push\n"); 200 DPRINTK("clip push\n");
203 if (!skb) { 201 if (!skb) {
204 DPRINTK("removing VCC %p\n",clip_vcc); 202 DPRINTK("removing VCC %p\n", clip_vcc);
205 if (clip_vcc->entry) unlink_clip_vcc(clip_vcc); 203 if (clip_vcc->entry)
206 clip_vcc->old_push(vcc,NULL); /* pass on the bad news */ 204 unlink_clip_vcc(clip_vcc);
205 clip_vcc->old_push(vcc, NULL); /* pass on the bad news */
207 kfree(clip_vcc); 206 kfree(clip_vcc);
208 return; 207 return;
209 } 208 }
210 atm_return(vcc,skb->truesize); 209 atm_return(vcc, skb->truesize);
211 skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs; 210 skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
212 /* clip_vcc->entry == NULL if we don't have an IP address yet */ 211 /* clip_vcc->entry == NULL if we don't have an IP address yet */
213 if (!skb->dev) { 212 if (!skb->dev) {
214 dev_kfree_skb_any(skb); 213 dev_kfree_skb_any(skb);
215 return; 214 return;
216 } 215 }
217 ATM_SKB(skb)->vcc = vcc; 216 ATM_SKB(skb)->vcc = vcc;
218 skb->mac.raw = skb->data; 217 skb->mac.raw = skb->data;
219 if (!clip_vcc->encap || skb->len < RFC1483LLC_LEN || memcmp(skb->data, 218 if (!clip_vcc->encap
220 llc_oui,sizeof(llc_oui))) skb->protocol = htons(ETH_P_IP); 219 || skb->len < RFC1483LLC_LEN
220 || memcmp(skb->data, llc_oui, sizeof (llc_oui)))
221 skb->protocol = htons(ETH_P_IP);
221 else { 222 else {
222 skb->protocol = ((u16 *) skb->data)[3]; 223 skb->protocol = ((u16 *) skb->data)[3];
223 skb_pull(skb,RFC1483LLC_LEN); 224 skb_pull(skb, RFC1483LLC_LEN);
224 if (skb->protocol == htons(ETH_P_ARP)) { 225 if (skb->protocol == htons(ETH_P_ARP)) {
225 PRIV(skb->dev)->stats.rx_packets++; 226 PRIV(skb->dev)->stats.rx_packets++;
226 PRIV(skb->dev)->stats.rx_bytes += skb->len; 227 PRIV(skb->dev)->stats.rx_bytes += skb->len;
@@ -235,58 +236,54 @@ static void clip_push(struct atm_vcc *vcc,struct sk_buff *skb)
235 netif_rx(skb); 236 netif_rx(skb);
236} 237}
237 238
238
239/* 239/*
240 * Note: these spinlocks _must_not_ block on non-SMP. The only goal is that 240 * Note: these spinlocks _must_not_ block on non-SMP. The only goal is that
241 * clip_pop is atomic with respect to the critical section in clip_start_xmit. 241 * clip_pop is atomic with respect to the critical section in clip_start_xmit.
242 */ 242 */
243 243
244 244static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
245static void clip_pop(struct atm_vcc *vcc,struct sk_buff *skb)
246{ 245{
247 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 246 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
248 struct net_device *dev = skb->dev; 247 struct net_device *dev = skb->dev;
249 int old; 248 int old;
250 unsigned long flags; 249 unsigned long flags;
251 250
252 DPRINTK("clip_pop(vcc %p)\n",vcc); 251 DPRINTK("clip_pop(vcc %p)\n", vcc);
253 clip_vcc->old_pop(vcc,skb); 252 clip_vcc->old_pop(vcc, skb);
254 /* skb->dev == NULL in outbound ARP packets */ 253 /* skb->dev == NULL in outbound ARP packets */
255 if (!dev) return; 254 if (!dev)
256 spin_lock_irqsave(&PRIV(dev)->xoff_lock,flags); 255 return;
257 if (atm_may_send(vcc,0)) { 256 spin_lock_irqsave(&PRIV(dev)->xoff_lock, flags);
258 old = xchg(&clip_vcc->xoff,0); 257 if (atm_may_send(vcc, 0)) {
259 if (old) netif_wake_queue(dev); 258 old = xchg(&clip_vcc->xoff, 0);
259 if (old)
260 netif_wake_queue(dev);
260 } 261 }
261 spin_unlock_irqrestore(&PRIV(dev)->xoff_lock,flags); 262 spin_unlock_irqrestore(&PRIV(dev)->xoff_lock, flags);
262} 263}
263 264
264
265static void clip_neigh_destroy(struct neighbour *neigh) 265static void clip_neigh_destroy(struct neighbour *neigh)
266{ 266{
267 DPRINTK("clip_neigh_destroy (neigh %p)\n",neigh); 267 DPRINTK("clip_neigh_destroy (neigh %p)\n", neigh);
268 if (NEIGH2ENTRY(neigh)->vccs) 268 if (NEIGH2ENTRY(neigh)->vccs)
269 printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n"); 269 printk(KERN_CRIT "clip_neigh_destroy: vccs != NULL !!!\n");
270 NEIGH2ENTRY(neigh)->vccs = (void *) 0xdeadbeef; 270 NEIGH2ENTRY(neigh)->vccs = (void *) 0xdeadbeef;
271} 271}
272 272
273 273static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
274static void clip_neigh_solicit(struct neighbour *neigh,struct sk_buff *skb)
275{ 274{
276 DPRINTK("clip_neigh_solicit (neigh %p, skb %p)\n",neigh,skb); 275 DPRINTK("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb);
277 to_atmarpd(act_need,PRIV(neigh->dev)->number,NEIGH2ENTRY(neigh)->ip); 276 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
278} 277}
279 278
280 279static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
281static void clip_neigh_error(struct neighbour *neigh,struct sk_buff *skb)
282{ 280{
283#ifndef CONFIG_ATM_CLIP_NO_ICMP 281#ifndef CONFIG_ATM_CLIP_NO_ICMP
284 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_HOST_UNREACH,0); 282 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
285#endif 283#endif
286 kfree_skb(skb); 284 kfree_skb(skb);
287} 285}
288 286
289
290static struct neigh_ops clip_neigh_ops = { 287static struct neigh_ops clip_neigh_ops = {
291 .family = AF_INET, 288 .family = AF_INET,
292 .solicit = clip_neigh_solicit, 289 .solicit = clip_neigh_solicit,
@@ -297,7 +294,6 @@ static struct neigh_ops clip_neigh_ops = {
297 .queue_xmit = dev_queue_xmit, 294 .queue_xmit = dev_queue_xmit,
298}; 295};
299 296
300
301static int clip_constructor(struct neighbour *neigh) 297static int clip_constructor(struct neighbour *neigh)
302{ 298{
303 struct atmarp_entry *entry = NEIGH2ENTRY(neigh); 299 struct atmarp_entry *entry = NEIGH2ENTRY(neigh);
@@ -305,9 +301,10 @@ static int clip_constructor(struct neighbour *neigh)
305 struct in_device *in_dev; 301 struct in_device *in_dev;
306 struct neigh_parms *parms; 302 struct neigh_parms *parms;
307 303
308 DPRINTK("clip_constructor (neigh %p, entry %p)\n",neigh,entry); 304 DPRINTK("clip_constructor (neigh %p, entry %p)\n", neigh, entry);
309 neigh->type = inet_addr_type(entry->ip); 305 neigh->type = inet_addr_type(entry->ip);
310 if (neigh->type != RTN_UNICAST) return -EINVAL; 306 if (neigh->type != RTN_UNICAST)
307 return -EINVAL;
311 308
312 rcu_read_lock(); 309 rcu_read_lock();
313 in_dev = __in_dev_get_rcu(dev); 310 in_dev = __in_dev_get_rcu(dev);
@@ -326,13 +323,13 @@ static int clip_constructor(struct neighbour *neigh)
326 neigh->ops->connected_output : neigh->ops->output; 323 neigh->ops->connected_output : neigh->ops->output;
327 entry->neigh = neigh; 324 entry->neigh = neigh;
328 entry->vccs = NULL; 325 entry->vccs = NULL;
329 entry->expires = jiffies-1; 326 entry->expires = jiffies - 1;
330 return 0; 327 return 0;
331} 328}
332 329
333static u32 clip_hash(const void *pkey, const struct net_device *dev) 330static u32 clip_hash(const void *pkey, const struct net_device *dev)
334{ 331{
335 return jhash_2words(*(u32 *)pkey, dev->ifindex, clip_tbl.hash_rnd); 332 return jhash_2words(*(u32 *) pkey, dev->ifindex, clip_tbl.hash_rnd);
336} 333}
337 334
338static struct neigh_table clip_tbl = { 335static struct neigh_table clip_tbl = {
@@ -366,7 +363,6 @@ static struct neigh_table clip_tbl = {
366 .gc_thresh3 = 1024, 363 .gc_thresh3 = 1024,
367}; 364};
368 365
369
370/* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */ 366/* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
371 367
372/* 368/*
@@ -376,15 +372,13 @@ static struct neigh_table clip_tbl = {
376 * clip_setentry. 372 * clip_setentry.
377 */ 373 */
378 374
379 375static int clip_encap(struct atm_vcc *vcc, int mode)
380static int clip_encap(struct atm_vcc *vcc,int mode)
381{ 376{
382 CLIP_VCC(vcc)->encap = mode; 377 CLIP_VCC(vcc)->encap = mode;
383 return 0; 378 return 0;
384} 379}
385 380
386 381static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
387static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
388{ 382{
389 struct clip_priv *clip_priv = PRIV(dev); 383 struct clip_priv *clip_priv = PRIV(dev);
390 struct atmarp_entry *entry; 384 struct atmarp_entry *entry;
@@ -392,7 +386,7 @@ static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
392 int old; 386 int old;
393 unsigned long flags; 387 unsigned long flags;
394 388
395 DPRINTK("clip_start_xmit (skb %p)\n",skb); 389 DPRINTK("clip_start_xmit (skb %p)\n", skb);
396 if (!skb->dst) { 390 if (!skb->dst) {
397 printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n"); 391 printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
398 dev_kfree_skb(skb); 392 dev_kfree_skb(skb);
@@ -401,9 +395,9 @@ static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
401 } 395 }
402 if (!skb->dst->neighbour) { 396 if (!skb->dst->neighbour) {
403#if 0 397#if 0
404 skb->dst->neighbour = clip_find_neighbour(skb->dst,1); 398 skb->dst->neighbour = clip_find_neighbour(skb->dst, 1);
405 if (!skb->dst->neighbour) { 399 if (!skb->dst->neighbour) {
406 dev_kfree_skb(skb); /* lost that one */ 400 dev_kfree_skb(skb); /* lost that one */
407 clip_priv->stats.tx_dropped++; 401 clip_priv->stats.tx_dropped++;
408 return 0; 402 return 0;
409 } 403 }
@@ -417,73 +411,73 @@ static int clip_start_xmit(struct sk_buff *skb,struct net_device *dev)
417 if (!entry->vccs) { 411 if (!entry->vccs) {
418 if (time_after(jiffies, entry->expires)) { 412 if (time_after(jiffies, entry->expires)) {
419 /* should be resolved */ 413 /* should be resolved */
420 entry->expires = jiffies+ATMARP_RETRY_DELAY*HZ; 414 entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
421 to_atmarpd(act_need,PRIV(dev)->number,entry->ip); 415 to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
422 } 416 }
423 if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS) 417 if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
424 skb_queue_tail(&entry->neigh->arp_queue,skb); 418 skb_queue_tail(&entry->neigh->arp_queue, skb);
425 else { 419 else {
426 dev_kfree_skb(skb); 420 dev_kfree_skb(skb);
427 clip_priv->stats.tx_dropped++; 421 clip_priv->stats.tx_dropped++;
428 } 422 }
429 return 0; 423 return 0;
430 } 424 }
431 DPRINTK("neigh %p, vccs %p\n",entry,entry->vccs); 425 DPRINTK("neigh %p, vccs %p\n", entry, entry->vccs);
432 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; 426 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
433 DPRINTK("using neighbour %p, vcc %p\n",skb->dst->neighbour,vcc); 427 DPRINTK("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
434 if (entry->vccs->encap) { 428 if (entry->vccs->encap) {
435 void *here; 429 void *here;
436 430
437 here = skb_push(skb,RFC1483LLC_LEN); 431 here = skb_push(skb, RFC1483LLC_LEN);
438 memcpy(here,llc_oui,sizeof(llc_oui)); 432 memcpy(here, llc_oui, sizeof(llc_oui));
439 ((u16 *) here)[3] = skb->protocol; 433 ((u16 *) here)[3] = skb->protocol;
440 } 434 }
441 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 435 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
442 ATM_SKB(skb)->atm_options = vcc->atm_options; 436 ATM_SKB(skb)->atm_options = vcc->atm_options;
443 entry->vccs->last_use = jiffies; 437 entry->vccs->last_use = jiffies;
444 DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n",skb,vcc,vcc->dev); 438 DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
445 old = xchg(&entry->vccs->xoff,1); /* assume XOFF ... */ 439 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
446 if (old) { 440 if (old) {
447 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); 441 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
448 return 0; 442 return 0;
449 } 443 }
450 clip_priv->stats.tx_packets++; 444 clip_priv->stats.tx_packets++;
451 clip_priv->stats.tx_bytes += skb->len; 445 clip_priv->stats.tx_bytes += skb->len;
452 (void) vcc->send(vcc,skb); 446 vcc->send(vcc, skb);
453 if (atm_may_send(vcc,0)) { 447 if (atm_may_send(vcc, 0)) {
454 entry->vccs->xoff = 0; 448 entry->vccs->xoff = 0;
455 return 0; 449 return 0;
456 } 450 }
457 spin_lock_irqsave(&clip_priv->xoff_lock,flags); 451 spin_lock_irqsave(&clip_priv->xoff_lock, flags);
458 netif_stop_queue(dev); /* XOFF -> throttle immediately */ 452 netif_stop_queue(dev); /* XOFF -> throttle immediately */
459 barrier(); 453 barrier();
460 if (!entry->vccs->xoff) 454 if (!entry->vccs->xoff)
461 netif_start_queue(dev); 455 netif_start_queue(dev);
462 /* Oh, we just raced with clip_pop. netif_start_queue should be 456 /* Oh, we just raced with clip_pop. netif_start_queue should be
463 good enough, because nothing should really be asleep because 457 good enough, because nothing should really be asleep because
464 of the brief netif_stop_queue. If this isn't true or if it 458 of the brief netif_stop_queue. If this isn't true or if it
465 changes, use netif_wake_queue instead. */ 459 changes, use netif_wake_queue instead. */
466 spin_unlock_irqrestore(&clip_priv->xoff_lock,flags); 460 spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
467 return 0; 461 return 0;
468} 462}
469 463
470
471static struct net_device_stats *clip_get_stats(struct net_device *dev) 464static struct net_device_stats *clip_get_stats(struct net_device *dev)
472{ 465{
473 return &PRIV(dev)->stats; 466 return &PRIV(dev)->stats;
474} 467}
475 468
476 469static int clip_mkip(struct atm_vcc *vcc, int timeout)
477static int clip_mkip(struct atm_vcc *vcc,int timeout)
478{ 470{
479 struct clip_vcc *clip_vcc; 471 struct clip_vcc *clip_vcc;
480 struct sk_buff_head copy; 472 struct sk_buff_head copy;
481 struct sk_buff *skb; 473 struct sk_buff *skb;
482 474
483 if (!vcc->push) return -EBADFD; 475 if (!vcc->push)
484 clip_vcc = kmalloc(sizeof(struct clip_vcc),GFP_KERNEL); 476 return -EBADFD;
485 if (!clip_vcc) return -ENOMEM; 477 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
486 DPRINTK("mkip clip_vcc %p vcc %p\n",clip_vcc,vcc); 478 if (!clip_vcc)
479 return -ENOMEM;
480 DPRINTK("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc);
487 clip_vcc->vcc = vcc; 481 clip_vcc->vcc = vcc;
488 vcc->user_back = clip_vcc; 482 vcc->user_back = clip_vcc;
489 set_bit(ATM_VF_IS_CLIP, &vcc->flags); 483 set_bit(ATM_VF_IS_CLIP, &vcc->flags);
@@ -491,7 +485,7 @@ static int clip_mkip(struct atm_vcc *vcc,int timeout)
491 clip_vcc->xoff = 0; 485 clip_vcc->xoff = 0;
492 clip_vcc->encap = 1; 486 clip_vcc->encap = 1;
493 clip_vcc->last_use = jiffies; 487 clip_vcc->last_use = jiffies;
494 clip_vcc->idle_timeout = timeout*HZ; 488 clip_vcc->idle_timeout = timeout * HZ;
495 clip_vcc->old_push = vcc->push; 489 clip_vcc->old_push = vcc->push;
496 clip_vcc->old_pop = vcc->pop; 490 clip_vcc->old_pop = vcc->pop;
497 vcc->push = clip_push; 491 vcc->push = clip_push;
@@ -501,27 +495,25 @@ static int clip_mkip(struct atm_vcc *vcc,int timeout)
501 /* re-process everything received between connection setup and MKIP */ 495 /* re-process everything received between connection setup and MKIP */
502 while ((skb = skb_dequeue(&copy)) != NULL) 496 while ((skb = skb_dequeue(&copy)) != NULL)
503 if (!clip_devs) { 497 if (!clip_devs) {
504 atm_return(vcc,skb->truesize); 498 atm_return(vcc, skb->truesize);
505 kfree_skb(skb); 499 kfree_skb(skb);
506 } 500 } else {
507 else {
508 unsigned int len = skb->len; 501 unsigned int len = skb->len;
509 502
510 clip_push(vcc,skb); 503 clip_push(vcc, skb);
511 PRIV(skb->dev)->stats.rx_packets--; 504 PRIV(skb->dev)->stats.rx_packets--;
512 PRIV(skb->dev)->stats.rx_bytes -= len; 505 PRIV(skb->dev)->stats.rx_bytes -= len;
513 } 506 }
514 return 0; 507 return 0;
515} 508}
516 509
517 510static int clip_setentry(struct atm_vcc *vcc, u32 ip)
518static int clip_setentry(struct atm_vcc *vcc,u32 ip)
519{ 511{
520 struct neighbour *neigh; 512 struct neighbour *neigh;
521 struct atmarp_entry *entry; 513 struct atmarp_entry *entry;
522 int error; 514 int error;
523 struct clip_vcc *clip_vcc; 515 struct clip_vcc *clip_vcc;
524 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1 } } }; 516 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1}} };
525 struct rtable *rt; 517 struct rtable *rt;
526 518
527 if (vcc->push != clip_push) { 519 if (vcc->push != clip_push) {
@@ -538,28 +530,29 @@ static int clip_setentry(struct atm_vcc *vcc,u32 ip)
538 unlink_clip_vcc(clip_vcc); 530 unlink_clip_vcc(clip_vcc);
539 return 0; 531 return 0;
540 } 532 }
541 error = ip_route_output_key(&rt,&fl); 533 error = ip_route_output_key(&rt, &fl);
542 if (error) return error; 534 if (error)
543 neigh = __neigh_lookup(&clip_tbl,&ip,rt->u.dst.dev,1); 535 return error;
536 neigh = __neigh_lookup(&clip_tbl, &ip, rt->u.dst.dev, 1);
544 ip_rt_put(rt); 537 ip_rt_put(rt);
545 if (!neigh) 538 if (!neigh)
546 return -ENOMEM; 539 return -ENOMEM;
547 entry = NEIGH2ENTRY(neigh); 540 entry = NEIGH2ENTRY(neigh);
548 if (entry != clip_vcc->entry) { 541 if (entry != clip_vcc->entry) {
549 if (!clip_vcc->entry) DPRINTK("setentry: add\n"); 542 if (!clip_vcc->entry)
543 DPRINTK("setentry: add\n");
550 else { 544 else {
551 DPRINTK("setentry: update\n"); 545 DPRINTK("setentry: update\n");
552 unlink_clip_vcc(clip_vcc); 546 unlink_clip_vcc(clip_vcc);
553 } 547 }
554 link_vcc(clip_vcc,entry); 548 link_vcc(clip_vcc, entry);
555 } 549 }
556 error = neigh_update(neigh, llc_oui, NUD_PERMANENT, 550 error = neigh_update(neigh, llc_oui, NUD_PERMANENT,
557 NEIGH_UPDATE_F_OVERRIDE|NEIGH_UPDATE_F_ADMIN); 551 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
558 neigh_release(neigh); 552 neigh_release(neigh);
559 return error; 553 return error;
560} 554}
561 555
562
563static void clip_setup(struct net_device *dev) 556static void clip_setup(struct net_device *dev)
564{ 557{
565 dev->hard_start_xmit = clip_start_xmit; 558 dev->hard_start_xmit = clip_start_xmit;
@@ -568,15 +561,14 @@ static void clip_setup(struct net_device *dev)
568 dev->type = ARPHRD_ATM; 561 dev->type = ARPHRD_ATM;
569 dev->hard_header_len = RFC1483LLC_LEN; 562 dev->hard_header_len = RFC1483LLC_LEN;
570 dev->mtu = RFC1626_MTU; 563 dev->mtu = RFC1626_MTU;
571 dev->tx_queue_len = 100; /* "normal" queue (packets) */ 564 dev->tx_queue_len = 100; /* "normal" queue (packets) */
572 /* When using a "real" qdisc, the qdisc determines the queue */ 565 /* When using a "real" qdisc, the qdisc determines the queue */
573 /* length. tx_queue_len is only used for the default case, */ 566 /* length. tx_queue_len is only used for the default case, */
574 /* without any more elaborate queuing. 100 is a reasonable */ 567 /* without any more elaborate queuing. 100 is a reasonable */
575 /* compromise between decent burst-tolerance and protection */ 568 /* compromise between decent burst-tolerance and protection */
576 /* against memory hogs. */ 569 /* against memory hogs. */
577} 570}
578 571
579
580static int clip_create(int number) 572static int clip_create(int number)
581{ 573{
582 struct net_device *dev; 574 struct net_device *dev;
@@ -585,19 +577,19 @@ static int clip_create(int number)
585 577
586 if (number != -1) { 578 if (number != -1) {
587 for (dev = clip_devs; dev; dev = PRIV(dev)->next) 579 for (dev = clip_devs; dev; dev = PRIV(dev)->next)
588 if (PRIV(dev)->number == number) return -EEXIST; 580 if (PRIV(dev)->number == number)
589 } 581 return -EEXIST;
590 else { 582 } else {
591 number = 0; 583 number = 0;
592 for (dev = clip_devs; dev; dev = PRIV(dev)->next) 584 for (dev = clip_devs; dev; dev = PRIV(dev)->next)
593 if (PRIV(dev)->number >= number) 585 if (PRIV(dev)->number >= number)
594 number = PRIV(dev)->number+1; 586 number = PRIV(dev)->number + 1;
595 } 587 }
596 dev = alloc_netdev(sizeof(struct clip_priv), "", clip_setup); 588 dev = alloc_netdev(sizeof(struct clip_priv), "", clip_setup);
597 if (!dev) 589 if (!dev)
598 return -ENOMEM; 590 return -ENOMEM;
599 clip_priv = PRIV(dev); 591 clip_priv = PRIV(dev);
600 sprintf(dev->name,"atm%d",number); 592 sprintf(dev->name, "atm%d", number);
601 spin_lock_init(&clip_priv->xoff_lock); 593 spin_lock_init(&clip_priv->xoff_lock);
602 clip_priv->number = number; 594 clip_priv->number = number;
603 error = register_netdev(dev); 595 error = register_netdev(dev);
@@ -607,53 +599,48 @@ static int clip_create(int number)
607 } 599 }
608 clip_priv->next = clip_devs; 600 clip_priv->next = clip_devs;
609 clip_devs = dev; 601 clip_devs = dev;
610 DPRINTK("registered (net:%s)\n",dev->name); 602 DPRINTK("registered (net:%s)\n", dev->name);
611 return number; 603 return number;
612} 604}
613 605
614 606static int clip_device_event(struct notifier_block *this, unsigned long event,
615static int clip_device_event(struct notifier_block *this,unsigned long event, 607 void *arg)
616 void *dev)
617{ 608{
609 struct net_device *dev = arg;
610
611 if (event == NETDEV_UNREGISTER) {
612 neigh_ifdown(&clip_tbl, dev);
613 return NOTIFY_DONE;
614 }
615
618 /* ignore non-CLIP devices */ 616 /* ignore non-CLIP devices */
619 if (((struct net_device *) dev)->type != ARPHRD_ATM || 617 if (dev->type != ARPHRD_ATM || dev->hard_start_xmit != clip_start_xmit)
620 ((struct net_device *) dev)->hard_start_xmit != clip_start_xmit)
621 return NOTIFY_DONE; 618 return NOTIFY_DONE;
619
622 switch (event) { 620 switch (event) {
623 case NETDEV_UP: 621 case NETDEV_UP:
624 DPRINTK("clip_device_event NETDEV_UP\n"); 622 DPRINTK("clip_device_event NETDEV_UP\n");
625 (void) to_atmarpd(act_up,PRIV(dev)->number,0); 623 to_atmarpd(act_up, PRIV(dev)->number, 0);
626 break; 624 break;
627 case NETDEV_GOING_DOWN: 625 case NETDEV_GOING_DOWN:
628 DPRINTK("clip_device_event NETDEV_DOWN\n"); 626 DPRINTK("clip_device_event NETDEV_DOWN\n");
629 (void) to_atmarpd(act_down,PRIV(dev)->number,0); 627 to_atmarpd(act_down, PRIV(dev)->number, 0);
630 break; 628 break;
631 case NETDEV_CHANGE: 629 case NETDEV_CHANGE:
632 case NETDEV_CHANGEMTU: 630 case NETDEV_CHANGEMTU:
633 DPRINTK("clip_device_event NETDEV_CHANGE*\n"); 631 DPRINTK("clip_device_event NETDEV_CHANGE*\n");
634 (void) to_atmarpd(act_change,PRIV(dev)->number,0); 632 to_atmarpd(act_change, PRIV(dev)->number, 0);
635 break; 633 break;
636 case NETDEV_REBOOT:
637 case NETDEV_REGISTER:
638 case NETDEV_DOWN:
639 DPRINTK("clip_device_event %ld\n",event);
640 /* ignore */
641 break;
642 default:
643 printk(KERN_WARNING "clip_device_event: unknown event "
644 "%ld\n",event);
645 break;
646 } 634 }
647 return NOTIFY_DONE; 635 return NOTIFY_DONE;
648} 636}
649 637
650 638static int clip_inet_event(struct notifier_block *this, unsigned long event,
651static int clip_inet_event(struct notifier_block *this,unsigned long event, 639 void *ifa)
652 void *ifa)
653{ 640{
654 struct in_device *in_dev; 641 struct in_device *in_dev;
655 642
656 in_dev = ((struct in_ifaddr *) ifa)->ifa_dev; 643 in_dev = ((struct in_ifaddr *)ifa)->ifa_dev;
657 if (!in_dev || !in_dev->dev) { 644 if (!in_dev || !in_dev->dev) {
658 printk(KERN_WARNING "clip_inet_event: no device\n"); 645 printk(KERN_WARNING "clip_inet_event: no device\n");
659 return NOTIFY_DONE; 646 return NOTIFY_DONE;
@@ -662,23 +649,20 @@ static int clip_inet_event(struct notifier_block *this,unsigned long event,
662 * Transitions are of the down-change-up type, so it's sufficient to 649 * Transitions are of the down-change-up type, so it's sufficient to
663 * handle the change on up. 650 * handle the change on up.
664 */ 651 */
665 if (event != NETDEV_UP) return NOTIFY_DONE; 652 if (event != NETDEV_UP)
666 return clip_device_event(this,NETDEV_CHANGE,in_dev->dev); 653 return NOTIFY_DONE;
654 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
667} 655}
668 656
669 657
670static struct notifier_block clip_dev_notifier = { 658static struct notifier_block clip_dev_notifier = {
671 clip_device_event, 659 .notifier_call = clip_device_event,
672 NULL,
673 0
674}; 660};
675 661
676 662
677 663
678static struct notifier_block clip_inet_notifier = { 664static struct notifier_block clip_inet_notifier = {
679 clip_inet_event, 665 .notifier_call = clip_inet_event,
680 NULL,
681 0
682}; 666};
683 667
684 668
@@ -686,14 +670,12 @@ static struct notifier_block clip_inet_notifier = {
686static void atmarpd_close(struct atm_vcc *vcc) 670static void atmarpd_close(struct atm_vcc *vcc)
687{ 671{
688 DPRINTK("atmarpd_close\n"); 672 DPRINTK("atmarpd_close\n");
689 atmarpd = NULL; /* assumed to be atomic */ 673
690 barrier(); 674 rtnl_lock();
691 unregister_inetaddr_notifier(&clip_inet_notifier); 675 atmarpd = NULL;
692 unregister_netdevice_notifier(&clip_dev_notifier);
693 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
694 printk(KERN_ERR "atmarpd_close: closing with requests "
695 "pending\n");
696 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); 676 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
677 rtnl_unlock();
678
697 DPRINTK("(done)\n"); 679 DPRINTK("(done)\n");
698 module_put(THIS_MODULE); 680 module_put(THIS_MODULE);
699} 681}
@@ -714,14 +696,14 @@ static struct atm_dev atmarpd_dev = {
714 696
715static int atm_init_atmarp(struct atm_vcc *vcc) 697static int atm_init_atmarp(struct atm_vcc *vcc)
716{ 698{
717 if (atmarpd) return -EADDRINUSE; 699 rtnl_lock();
718 if (start_timer) { 700 if (atmarpd) {
719 start_timer = 0; 701 rtnl_unlock();
720 init_timer(&idle_timer); 702 return -EADDRINUSE;
721 idle_timer.expires = jiffies+CLIP_CHECK_INTERVAL*HZ;
722 idle_timer.function = idle_timer_check;
723 add_timer(&idle_timer);
724 } 703 }
704
705 mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ);
706
725 atmarpd = vcc; 707 atmarpd = vcc;
726 set_bit(ATM_VF_META,&vcc->flags); 708 set_bit(ATM_VF_META,&vcc->flags);
727 set_bit(ATM_VF_READY,&vcc->flags); 709 set_bit(ATM_VF_READY,&vcc->flags);
@@ -731,10 +713,7 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
731 vcc->push = NULL; 713 vcc->push = NULL;
732 vcc->pop = NULL; /* crash */ 714 vcc->pop = NULL; /* crash */
733 vcc->push_oam = NULL; /* crash */ 715 vcc->push_oam = NULL; /* crash */
734 if (register_netdevice_notifier(&clip_dev_notifier)) 716 rtnl_unlock();
735 printk(KERN_ERR "register_netdevice_notifier failed\n");
736 if (register_inetaddr_notifier(&clip_inet_notifier))
737 printk(KERN_ERR "register_inetaddr_notifier failed\n");
738 return 0; 717 return 0;
739} 718}
740 719
@@ -744,53 +723,53 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
744 int err = 0; 723 int err = 0;
745 724
746 switch (cmd) { 725 switch (cmd) {
747 case SIOCMKCLIP: 726 case SIOCMKCLIP:
748 case ATMARPD_CTRL: 727 case ATMARPD_CTRL:
749 case ATMARP_MKIP: 728 case ATMARP_MKIP:
750 case ATMARP_SETENTRY: 729 case ATMARP_SETENTRY:
751 case ATMARP_ENCAP: 730 case ATMARP_ENCAP:
752 if (!capable(CAP_NET_ADMIN)) 731 if (!capable(CAP_NET_ADMIN))
753 return -EPERM; 732 return -EPERM;
754 break; 733 break;
755 default: 734 default:
756 return -ENOIOCTLCMD; 735 return -ENOIOCTLCMD;
757 } 736 }
758 737
759 switch (cmd) { 738 switch (cmd) {
760 case SIOCMKCLIP: 739 case SIOCMKCLIP:
761 err = clip_create(arg); 740 err = clip_create(arg);
762 break; 741 break;
763 case ATMARPD_CTRL: 742 case ATMARPD_CTRL:
764 err = atm_init_atmarp(vcc); 743 err = atm_init_atmarp(vcc);
765 if (!err) { 744 if (!err) {
766 sock->state = SS_CONNECTED; 745 sock->state = SS_CONNECTED;
767 __module_get(THIS_MODULE); 746 __module_get(THIS_MODULE);
768 } 747 }
769 break; 748 break;
770 case ATMARP_MKIP: 749 case ATMARP_MKIP:
771 err = clip_mkip(vcc ,arg); 750 err = clip_mkip(vcc, arg);
772 break; 751 break;
773 case ATMARP_SETENTRY: 752 case ATMARP_SETENTRY:
774 err = clip_setentry(vcc, arg); 753 err = clip_setentry(vcc, arg);
775 break; 754 break;
776 case ATMARP_ENCAP: 755 case ATMARP_ENCAP:
777 err = clip_encap(vcc, arg); 756 err = clip_encap(vcc, arg);
778 break; 757 break;
779 } 758 }
780 return err; 759 return err;
781} 760}
782 761
783static struct atm_ioctl clip_ioctl_ops = { 762static struct atm_ioctl clip_ioctl_ops = {
784 .owner = THIS_MODULE, 763 .owner = THIS_MODULE,
785 .ioctl = clip_ioctl, 764 .ioctl = clip_ioctl,
786}; 765};
787 766
788#ifdef CONFIG_PROC_FS 767#ifdef CONFIG_PROC_FS
789 768
790static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr) 769static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
791{ 770{
792 static int code[] = { 1,2,10,6,1,0 }; 771 static int code[] = { 1, 2, 10, 6, 1, 0 };
793 static int e164[] = { 1,8,4,6,1,0 }; 772 static int e164[] = { 1, 8, 4, 6, 1, 0 };
794 773
795 if (*addr->sas_addr.pub) { 774 if (*addr->sas_addr.pub) {
796 seq_printf(seq, "%s", addr->sas_addr.pub); 775 seq_printf(seq, "%s", addr->sas_addr.pub);
@@ -809,7 +788,7 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
809 for (i = 0; fields[i]; i++) { 788 for (i = 0; fields[i]; i++) {
810 for (j = fields[i]; j; j--) 789 for (j = fields[i]; j; j--)
811 seq_printf(seq, "%02X", *prv++); 790 seq_printf(seq, "%02X", *prv++);
812 if (fields[i+1]) 791 if (fields[i + 1])
813 seq_putc(seq, '.'); 792 seq_putc(seq, '.');
814 } 793 }
815 } 794 }
@@ -828,8 +807,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev,
828 svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || 807 svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
829 (sk_atm(clip_vcc->vcc)->sk_family == AF_ATMSVC)); 808 (sk_atm(clip_vcc->vcc)->sk_family == AF_ATMSVC));
830 809
831 llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || 810 llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || clip_vcc->encap);
832 clip_vcc->encap);
833 811
834 if (clip_vcc == SEQ_NO_VCC_TOKEN) 812 if (clip_vcc == SEQ_NO_VCC_TOKEN)
835 exp = entry->neigh->used; 813 exp = entry->neigh->used;
@@ -839,10 +817,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev,
839 exp = (jiffies - exp) / HZ; 817 exp = (jiffies - exp) / HZ;
840 818
841 seq_printf(seq, "%-6s%-4s%-4s%5ld ", 819 seq_printf(seq, "%-6s%-4s%-4s%5ld ",
842 dev->name, 820 dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
843 svc ? "SVC" : "PVC",
844 llc ? "LLC" : "NULL",
845 exp);
846 821
847 off = scnprintf(buf, sizeof(buf) - 1, "%d.%d.%d.%d", 822 off = scnprintf(buf, sizeof(buf) - 1, "%d.%d.%d.%d",
848 NIPQUAD(entry->ip)); 823 NIPQUAD(entry->ip));
@@ -860,8 +835,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev,
860 } else if (!svc) { 835 } else if (!svc) {
861 seq_printf(seq, "%d.%d.%d\n", 836 seq_printf(seq, "%d.%d.%d\n",
862 clip_vcc->vcc->dev->number, 837 clip_vcc->vcc->dev->number,
863 clip_vcc->vcc->vpi, 838 clip_vcc->vcc->vpi, clip_vcc->vcc->vci);
864 clip_vcc->vcc->vci);
865 } else { 839 } else {
866 svc_addr(seq, &clip_vcc->vcc->remote); 840 svc_addr(seq, &clip_vcc->vcc->remote);
867 seq_putc(seq, '\n'); 841 seq_putc(seq, '\n');
@@ -894,7 +868,7 @@ static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e,
894} 868}
895 869
896static void *clip_seq_vcc_walk(struct clip_seq_state *state, 870static void *clip_seq_vcc_walk(struct clip_seq_state *state,
897 struct atmarp_entry *e, loff_t *pos) 871 struct atmarp_entry *e, loff_t * pos)
898{ 872{
899 struct clip_vcc *vcc = state->vcc; 873 struct clip_vcc *vcc = state->vcc;
900 874
@@ -911,24 +885,24 @@ static void *clip_seq_vcc_walk(struct clip_seq_state *state,
911 885
912 return vcc; 886 return vcc;
913} 887}
914 888
915static void *clip_seq_sub_iter(struct neigh_seq_state *_state, 889static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
916 struct neighbour *n, loff_t *pos) 890 struct neighbour *n, loff_t * pos)
917{ 891{
918 struct clip_seq_state *state = (struct clip_seq_state *) _state; 892 struct clip_seq_state *state = (struct clip_seq_state *)_state;
919 893
920 return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos); 894 return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
921} 895}
922 896
923static void *clip_seq_start(struct seq_file *seq, loff_t *pos) 897static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
924{ 898{
925 return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY); 899 return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
926} 900}
927 901
928static int clip_seq_show(struct seq_file *seq, void *v) 902static int clip_seq_show(struct seq_file *seq, void *v)
929{ 903{
930 static char atm_arp_banner[] = 904 static char atm_arp_banner[] =
931 "IPitf TypeEncp Idle IP address ATM address\n"; 905 "IPitf TypeEncp Idle IP address ATM address\n";
932 906
933 if (v == SEQ_START_TOKEN) { 907 if (v == SEQ_START_TOKEN) {
934 seq_puts(seq, atm_arp_banner); 908 seq_puts(seq, atm_arp_banner);
@@ -939,7 +913,7 @@ static int clip_seq_show(struct seq_file *seq, void *v)
939 913
940 atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc); 914 atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
941 } 915 }
942 return 0; 916 return 0;
943} 917}
944 918
945static struct seq_operations arp_seq_ops = { 919static struct seq_operations arp_seq_ops = {
@@ -988,20 +962,19 @@ static struct file_operations arp_seq_fops = {
988 962
989static int __init atm_clip_init(void) 963static int __init atm_clip_init(void)
990{ 964{
991 neigh_table_init(&clip_tbl); 965 struct proc_dir_entry *p;
966 neigh_table_init_no_netlink(&clip_tbl);
992 967
993 clip_tbl_hook = &clip_tbl; 968 clip_tbl_hook = &clip_tbl;
994 register_atm_ioctl(&clip_ioctl_ops); 969 register_atm_ioctl(&clip_ioctl_ops);
970 register_netdevice_notifier(&clip_dev_notifier);
971 register_inetaddr_notifier(&clip_inet_notifier);
995 972
996#ifdef CONFIG_PROC_FS 973 setup_timer(&idle_timer, idle_timer_check, 0);
997{
998 struct proc_dir_entry *p;
999 974
1000 p = create_proc_entry("arp", S_IRUGO, atm_proc_root); 975 p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
1001 if (p) 976 if (p)
1002 p->proc_fops = &arp_seq_fops; 977 p->proc_fops = &arp_seq_fops;
1003}
1004#endif
1005 978
1006 return 0; 979 return 0;
1007} 980}
@@ -1012,13 +985,15 @@ static void __exit atm_clip_exit(void)
1012 985
1013 remove_proc_entry("arp", atm_proc_root); 986 remove_proc_entry("arp", atm_proc_root);
1014 987
988 unregister_inetaddr_notifier(&clip_inet_notifier);
989 unregister_netdevice_notifier(&clip_dev_notifier);
990
1015 deregister_atm_ioctl(&clip_ioctl_ops); 991 deregister_atm_ioctl(&clip_ioctl_ops);
1016 992
1017 /* First, stop the idle timer, so it stops banging 993 /* First, stop the idle timer, so it stops banging
1018 * on the table. 994 * on the table.
1019 */ 995 */
1020 if (start_timer == 0) 996 del_timer_sync(&idle_timer);
1021 del_timer(&idle_timer);
1022 997
1023 /* Next, purge the table, so that the device 998 /* Next, purge the table, so that the device
1024 * unregister loop below does not hang due to 999 * unregister loop below does not hang due to
@@ -1042,5 +1017,6 @@ static void __exit atm_clip_exit(void)
1042 1017
1043module_init(atm_clip_init); 1018module_init(atm_clip_init);
1044module_exit(atm_clip_exit); 1019module_exit(atm_clip_exit);
1045 1020MODULE_AUTHOR("Werner Almesberger");
1021MODULE_DESCRIPTION("Classical/IP over ATM interface");
1046MODULE_LICENSE("GPL"); 1022MODULE_LICENSE("GPL");
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index dbf9b47681f7..a2e0dd047e9f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -228,6 +228,8 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
 	return NULL;
 }
 
+EXPORT_SYMBOL(ax25_find_cb);
+
 void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 {
 	ax25_cb *s;
@@ -424,6 +426,26 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
 	return 0;
 }
 
+static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
+{
+	ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
+	ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
+	ax25->t2 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]);
+	ax25->t3 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]);
+	ax25->n2 = ax25_dev->values[AX25_VALUES_N2];
+	ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN];
+	ax25->idle = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]);
+	ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];
+
+	if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
+		ax25->modulus = AX25_EMODULUS;
+		ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
+	} else {
+		ax25->modulus = AX25_MODULUS;
+		ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
+	}
+}
+
 /*
  * Fill in a created AX.25 created control block with the default
  * values for a particular device.
@@ -433,39 +455,28 @@ void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev)
 	ax25->ax25_dev = ax25_dev;
 
 	if (ax25->ax25_dev != NULL) {
-		ax25->rtt = ax25_dev->values[AX25_VALUES_T1] / 2;
-		ax25->t1 = ax25_dev->values[AX25_VALUES_T1];
-		ax25->t2 = ax25_dev->values[AX25_VALUES_T2];
-		ax25->t3 = ax25_dev->values[AX25_VALUES_T3];
-		ax25->n2 = ax25_dev->values[AX25_VALUES_N2];
-		ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN];
-		ax25->idle = ax25_dev->values[AX25_VALUES_IDLE];
-		ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];
-
-		if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
-			ax25->modulus = AX25_EMODULUS;
-			ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
-		} else {
-			ax25->modulus = AX25_MODULUS;
-			ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
-		}
+		ax25_fillin_cb_from_dev(ax25, ax25_dev);
+		return;
+	}
+
+	/*
+	 * No device, use kernel / AX.25 spec default values
+	 */
+	ax25->rtt = msecs_to_jiffies(AX25_DEF_T1) / 2;
+	ax25->t1 = msecs_to_jiffies(AX25_DEF_T1);
+	ax25->t2 = msecs_to_jiffies(AX25_DEF_T2);
+	ax25->t3 = msecs_to_jiffies(AX25_DEF_T3);
+	ax25->n2 = AX25_DEF_N2;
+	ax25->paclen = AX25_DEF_PACLEN;
+	ax25->idle = msecs_to_jiffies(AX25_DEF_IDLE);
+	ax25->backoff = AX25_DEF_BACKOFF;
+
+	if (AX25_DEF_AXDEFMODE) {
+		ax25->modulus = AX25_EMODULUS;
+		ax25->window = AX25_DEF_EWINDOW;
 	} else {
-		ax25->rtt = AX25_DEF_T1 / 2;
-		ax25->t1 = AX25_DEF_T1;
-		ax25->t2 = AX25_DEF_T2;
-		ax25->t3 = AX25_DEF_T3;
-		ax25->n2 = AX25_DEF_N2;
-		ax25->paclen = AX25_DEF_PACLEN;
-		ax25->idle = AX25_DEF_IDLE;
-		ax25->backoff = AX25_DEF_BACKOFF;
-
-		if (AX25_DEF_AXDEFMODE) {
-			ax25->modulus = AX25_EMODULUS;
-			ax25->window = AX25_DEF_EWINDOW;
-		} else {
-			ax25->modulus = AX25_MODULUS;
-			ax25->window = AX25_DEF_WINDOW;
-		}
+		ax25->modulus = AX25_MODULUS;
+		ax25->window = AX25_DEF_WINDOW;
 	}
 }
 
@@ -1979,24 +1990,6 @@ static struct notifier_block ax25_dev_notifier = {
 	.notifier_call =ax25_device_event,
 };
 
-EXPORT_SYMBOL(ax25_hard_header);
-EXPORT_SYMBOL(ax25_rebuild_header);
-EXPORT_SYMBOL(ax25_findbyuid);
-EXPORT_SYMBOL(ax25_find_cb);
-EXPORT_SYMBOL(ax25_linkfail_register);
-EXPORT_SYMBOL(ax25_linkfail_release);
-EXPORT_SYMBOL(ax25_listen_register);
-EXPORT_SYMBOL(ax25_listen_release);
-EXPORT_SYMBOL(ax25_protocol_register);
-EXPORT_SYMBOL(ax25_protocol_release);
-EXPORT_SYMBOL(ax25_send_frame);
-EXPORT_SYMBOL(ax25_uid_policy);
-EXPORT_SYMBOL(ax25cmp);
-EXPORT_SYMBOL(ax2asc);
-EXPORT_SYMBOL(asc2ax);
-EXPORT_SYMBOL(null_ax25_address);
-EXPORT_SYMBOL(ax25_display_timer);
-
 static int __init ax25_init(void)
 {
 	int rc = proto_register(&ax25_proto, 0);
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 0164a155b8c4..5f0896ad0042 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -11,6 +11,7 @@
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/string.h>
@@ -33,6 +34,8 @@
  */
 ax25_address null_ax25_address = {{0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00}};
 
+EXPORT_SYMBOL(null_ax25_address);
+
 /*
  * ax25 -> ascii conversion
  */
@@ -64,6 +67,8 @@ char *ax2asc(char *buf, ax25_address *a)
 
 }
 
+EXPORT_SYMBOL(ax2asc);
+
 /*
  * ascii -> ax25 conversion
  */
@@ -97,6 +102,8 @@ void asc2ax(ax25_address *addr, char *callsign)
 	addr->ax25_call[6] &= 0x1E;
 }
 
+EXPORT_SYMBOL(asc2ax);
+
 /*
  * Compare two ax.25 addresses
  */
@@ -116,6 +123,8 @@ int ax25cmp(ax25_address *a, ax25_address *b)
 	return 2; /* Partial match */
 }
 
+EXPORT_SYMBOL(ax25cmp);
+
 /*
  * Compare two AX.25 digipeater paths.
 */
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 061083efc1dc..5961459935eb 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -61,7 +61,8 @@ void ax25_ds_set_timer(ax25_dev *ax25_dev)
 		return;
 
 	del_timer(&ax25_dev->dama.slave_timer);
-	ax25_dev->dama.slave_timeout = ax25_dev->values[AX25_VALUES_DS_TIMEOUT] / 10;
+	ax25_dev->dama.slave_timeout =
+		msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10;
 	ax25_ds_add_timer(ax25_dev);
 }
 
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index d68aff100729..3bb152710b77 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -12,6 +12,7 @@
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
@@ -74,6 +75,8 @@ int ax25_protocol_register(unsigned int pid,
 	return 1;
 }
 
+EXPORT_SYMBOL(ax25_protocol_register);
+
 void ax25_protocol_release(unsigned int pid)
 {
 	struct protocol_struct *s, *protocol;
@@ -106,6 +109,8 @@ void ax25_protocol_release(unsigned int pid)
 	write_unlock(&protocol_list_lock);
 }
 
+EXPORT_SYMBOL(ax25_protocol_release);
+
 int ax25_linkfail_register(void (*func)(ax25_cb *, int))
 {
 	struct linkfail_struct *linkfail;
@@ -123,6 +128,8 @@ int ax25_linkfail_register(void (*func)(ax25_cb *, int))
 	return 1;
 }
 
+EXPORT_SYMBOL(ax25_linkfail_register);
+
 void ax25_linkfail_release(void (*func)(ax25_cb *, int))
 {
 	struct linkfail_struct *s, *linkfail;
@@ -155,6 +162,8 @@ void ax25_linkfail_release(void (*func)(ax25_cb *, int))
 	spin_unlock_bh(&linkfail_lock);
 }
 
+EXPORT_SYMBOL(ax25_linkfail_release);
+
 int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
 {
 	struct listen_struct *listen;
@@ -176,6 +185,8 @@ int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
 	return 1;
 }
 
+EXPORT_SYMBOL(ax25_listen_register);
+
 void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
 {
 	struct listen_struct *s, *listen;
@@ -208,6 +219,8 @@ void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
 	spin_unlock_bh(&listen_lock);
 }
 
+EXPORT_SYMBOL(ax25_listen_release);
+
 int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
 {
 	int (*res)(struct sk_buff *, ax25_cb *) = NULL;
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index d643dac3eccc..a0b534f80f17 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -12,6 +12,7 @@
12#include <linux/socket.h> 12#include <linux/socket.h>
13#include <linux/in.h> 13#include <linux/in.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
17#include <linux/string.h> 18#include <linux/string.h>
@@ -221,3 +222,5 @@ int ax25_rebuild_header(struct sk_buff *skb)
221 222
222#endif 223#endif
223 224
225EXPORT_SYMBOL(ax25_hard_header);
226EXPORT_SYMBOL(ax25_rebuild_header);
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index 5fc048dcd39a..5d99852b239c 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -14,6 +14,7 @@
14#include <linux/socket.h> 14#include <linux/socket.h>
15#include <linux/in.h> 15#include <linux/in.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h>
17#include <linux/sched.h> 18#include <linux/sched.h>
18#include <linux/timer.h> 19#include <linux/timer.h>
19#include <linux/string.h> 20#include <linux/string.h>
@@ -104,6 +105,8 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
104 return ax25; /* We had to create it */ 105 return ax25; /* We had to create it */
105} 106}
106 107
108EXPORT_SYMBOL(ax25_send_frame);
109
107/* 110/*
108 * All outgoing AX.25 I frames pass via this routine. Therefore this is 111 * All outgoing AX.25 I frames pass via this routine. Therefore this is
109 * where the fragmentation of frames takes place. If fragment is set to 112 * where the fragmentation of frames takes place. If fragment is set to
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index f04f8630fd28..5ac98250797b 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -360,7 +360,7 @@ struct file_operations ax25_route_fops = {
360/* 360/*
361 * Find AX.25 route 361 * Find AX.25 route
362 * 362 *
363 * Only routes with a refernce rout of zero can be destroyed. 363 * Only routes with a reference count of zero can be destroyed.
364 */ 364 */
365static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) 365static ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
366{ 366{
diff --git a/net/ax25/ax25_timer.c b/net/ax25/ax25_timer.c
index 7a6b50a14554..ec254057f212 100644
--- a/net/ax25/ax25_timer.c
+++ b/net/ax25/ax25_timer.c
@@ -18,6 +18,7 @@
18#include <linux/socket.h> 18#include <linux/socket.h>
19#include <linux/in.h> 19#include <linux/in.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/module.h>
21#include <linux/jiffies.h> 22#include <linux/jiffies.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/string.h> 24#include <linux/string.h>
@@ -137,6 +138,8 @@ unsigned long ax25_display_timer(struct timer_list *timer)
137 return timer->expires - jiffies; 138 return timer->expires - jiffies;
138} 139}
139 140
141EXPORT_SYMBOL(ax25_display_timer);
142
140static void ax25_heartbeat_expiry(unsigned long param) 143static void ax25_heartbeat_expiry(unsigned long param)
141{ 144{
142 int proto = AX25_PROTO_STD_SIMPLEX; 145 int proto = AX25_PROTO_STD_SIMPLEX;
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index b8b5854bce9a..5e9a81e8b214 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -49,6 +49,8 @@ static DEFINE_RWLOCK(ax25_uid_lock);
49 49
50int ax25_uid_policy = 0; 50int ax25_uid_policy = 0;
51 51
52EXPORT_SYMBOL(ax25_uid_policy);
53
52ax25_uid_assoc *ax25_findbyuid(uid_t uid) 54ax25_uid_assoc *ax25_findbyuid(uid_t uid)
53{ 55{
54 ax25_uid_assoc *ax25_uid, *res = NULL; 56 ax25_uid_assoc *ax25_uid, *res = NULL;
@@ -67,6 +69,8 @@ ax25_uid_assoc *ax25_findbyuid(uid_t uid)
67 return res; 69 return res;
68} 70}
69 71
72EXPORT_SYMBOL(ax25_findbyuid);
73
70int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax) 74int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
71{ 75{
72 ax25_uid_assoc *ax25_uid; 76 ax25_uid_assoc *ax25_uid;
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 894a22558d9d..bdb64c36df12 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -18,14 +18,14 @@ static int min_backoff[1], max_backoff[] = {2};
18static int min_conmode[1], max_conmode[] = {2}; 18static int min_conmode[1], max_conmode[] = {2};
19static int min_window[] = {1}, max_window[] = {7}; 19static int min_window[] = {1}, max_window[] = {7};
20static int min_ewindow[] = {1}, max_ewindow[] = {63}; 20static int min_ewindow[] = {1}, max_ewindow[] = {63};
21static int min_t1[] = {1}, max_t1[] = {30 * HZ}; 21static int min_t1[] = {1}, max_t1[] = {30000};
22static int min_t2[] = {1}, max_t2[] = {20 * HZ}; 22static int min_t2[] = {1}, max_t2[] = {20000};
23static int min_t3[1], max_t3[] = {3600 * HZ}; 23static int min_t3[1], max_t3[] = {3600000};
24static int min_idle[1], max_idle[] = {65535 * HZ}; 24static int min_idle[1], max_idle[] = {65535000};
25static int min_n2[] = {1}, max_n2[] = {31}; 25static int min_n2[] = {1}, max_n2[] = {31};
26static int min_paclen[] = {1}, max_paclen[] = {512}; 26static int min_paclen[] = {1}, max_paclen[] = {512};
27static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; 27static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
28static int min_ds_timeout[1], max_ds_timeout[] = {65535 * HZ}; 28static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
29 29
30static struct ctl_table_header *ax25_table_header; 30static struct ctl_table_header *ax25_table_header;
31 31
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 6b61323ce23c..0c2d13ad69bb 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -255,7 +255,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
255 } 255 }
256 256
257 if ((err = hci_send_sco(conn->hcon, skb)) < 0) 257 if ((err = hci_send_sco(conn->hcon, skb)) < 0)
258 goto fail; 258 return err;
259 259
260 return count; 260 return count;
261 261
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 22d806cf40ca..12da21afb9ca 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -55,7 +55,7 @@ static int __init br_init(void)
55 55
56static void __exit br_deinit(void) 56static void __exit br_deinit(void)
57{ 57{
58 llc_sap_close(br_stp_sap); 58 rcu_assign_pointer(br_stp_sap->rcv_func, NULL);
59 59
60#ifdef CONFIG_BRIDGE_NETFILTER 60#ifdef CONFIG_BRIDGE_NETFILTER
61 br_netfilter_fini(); 61 br_netfilter_fini();
@@ -67,6 +67,7 @@ static void __exit br_deinit(void)
67 67
68 synchronize_net(); 68 synchronize_net();
69 69
70 llc_sap_put(br_stp_sap);
70 br_fdb_get_hook = NULL; 71 br_fdb_get_hook = NULL;
71 br_fdb_put_hook = NULL; 72 br_fdb_put_hook = NULL;
72 73
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 2d24fb400e0c..56f3aa47e758 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -16,6 +16,7 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/if_vlan.h>
19#include <linux/netfilter_bridge.h> 20#include <linux/netfilter_bridge.h>
20#include "br_private.h" 21#include "br_private.h"
21 22
@@ -29,10 +30,15 @@ static inline int should_deliver(const struct net_bridge_port *p,
29 return 1; 30 return 1;
30} 31}
31 32
33static inline unsigned packet_length(const struct sk_buff *skb)
34{
35 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
36}
37
32int br_dev_queue_push_xmit(struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sk_buff *skb)
33{ 39{
34 /* drop mtu oversized packets except tso */ 40 /* drop mtu oversized packets except tso */
35 if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size) 41 if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
36 kfree_skb(skb); 42 kfree_skb(skb);
37 else { 43 else {
38#ifdef CONFIG_BRIDGE_NETFILTER 44#ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 59eef42d4a42..f5d47bf4f967 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -300,34 +300,22 @@ int br_add_bridge(const char *name)
300 rtnl_lock(); 300 rtnl_lock();
301 if (strchr(dev->name, '%')) { 301 if (strchr(dev->name, '%')) {
302 ret = dev_alloc_name(dev, dev->name); 302 ret = dev_alloc_name(dev, dev->name);
303 if (ret < 0) 303 if (ret < 0) {
304 goto err1; 304 free_netdev(dev);
305 goto out;
306 }
305 } 307 }
306 308
307 ret = register_netdevice(dev); 309 ret = register_netdevice(dev);
308 if (ret) 310 if (ret)
309 goto err2; 311 goto out;
310
311 /* network device kobject is not setup until
312 * after rtnl_unlock does it's hotplug magic.
313 * so hold reference to avoid race.
314 */
315 dev_hold(dev);
316 rtnl_unlock();
317 312
318 ret = br_sysfs_addbr(dev); 313 ret = br_sysfs_addbr(dev);
319 dev_put(dev); 314 if (ret)
320 315 unregister_netdevice(dev);
321 if (ret)
322 unregister_netdev(dev);
323 out: 316 out:
324 return ret;
325
326 err2:
327 free_netdev(dev);
328 err1:
329 rtnl_unlock(); 317 rtnl_unlock();
330 goto out; 318 return ret;
331} 319}
332 320
333int br_del_bridge(const char *name) 321int br_del_bridge(const char *name)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index b7766562d72c..bfa4d8c333f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -66,6 +66,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
66 } 66 }
67 67
68 if (is_multicast_ether_addr(dest)) { 68 if (is_multicast_ether_addr(dest)) {
69 br->statistics.multicast++;
69 br_flood_forward(br, skb, !passedup); 70 br_flood_forward(br, skb, !passedup);
70 if (!passedup) 71 if (!passedup)
71 br_pass_frame_up(br, skb); 72 br_pass_frame_up(br, skb);
@@ -125,9 +126,6 @@ int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb)
125 struct sk_buff *skb = *pskb; 126 struct sk_buff *skb = *pskb;
126 const unsigned char *dest = eth_hdr(skb)->h_dest; 127 const unsigned char *dest = eth_hdr(skb)->h_dest;
127 128
128 if (p->state == BR_STATE_DISABLED)
129 goto err;
130
131 if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) 129 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
132 goto err; 130 goto err;
133 131
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f29450b788be..3da9264449f7 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -765,6 +765,15 @@ out:
765 return NF_STOLEN; 765 return NF_STOLEN;
766} 766}
767 767
768static int br_nf_dev_queue_xmit(struct sk_buff *skb)
769{
770 if (skb->protocol == htons(ETH_P_IP) &&
771 skb->len > skb->dev->mtu &&
772 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
773 return ip_fragment(skb, br_dev_queue_push_xmit);
774 else
775 return br_dev_queue_push_xmit(skb);
776}
768 777
769/* PF_BRIDGE/POST_ROUTING ********************************************/ 778/* PF_BRIDGE/POST_ROUTING ********************************************/
770static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, 779static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
@@ -824,7 +833,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
824 realoutdev = nf_bridge->netoutdev; 833 realoutdev = nf_bridge->netoutdev;
825#endif 834#endif
826 NF_HOOK(pf, NF_IP_POST_ROUTING, skb, NULL, realoutdev, 835 NF_HOOK(pf, NF_IP_POST_ROUTING, skb, NULL, realoutdev,
827 br_dev_queue_push_xmit); 836 br_nf_dev_queue_xmit);
828 837
829 return NF_STOLEN; 838 return NF_STOLEN;
830 839
@@ -869,7 +878,7 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
869 878
870 if ((out->hard_start_xmit == br_dev_xmit && 879 if ((out->hard_start_xmit == br_dev_xmit &&
871 okfn != br_nf_forward_finish && 880 okfn != br_nf_forward_finish &&
872 okfn != br_nf_local_out_finish && okfn != br_dev_queue_push_xmit) 881 okfn != br_nf_local_out_finish && okfn != br_nf_dev_queue_xmit)
873#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 882#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
874 || ((out->priv_flags & IFF_802_1Q_VLAN) && 883 || ((out->priv_flags & IFF_802_1Q_VLAN) &&
875 VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit) 884 VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit)
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index d159c92cca84..466ed3440b74 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
168 168
169 if (info->bitmask & EBT_LOG_NFLOG) 169 if (info->bitmask & EBT_LOG_NFLOG)
170 nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, 170 nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
171 info->prefix); 171 "%s", info->prefix);
172 else 172 else
173 ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, 173 ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
174 info->prefix); 174 info->prefix);
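The ebt_log change above passes the user-controlled prefix through a fixed "%s" format instead of using it as the format string itself, so any '%' sequences in it are treated as data rather than as conversions. A sketch of the difference with plain printk() and a hypothetical buffer:

        #include <linux/kernel.h>

        static void log_prefix(const char *prefix)
        {
                /* unsafe: '%' in prefix would be parsed as a format specifier */
                /* printk(prefix); */

                /* safe: prefix is only ever treated as data */
                printk(KERN_INFO "%s\n", prefix);
        }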
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 01eae97c53d9..3a13ed643459 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -824,14 +824,14 @@ static int translate_table(struct ebt_replace *repl,
824 if (udc_cnt) { 824 if (udc_cnt) {
825 /* this will get free'd in do_replace()/ebt_register_table() 825 /* this will get free'd in do_replace()/ebt_register_table()
826 if an error occurs */ 826 if an error occurs */
827 newinfo->chainstack = (struct ebt_chainstack **) 827 newinfo->chainstack =
828 vmalloc((highest_possible_processor_id()+1) 828 vmalloc((highest_possible_processor_id()+1)
829 * sizeof(struct ebt_chainstack)); 829 * sizeof(*(newinfo->chainstack)));
830 if (!newinfo->chainstack) 830 if (!newinfo->chainstack)
831 return -ENOMEM; 831 return -ENOMEM;
832 for_each_cpu(i) { 832 for_each_possible_cpu(i) {
833 newinfo->chainstack[i] = 833 newinfo->chainstack[i] =
834 vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); 834 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
835 if (!newinfo->chainstack[i]) { 835 if (!newinfo->chainstack[i]) {
836 while (i) 836 while (i)
837 vfree(newinfo->chainstack[--i]); 837 vfree(newinfo->chainstack[--i]);
@@ -841,8 +841,7 @@ static int translate_table(struct ebt_replace *repl,
841 } 841 }
842 } 842 }
843 843
844 cl_s = (struct ebt_cl_stack *) 844 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
845 vmalloc(udc_cnt * sizeof(struct ebt_cl_stack));
846 if (!cl_s) 845 if (!cl_s)
847 return -ENOMEM; 846 return -ENOMEM;
848 i = 0; /* the i'th udc */ 847 i = 0; /* the i'th udc */
@@ -901,7 +900,7 @@ static void get_counters(struct ebt_counter *oldcounters,
901 sizeof(struct ebt_counter) * nentries); 900 sizeof(struct ebt_counter) * nentries);
902 901
903 /* add other counters to those of cpu 0 */ 902 /* add other counters to those of cpu 0 */
904 for_each_cpu(cpu) { 903 for_each_possible_cpu(cpu) {
905 if (cpu == 0) 904 if (cpu == 0)
906 continue; 905 continue;
907 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 906 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
@@ -944,8 +943,7 @@ static int do_replace(void __user *user, unsigned int len)
944 943
945 countersize = COUNTER_OFFSET(tmp.nentries) * 944 countersize = COUNTER_OFFSET(tmp.nentries) *
946 (highest_possible_processor_id()+1); 945 (highest_possible_processor_id()+1);
947 newinfo = (struct ebt_table_info *) 946 newinfo = vmalloc(sizeof(*newinfo) + countersize);
948 vmalloc(sizeof(struct ebt_table_info) + countersize);
949 if (!newinfo) 947 if (!newinfo)
950 return -ENOMEM; 948 return -ENOMEM;
951 949
@@ -967,8 +965,7 @@ static int do_replace(void __user *user, unsigned int len)
967 /* the user wants counters back 965 /* the user wants counters back
968 the check on the size is done later, when we have the lock */ 966 the check on the size is done later, when we have the lock */
969 if (tmp.num_counters) { 967 if (tmp.num_counters) {
970 counterstmp = (struct ebt_counter *) 968 counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp));
971 vmalloc(tmp.num_counters * sizeof(struct ebt_counter));
972 if (!counterstmp) { 969 if (!counterstmp) {
973 ret = -ENOMEM; 970 ret = -ENOMEM;
974 goto free_entries; 971 goto free_entries;
@@ -1036,7 +1033,7 @@ static int do_replace(void __user *user, unsigned int len)
1036 1033
1037 vfree(table->entries); 1034 vfree(table->entries);
1038 if (table->chainstack) { 1035 if (table->chainstack) {
1039 for_each_cpu(i) 1036 for_each_possible_cpu(i)
1040 vfree(table->chainstack[i]); 1037 vfree(table->chainstack[i]);
1041 vfree(table->chainstack); 1038 vfree(table->chainstack);
1042 } 1039 }
@@ -1054,7 +1051,7 @@ free_counterstmp:
1054 vfree(counterstmp); 1051 vfree(counterstmp);
1055 /* can be initialized in translate_table() */ 1052 /* can be initialized in translate_table() */
1056 if (newinfo->chainstack) { 1053 if (newinfo->chainstack) {
1057 for_each_cpu(i) 1054 for_each_possible_cpu(i)
1058 vfree(newinfo->chainstack[i]); 1055 vfree(newinfo->chainstack[i]);
1059 vfree(newinfo->chainstack); 1056 vfree(newinfo->chainstack);
1060 } 1057 }
@@ -1148,8 +1145,7 @@ int ebt_register_table(struct ebt_table *table)
1148 1145
1149 countersize = COUNTER_OFFSET(table->table->nentries) * 1146 countersize = COUNTER_OFFSET(table->table->nentries) *
1150 (highest_possible_processor_id()+1); 1147 (highest_possible_processor_id()+1);
1151 newinfo = (struct ebt_table_info *) 1148 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1152 vmalloc(sizeof(struct ebt_table_info) + countersize);
1153 ret = -ENOMEM; 1149 ret = -ENOMEM;
1154 if (!newinfo) 1150 if (!newinfo)
1155 return -ENOMEM; 1151 return -ENOMEM;
@@ -1201,7 +1197,7 @@ free_unlock:
1201 mutex_unlock(&ebt_mutex); 1197 mutex_unlock(&ebt_mutex);
1202free_chainstack: 1198free_chainstack:
1203 if (newinfo->chainstack) { 1199 if (newinfo->chainstack) {
1204 for_each_cpu(i) 1200 for_each_possible_cpu(i)
1205 vfree(newinfo->chainstack[i]); 1201 vfree(newinfo->chainstack[i]);
1206 vfree(newinfo->chainstack); 1202 vfree(newinfo->chainstack);
1207 } 1203 }
@@ -1224,7 +1220,7 @@ void ebt_unregister_table(struct ebt_table *table)
1224 mutex_unlock(&ebt_mutex); 1220 mutex_unlock(&ebt_mutex);
1225 vfree(table->private->entries); 1221 vfree(table->private->entries);
1226 if (table->private->chainstack) { 1222 if (table->private->chainstack) {
1227 for_each_cpu(i) 1223 for_each_possible_cpu(i)
1228 vfree(table->private->chainstack[i]); 1224 vfree(table->private->chainstack[i]);
1229 vfree(table->private->chainstack); 1225 vfree(table->private->chainstack);
1230 } 1226 }
@@ -1247,8 +1243,7 @@ static int update_counters(void __user *user, unsigned int len)
1247 if (hlp.num_counters == 0) 1243 if (hlp.num_counters == 0)
1248 return -EINVAL; 1244 return -EINVAL;
1249 1245
1250 if ( !(tmp = (struct ebt_counter *) 1246 if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) {
1251 vmalloc(hlp.num_counters * sizeof(struct ebt_counter))) ){
1252 MEMPRINT("Update_counters && nomemory\n"); 1247 MEMPRINT("Update_counters && nomemory\n");
1253 return -ENOMEM; 1248 return -ENOMEM;
1254 } 1249 }
@@ -1377,8 +1372,7 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1377 BUGPRINT("Num_counters wrong\n"); 1372 BUGPRINT("Num_counters wrong\n");
1378 return -EINVAL; 1373 return -EINVAL;
1379 } 1374 }
1380 counterstmp = (struct ebt_counter *) 1375 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1381 vmalloc(nentries * sizeof(struct ebt_counter));
1382 if (!counterstmp) { 1376 if (!counterstmp) {
1383 MEMPRINT("Couldn't copy counters, out of memory\n"); 1377 MEMPRINT("Couldn't copy counters, out of memory\n");
1384 return -ENOMEM; 1378 return -ENOMEM;
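The ebtables hunks above drop the explicit casts on vmalloc() and size every allocation with sizeof(*pointer) instead of repeating the structure name. A minimal sketch of the idiom with a hypothetical structure:

        #include <linux/vmalloc.h>

        struct example_entry {
                unsigned long counter;
        };

        static struct example_entry *alloc_entries(unsigned int n)
        {
                struct example_entry *tab;

                /* no cast needed (vmalloc() returns void *), and the size expression
                   stays correct even if tab's type is changed later */
                tab = vmalloc(n * sizeof(*tab));
                return tab;
        }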
diff --git a/net/core/dev.c b/net/core/dev.c
index 434220d093aa..4fba549caf29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -127,7 +127,7 @@
127 * sure which should go first, but I bet it won't make much 127 * sure which should go first, but I bet it won't make much
128 * difference if we are running VLANs. The good news is that 128 * difference if we are running VLANs. The good news is that
129 * this protocol won't be in the list unless compiled in, so 129 * this protocol won't be in the list unless compiled in, so
130 * the average user (w/out VLANs) will not be adversly affected. 130 * the average user (w/out VLANs) will not be adversely affected.
131 * --BLG 131 * --BLG
132 * 132 *
133 * 0800 IP 133 * 0800 IP
@@ -149,7 +149,7 @@ static struct list_head ptype_base[16]; /* 16 way hashed list */
149static struct list_head ptype_all; /* Taps */ 149static struct list_head ptype_all; /* Taps */
150 150
151/* 151/*
152 * The @dev_base list is protected by @dev_base_lock and the rtln 152 * The @dev_base list is protected by @dev_base_lock and the rtnl
153 * semaphore. 153 * semaphore.
154 * 154 *
155 * Pure readers hold dev_base_lock for reading. 155 * Pure readers hold dev_base_lock for reading.
@@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex)
193 * Our notifier list 193 * Our notifier list
194 */ 194 */
195 195
196static BLOCKING_NOTIFIER_HEAD(netdev_chain); 196static RAW_NOTIFIER_HEAD(netdev_chain);
197 197
198/* 198/*
199 * Device drivers call our routines to queue packets here. We empty the 199 * Device drivers call our routines to queue packets here. We empty the
@@ -641,10 +641,12 @@ int dev_valid_name(const char *name)
641 * @name: name format string 641 * @name: name format string
642 * 642 *
643 * Passed a format string - eg "lt%d" it will try and find a suitable 643 * Passed a format string - eg "lt%d" it will try and find a suitable
644 * id. Not efficient for many devices, not called a lot. The caller 644 * id. It scans list of devices to build up a free map, then chooses
645 * must hold the dev_base or rtnl lock while allocating the name and 645 * the first empty slot. The caller must hold the dev_base or rtnl lock
646 * adding the device in order to avoid duplicates. Returns the number 646 * while allocating the name and adding the device in order to avoid
647 * of the unit assigned or a negative errno code. 647 * duplicates.
648 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
649 * Returns the number of the unit assigned or a negative errno code.
648 */ 650 */
649 651
650int dev_alloc_name(struct net_device *dev, const char *name) 652int dev_alloc_name(struct net_device *dev, const char *name)
@@ -736,7 +738,7 @@ int dev_change_name(struct net_device *dev, char *newname)
736 if (!err) { 738 if (!err) {
737 hlist_del(&dev->name_hlist); 739 hlist_del(&dev->name_hlist);
738 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); 740 hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
739 blocking_notifier_call_chain(&netdev_chain, 741 raw_notifier_call_chain(&netdev_chain,
740 NETDEV_CHANGENAME, dev); 742 NETDEV_CHANGENAME, dev);
741 } 743 }
742 744
@@ -744,14 +746,14 @@ int dev_change_name(struct net_device *dev, char *newname)
744} 746}
745 747
746/** 748/**
747 * netdev_features_change - device changes fatures 749 * netdev_features_change - device changes features
748 * @dev: device to cause notification 750 * @dev: device to cause notification
749 * 751 *
750 * Called to indicate a device has changed features. 752 * Called to indicate a device has changed features.
751 */ 753 */
752void netdev_features_change(struct net_device *dev) 754void netdev_features_change(struct net_device *dev)
753{ 755{
754 blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); 756 raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
755} 757}
756EXPORT_SYMBOL(netdev_features_change); 758EXPORT_SYMBOL(netdev_features_change);
757 759
@@ -766,7 +768,7 @@ EXPORT_SYMBOL(netdev_features_change);
766void netdev_state_change(struct net_device *dev) 768void netdev_state_change(struct net_device *dev)
767{ 769{
768 if (dev->flags & IFF_UP) { 770 if (dev->flags & IFF_UP) {
769 blocking_notifier_call_chain(&netdev_chain, 771 raw_notifier_call_chain(&netdev_chain,
770 NETDEV_CHANGE, dev); 772 NETDEV_CHANGE, dev);
771 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 773 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
772 } 774 }
@@ -864,7 +866,7 @@ int dev_open(struct net_device *dev)
864 /* 866 /*
865 * ... and announce new interface. 867 * ... and announce new interface.
866 */ 868 */
867 blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev); 869 raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
868 } 870 }
869 return ret; 871 return ret;
870} 872}
@@ -887,7 +889,7 @@ int dev_close(struct net_device *dev)
887 * Tell people we are going down, so that they can 889 * Tell people we are going down, so that they can
888 * prepare to death, when device is still operating. 890 * prepare to death, when device is still operating.
889 */ 891 */
890 blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); 892 raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
891 893
892 dev_deactivate(dev); 894 dev_deactivate(dev);
893 895
@@ -924,7 +926,7 @@ int dev_close(struct net_device *dev)
924 /* 926 /*
925 * Tell people we are down 927 * Tell people we are down
926 */ 928 */
927 blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); 929 raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
928 930
929 return 0; 931 return 0;
930} 932}
@@ -955,7 +957,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
955 int err; 957 int err;
956 958
957 rtnl_lock(); 959 rtnl_lock();
958 err = blocking_notifier_chain_register(&netdev_chain, nb); 960 err = raw_notifier_chain_register(&netdev_chain, nb);
959 if (!err) { 961 if (!err) {
960 for (dev = dev_base; dev; dev = dev->next) { 962 for (dev = dev_base; dev; dev = dev->next) {
961 nb->notifier_call(nb, NETDEV_REGISTER, dev); 963 nb->notifier_call(nb, NETDEV_REGISTER, dev);
@@ -983,7 +985,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
983 int err; 985 int err;
984 986
985 rtnl_lock(); 987 rtnl_lock();
986 err = blocking_notifier_chain_unregister(&netdev_chain, nb); 988 err = raw_notifier_chain_unregister(&netdev_chain, nb);
987 rtnl_unlock(); 989 rtnl_unlock();
988 return err; 990 return err;
989} 991}
@@ -994,12 +996,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
994 * @v: pointer passed unmodified to notifier function 996 * @v: pointer passed unmodified to notifier function
995 * 997 *
996 * Call all network notifier blocks. Parameters and return value 998 * Call all network notifier blocks. Parameters and return value
997 * are as for blocking_notifier_call_chain(). 999 * are as for raw_notifier_call_chain().
998 */ 1000 */
999 1001
1000int call_netdevice_notifiers(unsigned long val, void *v) 1002int call_netdevice_notifiers(unsigned long val, void *v)
1001{ 1003{
1002 return blocking_notifier_call_chain(&netdev_chain, val, v); 1004 return raw_notifier_call_chain(&netdev_chain, val, v);
1003} 1005}
1004 1006
1005/* When > 0 there are consumers of rx skb time stamps */ 1007/* When > 0 there are consumers of rx skb time stamps */
@@ -2196,7 +2198,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2196 * @dev: device 2198 * @dev: device
2197 * @inc: modifier 2199 * @inc: modifier
2198 * 2200 *
2199 * Add or remove promsicuity from a device. While the count in the device 2201 * Add or remove promiscuity from a device. While the count in the device
2200 * remains above zero the interface remains promiscuous. Once it hits zero 2202 * remains above zero the interface remains promiscuous. Once it hits zero
2201 * the device reverts back to normal filtering operation. A negative inc 2203 * the device reverts back to normal filtering operation. A negative inc
2202 * value is used to drop promiscuity on the device. 2204 * value is used to drop promiscuity on the device.
@@ -2308,7 +2310,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
2308 if (dev->flags & IFF_UP && 2310 if (dev->flags & IFF_UP &&
2309 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | 2311 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2310 IFF_VOLATILE))) 2312 IFF_VOLATILE)))
2311 blocking_notifier_call_chain(&netdev_chain, 2313 raw_notifier_call_chain(&netdev_chain,
2312 NETDEV_CHANGE, dev); 2314 NETDEV_CHANGE, dev);
2313 2315
2314 if ((flags ^ dev->gflags) & IFF_PROMISC) { 2316 if ((flags ^ dev->gflags) & IFF_PROMISC) {
@@ -2353,7 +2355,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
2353 else 2355 else
2354 dev->mtu = new_mtu; 2356 dev->mtu = new_mtu;
2355 if (!err && dev->flags & IFF_UP) 2357 if (!err && dev->flags & IFF_UP)
2356 blocking_notifier_call_chain(&netdev_chain, 2358 raw_notifier_call_chain(&netdev_chain,
2357 NETDEV_CHANGEMTU, dev); 2359 NETDEV_CHANGEMTU, dev);
2358 return err; 2360 return err;
2359} 2361}
@@ -2370,7 +2372,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2370 return -ENODEV; 2372 return -ENODEV;
2371 err = dev->set_mac_address(dev, sa); 2373 err = dev->set_mac_address(dev, sa);
2372 if (!err) 2374 if (!err)
2373 blocking_notifier_call_chain(&netdev_chain, 2375 raw_notifier_call_chain(&netdev_chain,
2374 NETDEV_CHANGEADDR, dev); 2376 NETDEV_CHANGEADDR, dev);
2375 return err; 2377 return err;
2376} 2378}
@@ -2427,7 +2429,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
2427 return -EINVAL; 2429 return -EINVAL;
2428 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, 2430 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
2429 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 2431 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
2430 blocking_notifier_call_chain(&netdev_chain, 2432 raw_notifier_call_chain(&netdev_chain,
2431 NETDEV_CHANGEADDR, dev); 2433 NETDEV_CHANGEADDR, dev);
2432 return 0; 2434 return 0;
2433 2435
@@ -2698,7 +2700,8 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
2698 /* If command is `set a parameter', or 2700 /* If command is `set a parameter', or
2699 * `get the encoding parameters', check if 2701 * `get the encoding parameters', check if
2700 * the user has the right to do it */ 2702 * the user has the right to do it */
2701 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) { 2703 if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE
2704 || cmd == SIOCGIWENCODEEXT) {
2702 if (!capable(CAP_NET_ADMIN)) 2705 if (!capable(CAP_NET_ADMIN))
2703 return -EPERM; 2706 return -EPERM;
2704 } 2707 }
@@ -2776,6 +2779,8 @@ int register_netdevice(struct net_device *dev)
2776 BUG_ON(dev_boot_phase); 2779 BUG_ON(dev_boot_phase);
2777 ASSERT_RTNL(); 2780 ASSERT_RTNL();
2778 2781
2782 might_sleep();
2783
2779 /* When net_device's are persistent, this will be fatal. */ 2784 /* When net_device's are persistent, this will be fatal. */
2780 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); 2785 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
2781 2786
@@ -2862,6 +2867,11 @@ int register_netdevice(struct net_device *dev)
2862 if (!dev->rebuild_header) 2867 if (!dev->rebuild_header)
2863 dev->rebuild_header = default_rebuild_header; 2868 dev->rebuild_header = default_rebuild_header;
2864 2869
2870 ret = netdev_register_sysfs(dev);
2871 if (ret)
2872 goto out_err;
2873 dev->reg_state = NETREG_REGISTERED;
2874
2865 /* 2875 /*
2866 * Default initial state at registry is that the 2876 * Default initial state at registry is that the
2867 * device is present. 2877 * device is present.
@@ -2877,14 +2887,11 @@ int register_netdevice(struct net_device *dev)
2877 hlist_add_head(&dev->name_hlist, head); 2887 hlist_add_head(&dev->name_hlist, head);
2878 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); 2888 hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
2879 dev_hold(dev); 2889 dev_hold(dev);
2880 dev->reg_state = NETREG_REGISTERING;
2881 write_unlock_bh(&dev_base_lock); 2890 write_unlock_bh(&dev_base_lock);
2882 2891
2883 /* Notify protocols, that a new device appeared. */ 2892 /* Notify protocols, that a new device appeared. */
2884 blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); 2893 raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
2885 2894
2886 /* Finish registration after unlock */
2887 net_set_todo(dev);
2888 ret = 0; 2895 ret = 0;
2889 2896
2890out: 2897out:
@@ -2960,7 +2967,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
2960 rtnl_lock(); 2967 rtnl_lock();
2961 2968
2962 /* Rebroadcast unregister notification */ 2969 /* Rebroadcast unregister notification */
2963 blocking_notifier_call_chain(&netdev_chain, 2970 raw_notifier_call_chain(&netdev_chain,
2964 NETDEV_UNREGISTER, dev); 2971 NETDEV_UNREGISTER, dev);
2965 2972
2966 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 2973 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
@@ -3007,7 +3014,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
3007 * 3014 *
3008 * We are invoked by rtnl_unlock() after it drops the semaphore. 3015 * We are invoked by rtnl_unlock() after it drops the semaphore.
3009 * This allows us to deal with problems: 3016 * This allows us to deal with problems:
3010 * 1) We can create/delete sysfs objects which invoke hotplug 3017 * 1) We can delete sysfs objects which invoke hotplug
3011 * without deadlocking with linkwatch via keventd. 3018 * without deadlocking with linkwatch via keventd.
3012 * 2) Since we run with the RTNL semaphore not held, we can sleep 3019 * 2) Since we run with the RTNL semaphore not held, we can sleep
3013 * safely in order to wait for the netdev refcnt to drop to zero. 3020 * safely in order to wait for the netdev refcnt to drop to zero.
@@ -3016,8 +3023,6 @@ static DEFINE_MUTEX(net_todo_run_mutex);
3016void netdev_run_todo(void) 3023void netdev_run_todo(void)
3017{ 3024{
3018 struct list_head list = LIST_HEAD_INIT(list); 3025 struct list_head list = LIST_HEAD_INIT(list);
3019 int err;
3020
3021 3026
3022 /* Need to guard against multiple cpu's getting out of order. */ 3027 /* Need to guard against multiple cpu's getting out of order. */
3023 mutex_lock(&net_todo_run_mutex); 3028 mutex_lock(&net_todo_run_mutex);
@@ -3040,40 +3045,29 @@ void netdev_run_todo(void)
3040 = list_entry(list.next, struct net_device, todo_list); 3045 = list_entry(list.next, struct net_device, todo_list);
3041 list_del(&dev->todo_list); 3046 list_del(&dev->todo_list);
3042 3047
3043 switch(dev->reg_state) { 3048 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
3044 case NETREG_REGISTERING: 3049 printk(KERN_ERR "network todo '%s' but state %d\n",
3045 err = netdev_register_sysfs(dev); 3050 dev->name, dev->reg_state);
3046 if (err) 3051 dump_stack();
3047 printk(KERN_ERR "%s: failed sysfs registration (%d)\n", 3052 continue;
3048 dev->name, err); 3053 }
3049 dev->reg_state = NETREG_REGISTERED;
3050 break;
3051
3052 case NETREG_UNREGISTERING:
3053 netdev_unregister_sysfs(dev);
3054 dev->reg_state = NETREG_UNREGISTERED;
3055
3056 netdev_wait_allrefs(dev);
3057 3054
3058 /* paranoia */ 3055 netdev_unregister_sysfs(dev);
3059 BUG_ON(atomic_read(&dev->refcnt)); 3056 dev->reg_state = NETREG_UNREGISTERED;
3060 BUG_TRAP(!dev->ip_ptr);
3061 BUG_TRAP(!dev->ip6_ptr);
3062 BUG_TRAP(!dev->dn_ptr);
3063 3057
3058 netdev_wait_allrefs(dev);
3064 3059
3065 /* It must be the very last action, 3060 /* paranoia */
3066 * after this 'dev' may point to freed up memory. 3061 BUG_ON(atomic_read(&dev->refcnt));
3067 */ 3062 BUG_TRAP(!dev->ip_ptr);
3068 if (dev->destructor) 3063 BUG_TRAP(!dev->ip6_ptr);
3069 dev->destructor(dev); 3064 BUG_TRAP(!dev->dn_ptr);
3070 break;
3071 3065
3072 default: 3066 /* It must be the very last action,
3073 printk(KERN_ERR "network todo '%s' but state %d\n", 3067 * after this 'dev' may point to freed up memory.
3074 dev->name, dev->reg_state); 3068 */
3075 break; 3069 if (dev->destructor)
3076 } 3070 dev->destructor(dev);
3077 } 3071 }
3078 3072
3079out: 3073out:
@@ -3100,12 +3094,11 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3100 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; 3094 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
3101 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; 3095 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3102 3096
3103 p = kmalloc(alloc_size, GFP_KERNEL); 3097 p = kzalloc(alloc_size, GFP_KERNEL);
3104 if (!p) { 3098 if (!p) {
3105 printk(KERN_ERR "alloc_dev: Unable to allocate device.\n"); 3099 printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
3106 return NULL; 3100 return NULL;
3107 } 3101 }
3108 memset(p, 0, alloc_size);
3109 3102
3110 dev = (struct net_device *) 3103 dev = (struct net_device *)
3111 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 3104 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
@@ -3131,7 +3124,7 @@ EXPORT_SYMBOL(alloc_netdev);
3131void free_netdev(struct net_device *dev) 3124void free_netdev(struct net_device *dev)
3132{ 3125{
3133#ifdef CONFIG_SYSFS 3126#ifdef CONFIG_SYSFS
3134 /* Compatiablity with error handling in drivers */ 3127 /* Compatibility with error handling in drivers */
3135 if (dev->reg_state == NETREG_UNINITIALIZED) { 3128 if (dev->reg_state == NETREG_UNINITIALIZED) {
3136 kfree((char *)dev - dev->padded); 3129 kfree((char *)dev - dev->padded);
3137 return; 3130 return;
@@ -3216,7 +3209,7 @@ int unregister_netdevice(struct net_device *dev)
3216 /* Notify protocols, that we are about to destroy 3209 /* Notify protocols, that we are about to destroy
3217 this device. They should clean all the things. 3210 this device. They should clean all the things.
3218 */ 3211 */
3219 blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); 3212 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3220 3213
3221 /* 3214 /*
3222 * Flush the multicast chain 3215 * Flush the multicast chain
@@ -3347,7 +3340,7 @@ static int __init net_dev_init(void)
3347 * Initialise the packet receive queues. 3340 * Initialise the packet receive queues.
3348 */ 3341 */
3349 3342
3350 for_each_cpu(i) { 3343 for_each_possible_cpu(i) {
3351 struct softnet_data *queue; 3344 struct softnet_data *queue;
3352 3345
3353 queue = &per_cpu(softnet_data, i); 3346 queue = &per_cpu(softnet_data, i);
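net/core/dev.c above converts the netdev notifier chain from a blocking chain to a raw one, so mutual exclusion comes from the caller (the RTNL here) rather than an internal semaphore, and switches the softnet queue setup to for_each_possible_cpu(). A sketch of the raw-chain API from <linux/notifier.h>, using a hypothetical chain and event number:

        #include <linux/notifier.h>

        static RAW_NOTIFIER_HEAD(example_chain);

        static int example_event(struct notifier_block *nb, unsigned long val, void *data)
        {
                return NOTIFY_DONE;
        }

        static struct notifier_block example_nb = {
                .notifier_call = example_event,
        };

        /* the caller must provide its own locking, e.g. rtnl_lock() */
        static void example_register_and_fire(void *dev)
        {
                raw_notifier_chain_register(&example_chain, &example_nb);
                raw_notifier_call_chain(&example_chain, 1 /* hypothetical event */, dev);
        }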
diff --git a/net/core/dv.c b/net/core/dv.c
index cf581407538c..29ee77f15932 100644
--- a/net/core/dv.c
+++ b/net/core/dv.c
@@ -55,15 +55,12 @@ int alloc_divert_blk(struct net_device *dev)
55 55
56 dev->divert = NULL; 56 dev->divert = NULL;
57 if (dev->type == ARPHRD_ETHER) { 57 if (dev->type == ARPHRD_ETHER) {
58 dev->divert = (struct divert_blk *) 58 dev->divert = kzalloc(alloc_size, GFP_KERNEL);
59 kmalloc(alloc_size, GFP_KERNEL);
60 if (dev->divert == NULL) { 59 if (dev->divert == NULL) {
61 printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n", 60 printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
62 dev->name); 61 dev->name);
63 return -ENOMEM; 62 return -ENOMEM;
64 } 63 }
65
66 memset(dev->divert, 0, sizeof(struct divert_blk));
67 dev_hold(dev); 64 dev_hold(dev);
68 } 65 }
69 66
diff --git a/net/core/filter.c b/net/core/filter.c
index 93fbd01d2259..5b4486a60cf6 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -34,6 +34,7 @@
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <asm/system.h> 35#include <asm/system.h>
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include <asm/unaligned.h>
37#include <linux/filter.h> 38#include <linux/filter.h>
38 39
39/* No hurry in this branch */ 40/* No hurry in this branch */
@@ -177,7 +178,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
177load_w: 178load_w:
178 ptr = load_pointer(skb, k, 4, &tmp); 179 ptr = load_pointer(skb, k, 4, &tmp);
179 if (ptr != NULL) { 180 if (ptr != NULL) {
180 A = ntohl(*(u32 *)ptr); 181 A = ntohl(get_unaligned((u32 *)ptr));
181 continue; 182 continue;
182 } 183 }
183 break; 184 break;
@@ -186,7 +187,7 @@ load_w:
186load_h: 187load_h:
187 ptr = load_pointer(skb, k, 2, &tmp); 188 ptr = load_pointer(skb, k, 2, &tmp);
188 if (ptr != NULL) { 189 if (ptr != NULL) {
189 A = ntohs(*(u16 *)ptr); 190 A = ntohs(get_unaligned((u16 *)ptr));
190 continue; 191 continue;
191 } 192 }
192 break; 193 break;
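The filter.c hunks read packet words through get_unaligned() because load_pointer() can hand back an arbitrarily aligned address; on strict-alignment architectures a plain dereference could fault. A sketch of the load, with the byte-swapping (ntohl/ntohs, as in the hunk) left out:

        #include <linux/types.h>
        #include <asm/unaligned.h>

        static u32 load_word(const void *p)
        {
                /* a plain *(u32 *)p may trap on CPUs that require natural alignment */
                return get_unaligned((const u32 *)p);
        }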
diff --git a/net/core/flow.c b/net/core/flow.c
index 55789f832eda..2191af5f26ac 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
79{ 79{
80 int i; 80 int i;
81 81
82 for_each_cpu(i) 82 for_each_possible_cpu(i)
83 flow_hash_rnd_recalc(i) = 1; 83 flow_hash_rnd_recalc(i) = 1;
84 84
85 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 85 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
@@ -318,12 +318,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
318 /* NOTHING */; 318 /* NOTHING */;
319 319
320 flow_table(cpu) = (struct flow_cache_entry **) 320 flow_table(cpu) = (struct flow_cache_entry **)
321 __get_free_pages(GFP_KERNEL, order); 321 __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
322 if (!flow_table(cpu)) 322 if (!flow_table(cpu))
323 panic("NET: failed to allocate flow cache order %lu\n", order); 323 panic("NET: failed to allocate flow cache order %lu\n", order);
324 324
325 memset(flow_table(cpu), 0, PAGE_SIZE << order);
326
327 flow_hash_rnd_recalc(cpu) = 1; 325 flow_hash_rnd_recalc(cpu) = 1;
328 flow_count(cpu) = 0; 326 flow_count(cpu) = 0;
329 327
@@ -363,7 +361,7 @@ static int __init flow_cache_init(void)
363 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 361 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
364 add_timer(&flow_hash_rnd_timer); 362 add_timer(&flow_hash_rnd_timer);
365 363
366 for_each_cpu(i) 364 for_each_possible_cpu(i)
367 flow_cache_cpu_prepare(i); 365 flow_cache_cpu_prepare(i);
368 366
369 hotcpu_notifier(flow_cache_cpu, 0); 367 hotcpu_notifier(flow_cache_cpu, 0);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index b07c029e8219..3cad026764f0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -159,11 +159,10 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
159 if (parm->interval < -2 || parm->interval > 3) 159 if (parm->interval < -2 || parm->interval > 3)
160 return -EINVAL; 160 return -EINVAL;
161 161
162 est = kmalloc(sizeof(*est), GFP_KERNEL); 162 est = kzalloc(sizeof(*est), GFP_KERNEL);
163 if (est == NULL) 163 if (est == NULL)
164 return -ENOBUFS; 164 return -ENOBUFS;
165 165
166 memset(est, 0, sizeof(*est));
167 est->interval = parm->interval + 2; 166 est->interval = parm->interval + 2;
168 est->bstats = bstats; 167 est->bstats = bstats;
169 est->rate_est = rate_est; 168 est->rate_est = rate_est;
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 341de44c7ed1..646937cc2d84 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -170,13 +170,13 @@ void linkwatch_fire_event(struct net_device *dev)
170 spin_unlock_irqrestore(&lweventlist_lock, flags); 170 spin_unlock_irqrestore(&lweventlist_lock, flags);
171 171
172 if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) { 172 if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
173 unsigned long thisevent = jiffies; 173 unsigned long delay = linkwatch_nextevent - jiffies;
174 174
175 if (thisevent >= linkwatch_nextevent) { 175 /* If we wrap around we'll delay it by at most HZ. */
176 if (!delay || delay > HZ)
176 schedule_work(&linkwatch_work); 177 schedule_work(&linkwatch_work);
177 } else { 178 else
178 schedule_delayed_work(&linkwatch_work, linkwatch_nextevent - thisevent); 179 schedule_delayed_work(&linkwatch_work, delay);
179 }
180 } 180 }
181 } 181 }
182} 182}
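The link_watch change computes the delay as an unsigned difference of jiffies values, so a wrapped counter produces a huge delta that the "delay > HZ" test clamps by scheduling the work immediately. Where an explicit ordering test is needed instead, the wrap-safe comparison helpers from <linux/jiffies.h> are the usual tool; a sketch with a hypothetical deadline:

        #include <linux/jiffies.h>

        static int deadline_passed(unsigned long deadline)
        {
                /* correct across jiffies wraparound, unlike 'jiffies >= deadline' */
                return time_after_eq(jiffies, deadline);
        }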
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0c8666872d10..50a8c73caf97 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -284,14 +284,11 @@ static struct neighbour **neigh_hash_alloc(unsigned int entries)
284 struct neighbour **ret; 284 struct neighbour **ret;
285 285
286 if (size <= PAGE_SIZE) { 286 if (size <= PAGE_SIZE) {
287 ret = kmalloc(size, GFP_ATOMIC); 287 ret = kzalloc(size, GFP_ATOMIC);
288 } else { 288 } else {
289 ret = (struct neighbour **) 289 ret = (struct neighbour **)
290 __get_free_pages(GFP_ATOMIC, get_order(size)); 290 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
291 } 291 }
292 if (ret)
293 memset(ret, 0, size);
294
295 return ret; 292 return ret;
296} 293}
297 294
@@ -1089,8 +1086,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1089 if (hh->hh_type == protocol) 1086 if (hh->hh_type == protocol)
1090 break; 1087 break;
1091 1088
1092 if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) { 1089 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1093 memset(hh, 0, sizeof(struct hh_cache));
1094 rwlock_init(&hh->hh_lock); 1090 rwlock_init(&hh->hh_lock);
1095 hh->hh_type = protocol; 1091 hh->hh_type = protocol;
1096 atomic_set(&hh->hh_refcnt, 0); 1092 atomic_set(&hh->hh_refcnt, 0);
@@ -1330,8 +1326,7 @@ void neigh_parms_destroy(struct neigh_parms *parms)
1330 kfree(parms); 1326 kfree(parms);
1331} 1327}
1332 1328
1333 1329void neigh_table_init_no_netlink(struct neigh_table *tbl)
1334void neigh_table_init(struct neigh_table *tbl)
1335{ 1330{
1336 unsigned long now = jiffies; 1331 unsigned long now = jiffies;
1337 unsigned long phsize; 1332 unsigned long phsize;
@@ -1366,13 +1361,11 @@ void neigh_table_init(struct neigh_table *tbl)
1366 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1); 1361 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1367 1362
1368 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *); 1363 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1369 tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL); 1364 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1370 1365
1371 if (!tbl->hash_buckets || !tbl->phash_buckets) 1366 if (!tbl->hash_buckets || !tbl->phash_buckets)
1372 panic("cannot allocate neighbour cache hashes"); 1367 panic("cannot allocate neighbour cache hashes");
1373 1368
1374 memset(tbl->phash_buckets, 0, phsize);
1375
1376 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); 1369 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1377 1370
1378 rwlock_init(&tbl->lock); 1371 rwlock_init(&tbl->lock);
@@ -1389,10 +1382,27 @@ void neigh_table_init(struct neigh_table *tbl)
1389 1382
1390 tbl->last_flush = now; 1383 tbl->last_flush = now;
1391 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1384 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1385}
1386
1387void neigh_table_init(struct neigh_table *tbl)
1388{
1389 struct neigh_table *tmp;
1390
1391 neigh_table_init_no_netlink(tbl);
1392 write_lock(&neigh_tbl_lock); 1392 write_lock(&neigh_tbl_lock);
1393 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1394 if (tmp->family == tbl->family)
1395 break;
1396 }
1393 tbl->next = neigh_tables; 1397 tbl->next = neigh_tables;
1394 neigh_tables = tbl; 1398 neigh_tables = tbl;
1395 write_unlock(&neigh_tbl_lock); 1399 write_unlock(&neigh_tbl_lock);
1400
1401 if (unlikely(tmp)) {
1402 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1403 "family %d\n", tbl->family);
1404 dump_stack();
1405 }
1396} 1406}
1397 1407
1398int neigh_table_clear(struct neigh_table *tbl) 1408int neigh_table_clear(struct neigh_table *tbl)
@@ -1633,7 +1643,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1633 1643
1634 memset(&ndst, 0, sizeof(ndst)); 1644 memset(&ndst, 0, sizeof(ndst));
1635 1645
1636 for_each_cpu(cpu) { 1646 for_each_possible_cpu(cpu) {
1637 struct neigh_statistics *st; 1647 struct neigh_statistics *st;
1638 1648
1639 st = per_cpu_ptr(tbl->stats, cpu); 1649 st = per_cpu_ptr(tbl->stats, cpu);
@@ -2663,6 +2673,7 @@ EXPORT_SYMBOL(neigh_rand_reach_time);
2663EXPORT_SYMBOL(neigh_resolve_output); 2673EXPORT_SYMBOL(neigh_resolve_output);
2664EXPORT_SYMBOL(neigh_table_clear); 2674EXPORT_SYMBOL(neigh_table_clear);
2665EXPORT_SYMBOL(neigh_table_init); 2675EXPORT_SYMBOL(neigh_table_init);
2676EXPORT_SYMBOL(neigh_table_init_no_netlink);
2666EXPORT_SYMBOL(neigh_update); 2677EXPORT_SYMBOL(neigh_update);
2667EXPORT_SYMBOL(neigh_update_hhs); 2678EXPORT_SYMBOL(neigh_update_hhs);
2668EXPORT_SYMBOL(pneigh_enqueue); 2679EXPORT_SYMBOL(pneigh_enqueue);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 21b68464cabb..47a6fceb6771 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -29,7 +29,7 @@ static const char fmt_ulong[] = "%lu\n";
29 29
30static inline int dev_isalive(const struct net_device *dev) 30static inline int dev_isalive(const struct net_device *dev)
31{ 31{
32 return dev->reg_state == NETREG_REGISTERED; 32 return dev->reg_state <= NETREG_REGISTERED;
33} 33}
34 34
35/* use same locking rules as GIF* ioctl's */ 35/* use same locking rules as GIF* ioctl's */
@@ -165,7 +165,7 @@ static ssize_t show_operstate(struct class_device *dev, char *buf)
165 operstate = IF_OPER_DOWN; 165 operstate = IF_OPER_DOWN;
166 read_unlock(&dev_base_lock); 166 read_unlock(&dev_base_lock);
167 167
168 if (operstate >= sizeof(operstates)) 168 if (operstate >= ARRAY_SIZE(operstates))
169 return -EINVAL; /* should not happen */ 169 return -EINVAL; /* should not happen */
170 170
171 return sprintf(buf, "%s\n", operstates[operstate]); 171 return sprintf(buf, "%s\n", operstates[operstate]);
@@ -445,58 +445,33 @@ static struct class net_class = {
445 445
446void netdev_unregister_sysfs(struct net_device * net) 446void netdev_unregister_sysfs(struct net_device * net)
447{ 447{
448 struct class_device * class_dev = &(net->class_dev); 448 class_device_del(&(net->class_dev));
449
450 if (net->get_stats)
451 sysfs_remove_group(&class_dev->kobj, &netstat_group);
452
453#ifdef WIRELESS_EXT
454 if (net->get_wireless_stats || (net->wireless_handlers &&
455 net->wireless_handlers->get_wireless_stats))
456 sysfs_remove_group(&class_dev->kobj, &wireless_group);
457#endif
458 class_device_del(class_dev);
459
460} 449}
461 450
462/* Create sysfs entries for network device. */ 451/* Create sysfs entries for network device. */
463int netdev_register_sysfs(struct net_device *net) 452int netdev_register_sysfs(struct net_device *net)
464{ 453{
465 struct class_device *class_dev = &(net->class_dev); 454 struct class_device *class_dev = &(net->class_dev);
466 int ret; 455 struct attribute_group **groups = net->sysfs_groups;
467 456
457 class_device_initialize(class_dev);
468 class_dev->class = &net_class; 458 class_dev->class = &net_class;
469 class_dev->class_data = net; 459 class_dev->class_data = net;
460 class_dev->groups = groups;
470 461
462 BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
471 strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE); 463 strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE);
472 if ((ret = class_device_register(class_dev)))
473 goto out;
474 464
475 if (net->get_stats && 465 if (net->get_stats)
476 (ret = sysfs_create_group(&class_dev->kobj, &netstat_group))) 466 *groups++ = &netstat_group;
477 goto out_unreg;
478 467
479#ifdef WIRELESS_EXT 468#ifdef WIRELESS_EXT
480 if (net->get_wireless_stats || (net->wireless_handlers && 469 if (net->get_wireless_stats
481 net->wireless_handlers->get_wireless_stats)) { 470 || (net->wireless_handlers && net->wireless_handlers->get_wireless_stats))
482 ret = sysfs_create_group(&class_dev->kobj, &wireless_group); 471 *groups++ = &wireless_group;
483 if (ret)
484 goto out_cleanup;
485 }
486 return 0;
487out_cleanup:
488 if (net->get_stats)
489 sysfs_remove_group(&class_dev->kobj, &netstat_group);
490#else
491 return 0;
492#endif 472#endif
493 473
494out_unreg: 474 return class_device_add(class_dev);
495 printk(KERN_WARNING "%s: sysfs attribute registration failed %d\n",
496 net->name, ret);
497 class_device_unregister(class_dev);
498out:
499 return ret;
500} 475}
501 476
502int netdev_sysfs_init(void) 477int netdev_sysfs_init(void)
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 1e44eda1fda9..79ebd75fbe4d 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -38,13 +38,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
38{ 38{
39 const int lopt_size = sizeof(struct listen_sock) + 39 const int lopt_size = sizeof(struct listen_sock) +
40 nr_table_entries * sizeof(struct request_sock *); 40 nr_table_entries * sizeof(struct request_sock *);
41 struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL); 41 struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
42 42
43 if (lopt == NULL) 43 if (lopt == NULL)
44 return -ENOMEM; 44 return -ENOMEM;
45 45
46 memset(lopt, 0, lopt_size);
47
48 for (lopt->max_qlen_log = 6; 46 for (lopt->max_qlen_log = 6;
49 (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog; 47 (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
50 lopt->max_qlen_log++); 48 lopt->max_qlen_log++);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 09464fa8d72f..fb3770f9c094 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -112,6 +112,14 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
112 BUG(); 112 BUG();
113} 113}
114 114
115void skb_truesize_bug(struct sk_buff *skb)
116{
117 printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
118 "len=%u, sizeof(sk_buff)=%Zd\n",
119 skb->truesize, skb->len, sizeof(struct sk_buff));
120}
121EXPORT_SYMBOL(skb_truesize_bug);
122
115/* Allocate a new skbuff. We do this ourselves so we can fill in a few 123/* Allocate a new skbuff. We do this ourselves so we can fill in a few
116 * 'private' fields and also do memory statistics to find all the 124 * 'private' fields and also do memory statistics to find all the
117 * [BEEP] leaks. 125 * [BEEP] leaks.
diff --git a/net/core/stream.c b/net/core/stream.c
index 35e25259fd95..e9489696f694 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -176,6 +176,7 @@ void sk_stream_rfree(struct sk_buff *skb)
176{ 176{
177 struct sock *sk = skb->sk; 177 struct sock *sk = skb->sk;
178 178
179 skb_truesize_check(skb);
179 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 180 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
180 sk->sk_forward_alloc += skb->truesize; 181 sk->sk_forward_alloc += skb->truesize;
181} 182}
diff --git a/net/core/utils.c b/net/core/utils.c
index fdc4f38bc46c..4f96f389243d 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
121{ 121{
122 int i; 122 int i;
123 123
124 for_each_cpu(i) { 124 for_each_possible_cpu(i) {
125 struct nrnd_state *state = &per_cpu(net_rand_state,i); 125 struct nrnd_state *state = &per_cpu(net_rand_state,i);
126 __net_srandom(state, i+jiffies); 126 __net_srandom(state, i+jiffies);
127 } 127 }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
133 unsigned long seed[NR_CPUS]; 133 unsigned long seed[NR_CPUS];
134 134
135 get_random_bytes(seed, sizeof(seed)); 135 get_random_bytes(seed, sizeof(seed));
136 for_each_cpu(i) { 136 for_each_possible_cpu(i) {
137 struct nrnd_state *state = &per_cpu(net_rand_state,i); 137 struct nrnd_state *state = &per_cpu(net_rand_state,i);
138 __net_srandom(state, seed[i]); 138 __net_srandom(state, seed[i]);
139 } 139 }
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 81d6995fcfdb..d2bc72d318f7 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -1726,6 +1726,14 @@ int wireless_rtnetlink_get(struct net_device * dev,
1726 if(!IW_IS_GET(request->cmd)) 1726 if(!IW_IS_GET(request->cmd))
1727 return -EOPNOTSUPP; 1727 return -EOPNOTSUPP;
1728 1728
1729 /* If command is `get the encoding parameters', check if
1730 * the user has the right to do it */
1731 if (request->cmd == SIOCGIWENCODE ||
1732 request->cmd == SIOCGIWENCODEEXT) {
1733 if (!capable(CAP_NET_ADMIN))
1734 return -EPERM;
1735 }
1736
1729 /* Special cases */ 1737 /* Special cases */
1730 if(request->cmd == SIOCGIWSTATS) 1738 if(request->cmd == SIOCGIWSTATS)
1731 /* Get Wireless Stats */ 1739 /* Get Wireless Stats */
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index b5981e5f6b00..8c211c58893b 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -452,6 +452,7 @@ found:
452 (unsigned long long) 452 (unsigned long long)
453 avr->dccpavr_ack_ackno); 453 avr->dccpavr_ack_ackno);
454 dccp_ackvec_throw_record(av, avr); 454 dccp_ackvec_throw_record(av, avr);
455 break;
455 } 456 }
456 /* 457 /*
457 * If it wasn't received, continue scanning... we might 458 * If it wasn't received, continue scanning... we might
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 29047995c695..f2c011fd2ba1 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -498,7 +498,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
498 goto drop; 498 goto drop;
499 499
500 if (dccp_parse_options(sk, skb)) 500 if (dccp_parse_options(sk, skb))
501 goto drop; 501 goto drop_and_free;
502 502
503 dccp_openreq_init(req, &dp, skb); 503 dccp_openreq_init(req, &dp, skb);
504 504
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ff7328b0e17..2e0ee8355c41 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,6 +848,7 @@ static int dccp_close_state(struct sock *sk)
848void dccp_close(struct sock *sk, long timeout) 848void dccp_close(struct sock *sk, long timeout)
849{ 849{
850 struct sk_buff *skb; 850 struct sk_buff *skb;
851 int state;
851 852
852 lock_sock(sk); 853 lock_sock(sk);
853 854
@@ -882,6 +883,11 @@ void dccp_close(struct sock *sk, long timeout)
882 sk_stream_wait_close(sk, timeout); 883 sk_stream_wait_close(sk, timeout);
883 884
884adjudge_to_death: 885adjudge_to_death:
886 state = sk->sk_state;
887 sock_hold(sk);
888 sock_orphan(sk);
889 atomic_inc(sk->sk_prot->orphan_count);
890
885 /* 891 /*
886 * It is the last release_sock in its life. It will remove backlog. 892 * It is the last release_sock in its life. It will remove backlog.
887 */ 893 */
@@ -894,8 +900,9 @@ adjudge_to_death:
894 bh_lock_sock(sk); 900 bh_lock_sock(sk);
895 BUG_TRAP(!sock_owned_by_user(sk)); 901 BUG_TRAP(!sock_owned_by_user(sk));
896 902
897 sock_hold(sk); 903 /* Have we already been destroyed by a softirq or backlog? */
898 sock_orphan(sk); 904 if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
905 goto out;
899 906
900 /* 907 /*
901 * The last release_sock may have processed the CLOSE or RESET 908 * The last release_sock may have processed the CLOSE or RESET
@@ -915,12 +922,12 @@ adjudge_to_death:
915#endif 922#endif
916 } 923 }
917 924
918 atomic_inc(sk->sk_prot->orphan_count);
919 if (sk->sk_state == DCCP_CLOSED) 925 if (sk->sk_state == DCCP_CLOSED)
920 inet_csk_destroy_sock(sk); 926 inet_csk_destroy_sock(sk);
921 927
922 /* Otherwise, socket is reprieved until protocol close. */ 928 /* Otherwise, socket is reprieved until protocol close. */
923 929
930out:
924 bh_unlock_sock(sk); 931 bh_unlock_sock(sk);
925 local_bh_enable(); 932 local_bh_enable();
926 sock_put(sk); 933 sock_put(sk);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7c8692c26bfe..66e230c3b328 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -493,7 +493,6 @@ struct elist_cb_state {
493static void neigh_elist_cb(struct neighbour *neigh, void *_info) 493static void neigh_elist_cb(struct neighbour *neigh, void *_info)
494{ 494{
495 struct elist_cb_state *s = _info; 495 struct elist_cb_state *s = _info;
496 struct dn_dev *dn_db;
497 struct dn_neigh *dn; 496 struct dn_neigh *dn;
498 497
499 if (neigh->dev != s->dev) 498 if (neigh->dev != s->dev)
@@ -503,10 +502,6 @@ static void neigh_elist_cb(struct neighbour *neigh, void *_info)
503 if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2))) 502 if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
504 return; 503 return;
505 504
506 dn_db = (struct dn_dev *) s->dev->dn_ptr;
507 if (dn_db->parms.forwarding == 1 && (dn->flags & DN_NDFLAG_R2))
508 return;
509
510 if (s->t == s->n) 505 if (s->t == s->n)
511 s->rs = dn_find_slot(s->ptr, s->n, dn->priority); 506 s->rs = dn_find_slot(s->ptr, s->n, dn->priority);
512 else 507 else
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 69b74a9a0fc3..7cef1d8ace27 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,6 +3,5 @@
3# 3#
4 4
5obj-y += eth.o 5obj-y += eth.o
6obj-$(CONFIG_SYSCTL) += sysctl_net_ether.o
7obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o 6obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o
8obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o 7obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
diff --git a/net/ethernet/sysctl_net_ether.c b/net/ethernet/sysctl_net_ether.c
deleted file mode 100644
index 66b39fc342d2..000000000000
--- a/net/ethernet/sysctl_net_ether.c
+++ /dev/null
@@ -1,14 +0,0 @@
1/* -*- linux-c -*-
2 * sysctl_net_ether.c: sysctl interface to net Ethernet subsystem.
3 *
4 * Begun April 1, 1996, Mike Shaver.
5 * Added /proc/sys/net/ether directory entry (empty =) ). [MS]
6 */
7
8#include <linux/mm.h>
9#include <linux/sysctl.h>
10#include <linux/if_ether.h>
11
12ctl_table ether_table[] = {
13 {0}
14};
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 93def94c1b32..3fa5df2e1f0b 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -501,8 +501,11 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
501static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) 501static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
502{ 502{
503 struct ieee80211_hdr_4addr *hdr11; 503 struct ieee80211_hdr_4addr *hdr11;
504 u16 stype;
504 505
505 hdr11 = (struct ieee80211_hdr_4addr *)skb->data; 506 hdr11 = (struct ieee80211_hdr_4addr *)skb->data;
507 stype = WLAN_FC_GET_STYPE(le16_to_cpu(hdr11->frame_ctl));
508
506 switch (le16_to_cpu(hdr11->frame_ctl) & 509 switch (le16_to_cpu(hdr11->frame_ctl) &
507 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { 510 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
508 case IEEE80211_FCTL_TODS: 511 case IEEE80211_FCTL_TODS:
@@ -523,7 +526,13 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
523 break; 526 break;
524 } 527 }
525 528
526 hdr[12] = 0; /* priority */ 529 if (stype & IEEE80211_STYPE_QOS_DATA) {
530 const struct ieee80211_hdr_3addrqos *qoshdr =
531 (struct ieee80211_hdr_3addrqos *)skb->data;
532 hdr[12] = le16_to_cpu(qoshdr->qos_ctl) & IEEE80211_QCTL_TID;
533 } else
534 hdr[12] = 0; /* priority */
535
527 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ 536 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
528} 537}
529 538
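A minimal userspace sketch (not part of the patch) of the priority-byte selection added above: for QoS data frames the Michael MIC header carries the TID from the QoS control field, otherwise 0. The constants are assumed from the 802.11 definitions (IEEE80211_STYPE_QOS_DATA = 0x0080, IEEE80211_QCTL_TID = 0x000f).

#include <stdio.h>
#include <stdint.h>

#define STYPE_QOS_DATA 0x0080	/* assumed value */
#define QCTL_TID       0x000f	/* assumed value */

static unsigned int mic_priority(uint16_t frame_ctl, uint16_t qos_ctl)
{
	/* QoS data frame: priority byte is the TID from qos_ctl */
	if (frame_ctl & STYPE_QOS_DATA)
		return qos_ctl & QCTL_TID;
	/* plain data frame: priority byte stays 0 */
	return 0;
}

int main(void)
{
	printf("%u\n", mic_priority(0x0088, 0x0005));	/* QoS data, TID 5 -> 5 */
	printf("%u\n", mic_priority(0x0008, 0x0005));	/* plain data      -> 0 */
	return 0;
}
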
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 604b7b0097bc..2bf567fd5a17 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -369,7 +369,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
369 369
370 /* Put this code here so that we avoid duplicating it in all 370 /* Put this code here so that we avoid duplicating it in all
371 * Rx paths. - Jean II */ 371 * Rx paths. - Jean II */
372#ifdef CONFIG_WIRELESS_EXT
373#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ 372#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
374 /* If spy monitoring on */ 373 /* If spy monitoring on */
375 if (ieee->spy_data.spy_number > 0) { 374 if (ieee->spy_data.spy_number > 0) {
@@ -398,7 +397,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
398 wireless_spy_update(ieee->dev, hdr->addr2, &wstats); 397 wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
399 } 398 }
400#endif /* IW_WIRELESS_SPY */ 399#endif /* IW_WIRELESS_SPY */
401#endif /* CONFIG_WIRELESS_EXT */
402 400
403#ifdef NOT_YET 401#ifdef NOT_YET
404 hostap_update_rx_stats(local->ap, hdr, rx_stats); 402 hostap_update_rx_stats(local->ap, hdr, rx_stats);
@@ -1692,8 +1690,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1692 WLAN_FC_GET_STYPE(le16_to_cpu 1690 WLAN_FC_GET_STYPE(le16_to_cpu
1693 (header->frame_ctl))); 1691 (header->frame_ctl)));
1694 1692
1695 IEEE80211_WARNING("%s: IEEE80211_REASSOC_REQ received\n", 1693 IEEE80211_DEBUG_MGMT("%s: IEEE80211_REASSOC_REQ received\n",
1696 ieee->dev->name); 1694 ieee->dev->name);
1697 if (ieee->handle_reassoc_request != NULL) 1695 if (ieee->handle_reassoc_request != NULL)
1698 ieee->handle_reassoc_request(ieee->dev, 1696 ieee->handle_reassoc_request(ieee->dev,
1699 (struct ieee80211_reassoc_request *) 1697 (struct ieee80211_reassoc_request *)
@@ -1705,8 +1703,8 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1705 WLAN_FC_GET_STYPE(le16_to_cpu 1703 WLAN_FC_GET_STYPE(le16_to_cpu
1706 (header->frame_ctl))); 1704 (header->frame_ctl)));
1707 1705
1708 IEEE80211_WARNING("%s: IEEE80211_ASSOC_REQ received\n", 1706 IEEE80211_DEBUG_MGMT("%s: IEEE80211_ASSOC_REQ received\n",
1709 ieee->dev->name); 1707 ieee->dev->name);
1710 if (ieee->handle_assoc_request != NULL) 1708 if (ieee->handle_assoc_request != NULL)
1711 ieee->handle_assoc_request(ieee->dev); 1709 ieee->handle_assoc_request(ieee->dev);
1712 break; 1710 break;
@@ -1722,10 +1720,10 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1722 IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", 1720 IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
1723 WLAN_FC_GET_STYPE(le16_to_cpu 1721 WLAN_FC_GET_STYPE(le16_to_cpu
1724 (header->frame_ctl))); 1722 (header->frame_ctl)));
1725 IEEE80211_WARNING("%s: Unknown management packet: %d\n", 1723 IEEE80211_DEBUG_MGMT("%s: Unknown management packet: %d\n",
1726 ieee->dev->name, 1724 ieee->dev->name,
1727 WLAN_FC_GET_STYPE(le16_to_cpu 1725 WLAN_FC_GET_STYPE(le16_to_cpu
1728 (header->frame_ctl))); 1726 (header->frame_ctl)));
1729 break; 1727 break;
1730 } 1728 }
1731} 1729}
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 8b4332f53394..6a5de1b84459 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -220,13 +220,43 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
220 return txb; 220 return txb;
221} 221}
222 222
223static int ieee80211_classify(struct sk_buff *skb)
224{
225 struct ethhdr *eth;
226 struct iphdr *ip;
227
228 eth = (struct ethhdr *)skb->data;
229 if (eth->h_proto != __constant_htons(ETH_P_IP))
230 return 0;
231
232 ip = skb->nh.iph;
233 switch (ip->tos & 0xfc) {
234 case 0x20:
235 return 2;
236 case 0x40:
237 return 1;
238 case 0x60:
239 return 3;
240 case 0x80:
241 return 4;
242 case 0xa0:
243 return 5;
244 case 0xc0:
245 return 6;
246 case 0xe0:
247 return 7;
248 default:
249 return 0;
250 }
251}
252
223/* Incoming skb is converted to a txb which consists of 253/* Incoming skb is converted to a txb which consists of
224 * a block of 802.11 fragment packets (stored as skbs) */ 254 * a block of 802.11 fragment packets (stored as skbs) */
225int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) 255int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
226{ 256{
227 struct ieee80211_device *ieee = netdev_priv(dev); 257 struct ieee80211_device *ieee = netdev_priv(dev);
228 struct ieee80211_txb *txb = NULL; 258 struct ieee80211_txb *txb = NULL;
229 struct ieee80211_hdr_3addr *frag_hdr; 259 struct ieee80211_hdr_3addrqos *frag_hdr;
230 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, 260 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
231 rts_required; 261 rts_required;
232 unsigned long flags; 262 unsigned long flags;
@@ -234,9 +264,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
234 int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; 264 int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
235 int bytes, fc, hdr_len; 265 int bytes, fc, hdr_len;
236 struct sk_buff *skb_frag; 266 struct sk_buff *skb_frag;
237 struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ 267 struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
238 .duration_id = 0, 268 .duration_id = 0,
239 .seq_ctl = 0 269 .seq_ctl = 0,
270 .qos_ctl = 0
240 }; 271 };
241 u8 dest[ETH_ALEN], src[ETH_ALEN]; 272 u8 dest[ETH_ALEN], src[ETH_ALEN];
242 struct ieee80211_crypt_data *crypt; 273 struct ieee80211_crypt_data *crypt;
@@ -282,12 +313,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
282 memcpy(dest, skb->data, ETH_ALEN); 313 memcpy(dest, skb->data, ETH_ALEN);
283 memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); 314 memcpy(src, skb->data + ETH_ALEN, ETH_ALEN);
284 315
285 /* Advance the SKB to the start of the payload */
286 skb_pull(skb, sizeof(struct ethhdr));
287
288 /* Determine total amount of storage required for TXB packets */
289 bytes = skb->len + SNAP_SIZE + sizeof(u16);
290
291 if (host_encrypt || host_build_iv) 316 if (host_encrypt || host_build_iv)
292 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | 317 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
293 IEEE80211_FCTL_PROTECTED; 318 IEEE80211_FCTL_PROTECTED;
@@ -306,9 +331,23 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
306 memcpy(header.addr2, src, ETH_ALEN); 331 memcpy(header.addr2, src, ETH_ALEN);
307 memcpy(header.addr3, ieee->bssid, ETH_ALEN); 332 memcpy(header.addr3, ieee->bssid, ETH_ALEN);
308 } 333 }
309 header.frame_ctl = cpu_to_le16(fc);
310 hdr_len = IEEE80211_3ADDR_LEN; 334 hdr_len = IEEE80211_3ADDR_LEN;
311 335
336 if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
337 fc |= IEEE80211_STYPE_QOS_DATA;
338 hdr_len += 2;
339
340 skb->priority = ieee80211_classify(skb);
341 header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
342 }
343 header.frame_ctl = cpu_to_le16(fc);
344
345 /* Advance the SKB to the start of the payload */
346 skb_pull(skb, sizeof(struct ethhdr));
347
348 /* Determine total amount of storage required for TXB packets */
349 bytes = skb->len + SNAP_SIZE + sizeof(u16);
350
312 /* Encrypt msdu first on the whole data packet. */ 351 /* Encrypt msdu first on the whole data packet. */
313 if ((host_encrypt || host_encrypt_msdu) && 352 if ((host_encrypt || host_encrypt_msdu) &&
314 crypt && crypt->ops && crypt->ops->encrypt_msdu) { 353 crypt && crypt->ops && crypt->ops->encrypt_msdu) {
@@ -402,7 +441,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
402 if (rts_required) { 441 if (rts_required) {
403 skb_frag = txb->fragments[0]; 442 skb_frag = txb->fragments[0];
404 frag_hdr = 443 frag_hdr =
405 (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); 444 (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
406 445
407 /* 446 /*
408 * Set header frame_ctl to the RTS. 447 * Set header frame_ctl to the RTS.
@@ -433,7 +472,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
433 crypt->ops->extra_mpdu_prefix_len); 472 crypt->ops->extra_mpdu_prefix_len);
434 473
435 frag_hdr = 474 frag_hdr =
436 (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); 475 (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
437 memcpy(frag_hdr, &header, hdr_len); 476 memcpy(frag_hdr, &header, hdr_len);
438 477
439 /* If this is not the last fragment, then add the MOREFRAGS 478 /* If this is not the last fragment, then add the MOREFRAGS
@@ -516,7 +555,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
516/* Incoming 802.11 structure is converted to a TXB 555/* Incoming 802.11 structure is converted to a TXB
517 * a block of 802.11 fragment packets (stored as skbs) */ 556 * a block of 802.11 fragment packets (stored as skbs) */
518int ieee80211_tx_frame(struct ieee80211_device *ieee, 557int ieee80211_tx_frame(struct ieee80211_device *ieee,
519 struct ieee80211_hdr *frame, int len) 558 struct ieee80211_hdr *frame, int hdr_len, int total_len,
559 int encrypt_mpdu)
520{ 560{
521 struct ieee80211_txb *txb = NULL; 561 struct ieee80211_txb *txb = NULL;
522 unsigned long flags; 562 unsigned long flags;
@@ -526,6 +566,9 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
526 566
527 spin_lock_irqsave(&ieee->lock, flags); 567 spin_lock_irqsave(&ieee->lock, flags);
528 568
569 if (encrypt_mpdu && !ieee->sec.encrypt)
570 encrypt_mpdu = 0;
571
529 /* If there is no driver handler to take the TXB, don't bother 572 /* If there is no driver handler to take the TXB, don't bother
530 * creating it... */ 573 * creating it... */
531 if (!ieee->hard_start_xmit) { 574 if (!ieee->hard_start_xmit) {
@@ -533,32 +576,41 @@ int ieee80211_tx_frame(struct ieee80211_device *ieee,
533 goto success; 576 goto success;
534 } 577 }
535 578
536 if (unlikely(len < 24)) { 579 if (unlikely(total_len < 24)) {
537 printk(KERN_WARNING "%s: skb too small (%d).\n", 580 printk(KERN_WARNING "%s: skb too small (%d).\n",
538 ieee->dev->name, len); 581 ieee->dev->name, total_len);
539 goto success; 582 goto success;
540 } 583 }
541 584
585 if (encrypt_mpdu)
586 frame->frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
587
542 /* When we allocate the TXB we allocate enough space for the reserve 588 /* When we allocate the TXB we allocate enough space for the reserve
543 * and full fragment bytes (bytes_per_frag doesn't include prefix, 589 * and full fragment bytes (bytes_per_frag doesn't include prefix,
544 * postfix, header, FCS, etc.) */ 590 * postfix, header, FCS, etc.) */
545 txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC); 591 txb = ieee80211_alloc_txb(1, total_len, ieee->tx_headroom, GFP_ATOMIC);
546 if (unlikely(!txb)) { 592 if (unlikely(!txb)) {
547 printk(KERN_WARNING "%s: Could not allocate TXB\n", 593 printk(KERN_WARNING "%s: Could not allocate TXB\n",
548 ieee->dev->name); 594 ieee->dev->name);
549 goto failed; 595 goto failed;
550 } 596 }
551 txb->encrypted = 0; 597 txb->encrypted = 0;
552 txb->payload_size = len; 598 txb->payload_size = total_len;
553 599
554 skb_frag = txb->fragments[0]; 600 skb_frag = txb->fragments[0];
555 601
556 memcpy(skb_put(skb_frag, len), frame, len); 602 memcpy(skb_put(skb_frag, total_len), frame, total_len);
557 603
558 if (ieee->config & 604 if (ieee->config &
559 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) 605 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
560 skb_put(skb_frag, 4); 606 skb_put(skb_frag, 4);
561 607
608 /* To avoid overcomplicating things, we do the corner-case frame
609 * encryption in software. The only real situation where encryption is
610 * needed here is during software-based shared key authentication. */
611 if (encrypt_mpdu)
612 ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
613
562 success: 614 success:
563 spin_unlock_irqrestore(&ieee->lock, flags); 615 spin_unlock_irqrestore(&ieee->lock, flags);
564 616
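A minimal userspace sketch (not part of the patch) of the new ieee80211_classify() logic: map the IPv4 TOS precedence bits to a user priority in the range 0-7. Only the switch is reproduced here; the kernel version first checks that the frame actually carries IPv4.

#include <stdio.h>
#include <stdint.h>

static int classify_tos(uint8_t tos)
{
	/* same precedence-to-priority table as the hunk above */
	switch (tos & 0xfc) {
	case 0x20: return 2;
	case 0x40: return 1;
	case 0x60: return 3;
	case 0x80: return 4;
	case 0xa0: return 5;
	case 0xc0: return 6;
	case 0xe0: return 7;
	default:   return 0;
	}
}

int main(void)
{
	printf("tos 0x00 -> %d\n", classify_tos(0x00));
	printf("tos 0xa0 -> %d\n", classify_tos(0xa0));
	printf("tos 0xe0 -> %d\n", classify_tos(0xe0));
	return 0;
}
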
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index b885fd189403..a78c4f845f66 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -50,7 +50,8 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
50 char *p; 50 char *p;
51 struct iw_event iwe; 51 struct iw_event iwe;
52 int i, j; 52 int i, j;
53 u8 max_rate, rate; 53 char *current_val; /* For rates */
54 u8 rate;
54 55
55 /* First entry *MUST* be the AP MAC address */ 56 /* First entry *MUST* be the AP MAC address */
56 iwe.cmd = SIOCGIWAP; 57 iwe.cmd = SIOCGIWAP;
@@ -107,9 +108,13 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
107 start = iwe_stream_add_point(start, stop, &iwe, network->ssid); 108 start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
108 109
109 /* Add basic and extended rates */ 110 /* Add basic and extended rates */
110 max_rate = 0; 111 /* Rate : stuffing multiple values in a single event require a bit
111 p = custom; 112 * more of magic - Jean II */
112 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); 113 current_val = start + IW_EV_LCP_LEN;
114 iwe.cmd = SIOCGIWRATE;
115 /* Those two flags are ignored... */
116 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
117
113 for (i = 0, j = 0; i < network->rates_len;) { 118 for (i = 0, j = 0; i < network->rates_len;) {
114 if (j < network->rates_ex_len && 119 if (j < network->rates_ex_len &&
115 ((network->rates_ex[j] & 0x7F) < 120 ((network->rates_ex[j] & 0x7F) <
@@ -117,28 +122,21 @@ static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
117 rate = network->rates_ex[j++] & 0x7F; 122 rate = network->rates_ex[j++] & 0x7F;
118 else 123 else
119 rate = network->rates[i++] & 0x7F; 124 rate = network->rates[i++] & 0x7F;
120 if (rate > max_rate) 125 /* Bit rate given in 500 kb/s units (+ 0x80) */
121 max_rate = rate; 126 iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
122 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), 127 /* Add new value to event */
123 "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); 128 current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN);
124 } 129 }
125 for (; j < network->rates_ex_len; j++) { 130 for (; j < network->rates_ex_len; j++) {
126 rate = network->rates_ex[j] & 0x7F; 131 rate = network->rates_ex[j] & 0x7F;
127 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), 132 /* Bit rate given in 500 kb/s units (+ 0x80) */
128 "%d%s ", rate >> 1, (rate & 1) ? ".5" : ""); 133 iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
129 if (rate > max_rate) 134 /* Add new value to event */
130 max_rate = rate; 135 current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN);
131 } 136 }
132 137 /* Check if we added any rate */
133 iwe.cmd = SIOCGIWRATE; 138 if((current_val - start) > IW_EV_LCP_LEN)
134 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; 139 start = current_val;
135 iwe.u.bitrate.value = max_rate * 500000;
136 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_PARAM_LEN);
137
138 iwe.cmd = IWEVCUSTOM;
139 iwe.u.data.length = p - custom;
140 if (iwe.u.data.length)
141 start = iwe_stream_add_point(start, stop, &iwe, custom);
142 140
143 /* Add quality statistics */ 141 /* Add quality statistics */
144 iwe.cmd = IWEVQUAL; 142 iwe.cmd = IWEVQUAL;
@@ -505,7 +503,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
505 len = sec->key_sizes[key]; 503 len = sec->key_sizes[key];
506 memcpy(keybuf, sec->keys[key], len); 504 memcpy(keybuf, sec->keys[key], len);
507 505
508 erq->length = (len >= 0 ? len : 0); 506 erq->length = len;
509 erq->flags |= IW_ENCODE_ENABLED; 507 erq->flags |= IW_ENCODE_ENABLED;
510 508
511 if (ieee->open_wep) 509 if (ieee->open_wep)
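A minimal userspace sketch (not part of the patch) of the per-rate reporting used above: each supported-rate byte carries the rate in 500 kb/s units in its low 7 bits (the high bit marks a basic rate), so the bitrate handed to wireless extensions is (rate & 0x7f) * 500000.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t rates[] = { 0x82, 0x84, 0x0b, 0x16 };	/* 1, 2, 5.5, 11 Mb/s */
	unsigned int i;

	for (i = 0; i < sizeof(rates); i++)
		printf("raw 0x%02x -> %ld bit/s\n",
		       rates[i], (long)(rates[i] & 0x7f) * 500000L);
	return 0;
}
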
diff --git a/net/ieee80211/softmac/Kconfig b/net/ieee80211/softmac/Kconfig
index 6cd9f3427be6..2811651cb134 100644
--- a/net/ieee80211/softmac/Kconfig
+++ b/net/ieee80211/softmac/Kconfig
@@ -1,6 +1,8 @@
1config IEEE80211_SOFTMAC 1config IEEE80211_SOFTMAC
2 tristate "Software MAC add-on to the IEEE 802.11 networking stack" 2 tristate "Software MAC add-on to the IEEE 802.11 networking stack"
3 depends on IEEE80211 && EXPERIMENTAL 3 depends on IEEE80211 && EXPERIMENTAL
4 select WIRELESS_EXT
5 select IEEE80211_CRYPT_WEP
4 ---help--- 6 ---help---
5 This option enables the hardware independent software MAC addon 7 This option enables the hardware independent software MAC addon
6 for the IEEE 802.11 networking stack. 8 for the IEEE 802.11 networking stack.
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index be61de78dfa4..5e9a90651d04 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -51,11 +51,12 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
51 spin_lock_irqsave(&mac->lock, flags); 51 spin_lock_irqsave(&mac->lock, flags);
52 mac->associnfo.associating = 1; 52 mac->associnfo.associating = 1;
53 mac->associated = 0; /* just to make sure */ 53 mac->associated = 0; /* just to make sure */
54 spin_unlock_irqrestore(&mac->lock, flags);
55 54
56 /* Set a timer for timeout */ 55 /* Set a timer for timeout */
57 /* FIXME: make timeout configurable */ 56 /* FIXME: make timeout configurable */
58 schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ); 57 if (likely(mac->running))
58 schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ);
59 spin_unlock_irqrestore(&mac->lock, flags);
59} 60}
60 61
61void 62void
@@ -81,50 +82,52 @@ ieee80211softmac_assoc_timeout(void *d)
81 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL); 82 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT, NULL);
82} 83}
83 84
84/* Sends out a disassociation request to the desired AP */ 85void
85static void 86ieee80211softmac_disassoc(struct ieee80211softmac_device *mac)
86ieee80211softmac_disassoc(struct ieee80211softmac_device *mac, u16 reason)
87{ 87{
88 unsigned long flags; 88 unsigned long flags;
89
90 spin_lock_irqsave(&mac->lock, flags);
91 if (mac->associnfo.associating)
92 cancel_delayed_work(&mac->associnfo.timeout);
93
94 netif_carrier_off(mac->dev);
95
96 mac->associated = 0;
97 mac->associnfo.bssvalid = 0;
98 mac->associnfo.associating = 0;
99 ieee80211softmac_init_txrates(mac);
100 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
101 spin_unlock_irqrestore(&mac->lock, flags);
102}
103
104/* Sends out a disassociation request to the desired AP */
105void
106ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason)
107{
89 struct ieee80211softmac_network *found; 108 struct ieee80211softmac_network *found;
90 109
91 if (mac->associnfo.bssvalid && mac->associated) { 110 if (mac->associnfo.bssvalid && mac->associated) {
92 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); 111 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
93 if (found) 112 if (found)
94 ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason); 113 ieee80211softmac_send_mgt_frame(mac, found, IEEE80211_STYPE_DISASSOC, reason);
95 } else if (mac->associnfo.associating) {
96 cancel_delayed_work(&mac->associnfo.timeout);
97 } 114 }
98 115
99 /* Change our state */ 116 ieee80211softmac_disassoc(mac);
100 spin_lock_irqsave(&mac->lock, flags);
101 /* Do NOT clear bssvalid as that will break ieee80211softmac_assoc_work! */
102 mac->associated = 0;
103 mac->associnfo.associating = 0;
104 spin_unlock_irqrestore(&mac->lock, flags);
105} 117}
106 118
107static inline int 119static inline int
108we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len) 120we_support_all_basic_rates(struct ieee80211softmac_device *mac, u8 *from, u8 from_len)
109{ 121{
110 int idx, search, found; 122 int idx;
111 u8 rate, search_rate; 123 u8 rate;
112 124
113 for (idx = 0; idx < (from_len); idx++) { 125 for (idx = 0; idx < (from_len); idx++) {
114 rate = (from)[idx]; 126 rate = (from)[idx];
115 if (!(rate & IEEE80211_BASIC_RATE_MASK)) 127 if (!(rate & IEEE80211_BASIC_RATE_MASK))
116 continue; 128 continue;
117 found = 0;
118 rate &= ~IEEE80211_BASIC_RATE_MASK; 129 rate &= ~IEEE80211_BASIC_RATE_MASK;
119 for (search = 0; search < mac->ratesinfo.count; search++) { 130 if (!ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate))
120 search_rate = mac->ratesinfo.rates[search];
121 search_rate &= ~IEEE80211_BASIC_RATE_MASK;
122 if (rate == search_rate) {
123 found = 1;
124 break;
125 }
126 }
127 if (!found)
128 return 0; 131 return 0;
129 } 132 }
130 return 1; 133 return 1;
@@ -143,6 +146,12 @@ network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_ne
143 if (!we_support_all_basic_rates(mac, net->rates_ex, net->rates_ex_len)) 146 if (!we_support_all_basic_rates(mac, net->rates_ex, net->rates_ex_len))
144 return 0; 147 return 0;
145 148
149 /* assume that users know what they're doing ...
150 * (note we don't let them select a net we're incompatible with) */
151 if (mac->associnfo.bssfixed) {
152 return !memcmp(mac->associnfo.bssid, net->bssid, ETH_ALEN);
153 }
154
146 /* if 'ANY' network requested, take any that doesn't have privacy enabled */ 155 /* if 'ANY' network requested, take any that doesn't have privacy enabled */
147 if (mac->associnfo.req_essid.len == 0 156 if (mac->associnfo.req_essid.len == 0
148 && !(net->capability & WLAN_CAPABILITY_PRIVACY)) 157 && !(net->capability & WLAN_CAPABILITY_PRIVACY))
@@ -155,12 +164,28 @@ network_matches_request(struct ieee80211softmac_device *mac, struct ieee80211_ne
155} 164}
156 165
157static void 166static void
158ieee80211softmac_assoc_notify(struct net_device *dev, void *context) 167ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context)
159{ 168{
160 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 169 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
161 ieee80211softmac_assoc_work((void*)mac); 170 ieee80211softmac_assoc_work((void*)mac);
162} 171}
163 172
173static void
174ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void *context)
175{
176 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
177
178 switch (event_type) {
179 case IEEE80211SOFTMAC_EVENT_AUTHENTICATED:
180 ieee80211softmac_assoc_work((void*)mac);
181 break;
182 case IEEE80211SOFTMAC_EVENT_AUTH_FAILED:
183 case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT:
184 ieee80211softmac_disassoc(mac);
185 break;
186 }
187}
188
164/* This function is called to handle userspace requests (asynchronously) */ 189/* This function is called to handle userspace requests (asynchronously) */
165void 190void
166ieee80211softmac_assoc_work(void *d) 191ieee80211softmac_assoc_work(void *d)
@@ -168,14 +193,18 @@ ieee80211softmac_assoc_work(void *d)
168 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; 193 struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
169 struct ieee80211softmac_network *found = NULL; 194 struct ieee80211softmac_network *found = NULL;
170 struct ieee80211_network *net = NULL, *best = NULL; 195 struct ieee80211_network *net = NULL, *best = NULL;
196 int bssvalid;
171 unsigned long flags; 197 unsigned long flags;
172 198
199 /* ieee80211_disassoc might clear this */
200 bssvalid = mac->associnfo.bssvalid;
201
173 /* meh */ 202 /* meh */
174 if (mac->associated) 203 if (mac->associated)
175 ieee80211softmac_disassoc(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT); 204 ieee80211softmac_send_disassoc_req(mac, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
176 205
177 /* try to find the requested network in our list, if we found one already */ 206 /* try to find the requested network in our list, if we found one already */
178 if (mac->associnfo.bssvalid) 207 if (bssvalid || mac->associnfo.bssfixed)
179 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid); 208 found = ieee80211softmac_get_network_by_bssid(mac, mac->associnfo.bssid);
180 209
181 /* Search the ieee80211 networks for this network if we didn't find it by bssid, 210 /* Search the ieee80211 networks for this network if we didn't find it by bssid,
@@ -236,23 +265,29 @@ ieee80211softmac_assoc_work(void *d)
236 * Maybe we can hope to have more memory after scanning finishes ;) 265 * Maybe we can hope to have more memory after scanning finishes ;)
237 */ 266 */
238 dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n"); 267 dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n");
239 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify, NULL); 268 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
240 if (ieee80211softmac_start_scan(mac)) 269 if (ieee80211softmac_start_scan(mac))
241 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); 270 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
242 return; 271 return;
243 } 272 } else {
244 else {
245 spin_lock_irqsave(&mac->lock, flags); 273 spin_lock_irqsave(&mac->lock, flags);
246 mac->associnfo.associating = 0; 274 mac->associnfo.associating = 0;
247 mac->associated = 0; 275 mac->associated = 0;
248 spin_unlock_irqrestore(&mac->lock, flags); 276 spin_unlock_irqrestore(&mac->lock, flags);
249 277
250 dprintk(KERN_INFO PFX "Unable to find matching network after scan!\n"); 278 dprintk(KERN_INFO PFX "Unable to find matching network after scan!\n");
279 /* reset the retry counter for the next user request since we
280 * break out and don't reschedule ourselves after this point. */
281 mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
251 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND, NULL); 282 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND, NULL);
252 return; 283 return;
253 } 284 }
254 } 285 }
255 286
287 /* reset the retry counter for the next user request since we
288 * now found a net and will try to associate to it, but not
289 * schedule this function again. */
290 mac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
256 mac->associnfo.bssvalid = 1; 291 mac->associnfo.bssvalid = 1;
257 memcpy(mac->associnfo.bssid, found->bssid, ETH_ALEN); 292 memcpy(mac->associnfo.bssid, found->bssid, ETH_ALEN);
258 /* copy the ESSID for displaying it */ 293 /* copy the ESSID for displaying it */
@@ -265,7 +300,7 @@ ieee80211softmac_assoc_work(void *d)
265 * otherwise adding the notification would be racy. */ 300 * otherwise adding the notification would be racy. */
266 if (!ieee80211softmac_auth_req(mac, found)) { 301 if (!ieee80211softmac_auth_req(mac, found)) {
267 dprintk(KERN_INFO PFX "cannot associate without being authenticated, requested authentication\n"); 302 dprintk(KERN_INFO PFX "cannot associate without being authenticated, requested authentication\n");
268 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify, NULL, GFP_KERNEL); 303 ieee80211softmac_notify_internal(mac, IEEE80211SOFTMAC_EVENT_ANY, found, ieee80211softmac_assoc_notify_auth, NULL, GFP_KERNEL);
269 } else { 304 } else {
270 printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n"); 305 printkl(KERN_WARNING PFX "Not authenticated, but requesting authentication failed. Giving up to associate\n");
271 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found); 306 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED, found);
@@ -283,6 +318,9 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
283 struct ieee80211softmac_network *net) 318 struct ieee80211softmac_network *net)
284{ 319{
285 mac->associnfo.associating = 0; 320 mac->associnfo.associating = 0;
321 mac->associnfo.supported_rates = net->supported_rates;
322 ieee80211softmac_recalc_txrates(mac);
323
286 mac->associated = 1; 324 mac->associated = 1;
287 if (mac->set_bssid_filter) 325 if (mac->set_bssid_filter)
288 mac->set_bssid_filter(mac->dev, net->bssid); 326 mac->set_bssid_filter(mac->dev, net->bssid);
@@ -306,6 +344,9 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
306 u16 status = le16_to_cpup(&resp->status); 344 u16 status = le16_to_cpup(&resp->status);
307 struct ieee80211softmac_network *network = NULL; 345 struct ieee80211softmac_network *network = NULL;
308 unsigned long flags; 346 unsigned long flags;
347
348 if (unlikely(!mac->running))
349 return -ENODEV;
309 350
310 spin_lock_irqsave(&mac->lock, flags); 351 spin_lock_irqsave(&mac->lock, flags);
311 352
@@ -363,19 +404,22 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
363 struct ieee80211_disassoc *disassoc) 404 struct ieee80211_disassoc *disassoc)
364{ 405{
365 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 406 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
366 unsigned long flags; 407
408 if (unlikely(!mac->running))
409 return -ENODEV;
410
367 if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN)) 411 if (memcmp(disassoc->header.addr2, mac->associnfo.bssid, ETH_ALEN))
368 return 0; 412 return 0;
413
369 if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN)) 414 if (memcmp(disassoc->header.addr1, mac->dev->dev_addr, ETH_ALEN))
370 return 0; 415 return 0;
416
371 dprintk(KERN_INFO PFX "got disassoc frame\n"); 417 dprintk(KERN_INFO PFX "got disassoc frame\n");
372 netif_carrier_off(dev); 418 ieee80211softmac_disassoc(mac);
373 spin_lock_irqsave(&mac->lock, flags); 419
374 mac->associnfo.bssvalid = 0; 420 /* try to reassociate */
375 mac->associated = 0;
376 schedule_work(&mac->associnfo.work); 421 schedule_work(&mac->associnfo.work);
377 spin_unlock_irqrestore(&mac->lock, flags); 422
378
379 return 0; 423 return 0;
380} 424}
381 425
@@ -386,11 +430,15 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
386 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 430 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
387 struct ieee80211softmac_network *network; 431 struct ieee80211softmac_network *network;
388 432
433 if (unlikely(!mac->running))
434 return -ENODEV;
435
389 network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3); 436 network = ieee80211softmac_get_network_by_bssid(mac, resp->header.addr3);
390 if (!network) { 437 if (!network) {
391 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); 438 dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
392 return 0; 439 return 0;
393 } 440 }
394 ieee80211softmac_assoc(mac, network); 441 schedule_work(&mac->associnfo.work);
442
395 return 0; 443 return 0;
396} 444}
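A minimal userspace sketch (not part of the patch) of the basic-rates check that the hunk above simplifies: every rate the network flags as basic (high bit set; the mask is assumed to be 0x80, as in IEEE80211_BASIC_RATE_MASK) must also appear in our own rate set, otherwise association is refused.

#include <stdio.h>
#include <stdint.h>

#define BASIC_RATE_MASK 0x80	/* assumed value */

static int rate_supported(const uint8_t *ours, int n, uint8_t rate)
{
	int i;

	for (i = 0; i < n; i++)
		if ((ours[i] & ~BASIC_RATE_MASK) == rate)
			return 1;
	return 0;
}

static int support_all_basic_rates(const uint8_t *ours, int n_ours,
				   const uint8_t *theirs, int n_theirs)
{
	int i;

	for (i = 0; i < n_theirs; i++) {
		if (!(theirs[i] & BASIC_RATE_MASK))
			continue;	/* optional rate, ignore */
		if (!rate_supported(ours, n_ours, theirs[i] & ~BASIC_RATE_MASK))
			return 0;	/* mandatory rate we lack */
	}
	return 1;
}

int main(void)
{
	const uint8_t ours[]   = { 0x02, 0x04, 0x0b, 0x16 };		/* 1/2/5.5/11 */
	const uint8_t theirs[] = { 0x82, 0x84, 0x8b, 0x96, 0x24 };	/* basic 1/2/5.5/11 + 18 */

	printf("compatible: %d\n", support_all_basic_rates(ours, 4, theirs, 5));
	return 0;
}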
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 9a0eac6c61eb..90b8484e509b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -86,6 +86,11 @@ ieee80211softmac_auth_queue(void *data)
86 86
87 /* Lock and set flags */ 87 /* Lock and set flags */
88 spin_lock_irqsave(&mac->lock, flags); 88 spin_lock_irqsave(&mac->lock, flags);
89 if (unlikely(!mac->running)) {
90 /* Prevent reschedule on workqueue flush */
91 spin_unlock_irqrestore(&mac->lock, flags);
92 return;
93 }
89 net->authenticated = 0; 94 net->authenticated = 0;
90 net->authenticating = 1; 95 net->authenticating = 1;
91 /* add a timeout call so we eventually give up waiting for an auth reply */ 96 /* add a timeout call so we eventually give up waiting for an auth reply */
@@ -102,6 +107,7 @@ ieee80211softmac_auth_queue(void *data)
102 printkl(KERN_WARNING PFX "Authentication timed out with "MAC_FMT"\n", MAC_ARG(net->bssid)); 107 printkl(KERN_WARNING PFX "Authentication timed out with "MAC_FMT"\n", MAC_ARG(net->bssid));
103 /* Remove this item from the queue */ 108 /* Remove this item from the queue */
104 spin_lock_irqsave(&mac->lock, flags); 109 spin_lock_irqsave(&mac->lock, flags);
110 net->authenticating = 0;
105 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT, net); 111 ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT, net);
106 cancel_delayed_work(&auth->work); /* just to make sure... */ 112 cancel_delayed_work(&auth->work); /* just to make sure... */
107 list_del(&auth->list); 113 list_del(&auth->list);
@@ -124,6 +130,9 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
124 unsigned long flags; 130 unsigned long flags;
125 u8 * data; 131 u8 * data;
126 132
133 if (unlikely(!mac->running))
134 return -ENODEV;
135
127 /* Find correct auth queue item */ 136 /* Find correct auth queue item */
128 spin_lock_irqsave(&mac->lock, flags); 137 spin_lock_irqsave(&mac->lock, flags);
129 list_for_each(list_ptr, &mac->auth_queue) { 138 list_for_each(list_ptr, &mac->auth_queue) {
@@ -204,13 +213,13 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
204 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 213 aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE;
205 spin_unlock_irqrestore(&mac->lock, flags); 214 spin_unlock_irqrestore(&mac->lock, flags);
206 215
207 /* Switch to correct channel for this network */ 216 /* Send our response */
208 mac->set_channel(mac->dev, net->channel);
209
210 /* Send our response (How to encrypt?) */
211 ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); 217 ieee80211softmac_send_mgt_frame(mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
212 break; 218 return 0;
213 case IEEE80211SOFTMAC_AUTH_SHARED_PASS: 219 case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
220 kfree(net->challenge);
221 net->challenge = NULL;
222 net->challenge_len = 0;
214 /* Check the status code of the response */ 223 /* Check the status code of the response */
215 switch(auth->status) { 224 switch(auth->status) {
216 case WLAN_STATUS_SUCCESS: 225 case WLAN_STATUS_SUCCESS:
@@ -221,6 +230,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
221 spin_unlock_irqrestore(&mac->lock, flags); 230 spin_unlock_irqrestore(&mac->lock, flags);
222 printkl(KERN_NOTICE PFX "Shared Key Authentication completed with "MAC_FMT"\n", 231 printkl(KERN_NOTICE PFX "Shared Key Authentication completed with "MAC_FMT"\n",
223 MAC_ARG(net->bssid)); 232 MAC_ARG(net->bssid));
233 ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net);
224 break; 234 break;
225 default: 235 default:
226 printkl(KERN_NOTICE PFX "Shared Key Authentication with "MAC_FMT" failed, error code: %i\n", 236 printkl(KERN_NOTICE PFX "Shared Key Authentication with "MAC_FMT" failed, error code: %i\n",
@@ -271,6 +281,9 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
271 struct list_head *list_ptr; 281 struct list_head *list_ptr;
272 unsigned long flags; 282 unsigned long flags;
273 283
284 /* deauthentication implies disassociation */
285 ieee80211softmac_disassoc(mac);
286
274 /* Lock and reset status flags */ 287 /* Lock and reset status flags */
275 spin_lock_irqsave(&mac->lock, flags); 288 spin_lock_irqsave(&mac->lock, flags);
276 net->authenticating = 0; 289 net->authenticating = 0;
@@ -298,8 +311,6 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
298 311
299 /* can't transmit data right now... */ 312 /* can't transmit data right now... */
300 netif_carrier_off(mac->dev); 313 netif_carrier_off(mac->dev);
301 /* let's try to re-associate */
302 schedule_work(&mac->associnfo.work);
303 spin_unlock_irqrestore(&mac->lock, flags); 314 spin_unlock_irqrestore(&mac->lock, flags);
304} 315}
305 316
@@ -338,6 +349,9 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
338 struct ieee80211softmac_network *net = NULL; 349 struct ieee80211softmac_network *net = NULL;
339 struct ieee80211softmac_device *mac = ieee80211_priv(dev); 350 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
340 351
352 if (unlikely(!mac->running))
353 return -ENODEV;
354
341 if (!deauth) { 355 if (!deauth) {
342 dprintk("deauth without deauth packet. eek!\n"); 356 dprintk("deauth without deauth packet. eek!\n");
343 return 0; 357 return 0;
@@ -360,5 +374,8 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
360 } 374 }
361 375
362 ieee80211softmac_deauth_from_net(mac, net); 376 ieee80211softmac_deauth_from_net(mac, net);
377
378 /* let's try to re-associate */
379 schedule_work(&mac->associnfo.work);
363 return 0; 380 return 0;
364} 381}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index 0a52bbda1e4c..f34fa2ef666b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -38,7 +38,8 @@
38 * The event context is private and can only be used from 38 * The event context is private and can only be used from
39 * within this module. Its meaning varies with the event 39 * within this module. Its meaning varies with the event
40 * type: 40 * type:
41 * SCAN_FINISHED: no special meaning 41 * SCAN_FINISHED,
42 * DISASSOCIATED: NULL
42 * ASSOCIATED, 43 * ASSOCIATED,
43 * ASSOCIATE_FAILED, 44 * ASSOCIATE_FAILED,
44 * ASSOCIATE_TIMEOUT, 45 * ASSOCIATE_TIMEOUT,
@@ -59,14 +60,15 @@
59 */ 60 */
60 61
61static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { 62static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
62 "scan finished", 63 NULL, /* scan finished */
63 "associated", 64 NULL, /* associated */
64 "associating failed", 65 "associating failed",
65 "associating timed out", 66 "associating timed out",
66 "authenticated", 67 "authenticated",
67 "authenticating failed", 68 "authenticating failed",
68 "authenticating timed out", 69 "authenticating timed out",
69 "associating failed because no suitable network was found", 70 "associating failed because no suitable network was found",
71 NULL, /* disassociated */
70}; 72};
71 73
72 74
@@ -76,7 +78,7 @@ ieee80211softmac_notify_callback(void *d)
76 struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; 78 struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
77 kfree(d); 79 kfree(d);
78 80
79 event.fun(event.mac->dev, event.context); 81 event.fun(event.mac->dev, event.event_type, event.context);
80} 82}
81 83
82int 84int
@@ -128,13 +130,36 @@ void
128ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int event, void *event_ctx) 130ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int event, void *event_ctx)
129{ 131{
130 struct ieee80211softmac_event *eventptr, *tmp; 132 struct ieee80211softmac_event *eventptr, *tmp;
131 union iwreq_data wrqu; 133 struct ieee80211softmac_network *network;
132 char *msg;
133 134
134 if (event >= 0) { 135 if (event >= 0) {
135 msg = event_descriptions[event]; 136 union iwreq_data wrqu;
136 wrqu.data.length = strlen(msg); 137 int we_event;
137 wireless_send_event(mac->dev, IWEVCUSTOM, &wrqu, msg); 138 char *msg = NULL;
139
140 memset(&wrqu, '\0', sizeof (union iwreq_data));
141
142 switch(event) {
143 case IEEE80211SOFTMAC_EVENT_ASSOCIATED:
144 network = (struct ieee80211softmac_network *)event_ctx;
145 memcpy(wrqu.ap_addr.sa_data, &network->bssid[0], ETH_ALEN);
146 /* fall through */
147 case IEEE80211SOFTMAC_EVENT_DISASSOCIATED:
148 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
149 we_event = SIOCGIWAP;
150 break;
151 case IEEE80211SOFTMAC_EVENT_SCAN_FINISHED:
152 we_event = SIOCGIWSCAN;
153 break;
154 default:
155 msg = event_descriptions[event];
156 if (!msg)
157 msg = "SOFTMAC EVENT BUG";
158 wrqu.data.length = strlen(msg);
159 we_event = IWEVCUSTOM;
160 break;
161 }
162 wireless_send_event(mac->dev, we_event, &wrqu, msg);
138 } 163 }
139 164
140 if (!list_empty(&mac->events)) 165 if (!list_empty(&mac->events))
@@ -142,6 +167,9 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
142 if ((eventptr->event_type == event || eventptr->event_type == -1) 167 if ((eventptr->event_type == event || eventptr->event_type == -1)
143 && (eventptr->event_context == NULL || eventptr->event_context == event_ctx)) { 168 && (eventptr->event_context == NULL || eventptr->event_context == event_ctx)) {
144 list_del(&eventptr->list); 169 list_del(&eventptr->list);
170 /* User may have subscribed to ANY event, so
171 * we tell them which event triggered it. */
172 eventptr->event_type = event;
145 schedule_work(&eventptr->work); 173 schedule_work(&eventptr->work);
146 } 174 }
147 } 175 }
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index febc51dbb412..09541611e48c 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -149,6 +149,56 @@ ieee80211softmac_hdr_3addr(struct ieee80211softmac_device *mac,
149 * shouldn't the sequence number be in ieee80211? */ 149 * shouldn't the sequence number be in ieee80211? */
150} 150}
151 151
152static u16
153ieee80211softmac_capabilities(struct ieee80211softmac_device *mac,
154 struct ieee80211softmac_network *net)
155{
156 u16 capability = 0;
157
158 /* ESS and IBSS bits are set according to the current mode */
159 switch (mac->ieee->iw_mode) {
160 case IW_MODE_INFRA:
161 capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
162 break;
163 case IW_MODE_ADHOC:
164 capability = cpu_to_le16(WLAN_CAPABILITY_IBSS);
165 break;
166 case IW_MODE_AUTO:
167 capability = net->capabilities &
168 (WLAN_CAPABILITY_ESS|WLAN_CAPABILITY_IBSS);
169 break;
170 default:
171 /* bleh. we don't ever go to these modes */
172 printk(KERN_ERR PFX "invalid iw_mode!\n");
173 break;
174 }
175
176 /* CF Pollable / CF Poll Request */
177 /* Needs to be implemented, for now, the 0's == not supported */
178
179 /* Privacy Bit */
180 capability |= mac->ieee->sec.level ?
181 cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
182
183 /* Short Preamble */
184 /* Always supported: we probably won't ever be powering devices which
185 * dont support this... */
186 capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
187
188 /* PBCC */
189 /* Not widely used */
190
191 /* Channel Agility */
192 /* Not widely used */
193
194 /* Short Slot */
195 /* Will be implemented later */
196
197 /* DSSS-OFDM */
198 /* Not widely used */
199
200 return capability;
201}
152 202
153/***************************************************************************** 203/*****************************************************************************
154 * Create Management packets 204 * Create Management packets
@@ -179,15 +229,6 @@ ieee80211softmac_assoc_req(struct ieee80211_assoc_request **pkt,
179 return 0; 229 return 0;
180 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid); 230 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_ASSOC_REQ, net->bssid, net->bssid);
181 231
182 /* Fill in capability Info */
183 (*pkt)->capability = (mac->ieee->iw_mode == IW_MODE_MASTER) || (mac->ieee->iw_mode == IW_MODE_INFRA) ?
184 cpu_to_le16(WLAN_CAPABILITY_ESS) :
185 cpu_to_le16(WLAN_CAPABILITY_IBSS);
186 /* Need to add this
187 (*pkt)->capability |= mac->ieee->short_slot ?
188 cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME) : 0;
189 */
190 (*pkt)->capability |= mac->ieee->sec.level ? cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
191 /* Fill in Listen Interval (?) */ 232 /* Fill in Listen Interval (?) */
192 (*pkt)->listen_interval = cpu_to_le16(10); 233 (*pkt)->listen_interval = cpu_to_le16(10);
193 234
@@ -227,17 +268,9 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
227 return 0; 268 return 0;
228 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid); 269 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_REASSOC_REQ, net->bssid, net->bssid);
229 270
230 /* Fill in capability Info */ 271 /* Fill in the capabilities */
231 (*pkt)->capability = mac->ieee->iw_mode == IW_MODE_MASTER ? 272 (*pkt)->capability = ieee80211softmac_capabilities(mac, net);
232 cpu_to_le16(WLAN_CAPABILITY_ESS) : 273
233 cpu_to_le16(WLAN_CAPABILITY_IBSS);
234 /*
235 (*pkt)->capability |= mac->ieee->short_slot ?
236 cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME) : 0;
237 */
238 (*pkt)->capability |= mac->ieee->sec.level ?
239 cpu_to_le16(WLAN_CAPABILITY_PRIVACY) : 0;
240
241 /* Fill in Listen Interval (?) */ 274 /* Fill in Listen Interval (?) */
242 (*pkt)->listen_interval = cpu_to_le16(10); 275 (*pkt)->listen_interval = cpu_to_le16(10);
243 /* Fill in the current AP MAC */ 276 /* Fill in the current AP MAC */
@@ -256,26 +289,27 @@ ieee80211softmac_reassoc_req(struct ieee80211_reassoc_request **pkt,
256static u32 289static u32
257ieee80211softmac_auth(struct ieee80211_auth **pkt, 290ieee80211softmac_auth(struct ieee80211_auth **pkt,
258 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net, 291 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
259 u16 transaction, u16 status) 292 u16 transaction, u16 status, int *encrypt_mpdu)
260{ 293{
261 u8 *data; 294 u8 *data;
295 int auth_mode = mac->ieee->sec.auth_mode;
296 int is_shared_response = (auth_mode == WLAN_AUTH_SHARED_KEY
297 && transaction == IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE);
298
262 /* Allocate Packet */ 299 /* Allocate Packet */
263 (*pkt) = (struct ieee80211_auth *)ieee80211softmac_alloc_mgt( 300 (*pkt) = (struct ieee80211_auth *)ieee80211softmac_alloc_mgt(
264 2 + /* Auth Algorithm */ 301 2 + /* Auth Algorithm */
265 2 + /* Auth Transaction Seq */ 302 2 + /* Auth Transaction Seq */
266 2 + /* Status Code */ 303 2 + /* Status Code */
267 /* Challenge Text IE */ 304 /* Challenge Text IE */
268 mac->ieee->open_wep ? 0 : 305 is_shared_response ? 0 : 1 + 1 + net->challenge_len
269 1 + 1 + WLAN_AUTH_CHALLENGE_LEN 306 );
270 );
271 if (unlikely((*pkt) == NULL)) 307 if (unlikely((*pkt) == NULL))
272 return 0; 308 return 0;
273 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid); 309 ieee80211softmac_hdr_3addr(mac, &((*pkt)->header), IEEE80211_STYPE_AUTH, net->bssid, net->bssid);
274 310
275 /* Algorithm */ 311 /* Algorithm */
276 (*pkt)->algorithm = mac->ieee->open_wep ? 312 (*pkt)->algorithm = cpu_to_le16(auth_mode);
277 cpu_to_le16(WLAN_AUTH_OPEN) :
278 cpu_to_le16(WLAN_AUTH_SHARED_KEY);
279 /* Transaction */ 313 /* Transaction */
280 (*pkt)->transaction = cpu_to_le16(transaction); 314 (*pkt)->transaction = cpu_to_le16(transaction);
281 /* Status */ 315 /* Status */
@@ -283,18 +317,20 @@ ieee80211softmac_auth(struct ieee80211_auth **pkt,
283 317
284 data = (u8 *)(*pkt)->info_element; 318 data = (u8 *)(*pkt)->info_element;
285 /* Challenge Text */ 319 /* Challenge Text */
286 if(!mac->ieee->open_wep){ 320 if (is_shared_response) {
287 *data = MFIE_TYPE_CHALLENGE; 321 *data = MFIE_TYPE_CHALLENGE;
288 data++; 322 data++;
289 323
290 /* Copy the challenge in */ 324 /* Copy the challenge in */
291 // *data = challenge length 325 *data = net->challenge_len;
292 // data += sizeof(u16); 326 data++;
293 // memcpy(data, challenge, challenge length); 327 memcpy(data, net->challenge, net->challenge_len);
294 // data += challenge length; 328 data += net->challenge_len;
295 329
296 /* Add the full size to the packet length */ 330 /* Make sure this frame gets encrypted with the shared key */
297 } 331 *encrypt_mpdu = 1;
332 } else
333 *encrypt_mpdu = 0;
298 334
299 /* Return the packet size */ 335 /* Return the packet size */
300 return (data - (u8 *)(*pkt)); 336 return (data - (u8 *)(*pkt));
@@ -384,6 +420,7 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
384{ 420{
385 void *pkt = NULL; 421 void *pkt = NULL;
386 u32 pkt_size = 0; 422 u32 pkt_size = 0;
423 int encrypt_mpdu = 0;
387 424
388 switch(type) { 425 switch(type) {
389 case IEEE80211_STYPE_ASSOC_REQ: 426 case IEEE80211_STYPE_ASSOC_REQ:
@@ -393,7 +430,7 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
393 pkt_size = ieee80211softmac_reassoc_req((struct ieee80211_reassoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg); 430 pkt_size = ieee80211softmac_reassoc_req((struct ieee80211_reassoc_request **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg);
394 break; 431 break;
395 case IEEE80211_STYPE_AUTH: 432 case IEEE80211_STYPE_AUTH:
396 pkt_size = ieee80211softmac_auth((struct ieee80211_auth **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, (u16)(arg & 0xFFFF), (u16) (arg >> 16)); 433 pkt_size = ieee80211softmac_auth((struct ieee80211_auth **)(&pkt), mac, (struct ieee80211softmac_network *)ptrarg, (u16)(arg & 0xFFFF), (u16) (arg >> 16), &encrypt_mpdu);
397 break; 434 break;
398 case IEEE80211_STYPE_DISASSOC: 435 case IEEE80211_STYPE_DISASSOC:
399 case IEEE80211_STYPE_DEAUTH: 436 case IEEE80211_STYPE_DEAUTH:
@@ -422,52 +459,8 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
422 * or get rid of it altogether? 459 * or get rid of it altogether?
423 * Does this work for you now? 460 * Does this work for you now?
424 */ 461 */
425 ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *)pkt, pkt_size); 462 ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *)pkt,
426 463 IEEE80211_3ADDR_LEN, pkt_size, encrypt_mpdu);
427 kfree(pkt);
428 return 0;
429}
430
431
432/* Create an rts/cts frame */
433static u32
434ieee80211softmac_rts_cts(struct ieee80211_hdr_2addr **pkt,
435 struct ieee80211softmac_device *mac, struct ieee80211softmac_network *net,
436 u32 type)
437{
438 /* Allocate Packet */
439 (*pkt) = kmalloc(IEEE80211_2ADDR_LEN, GFP_ATOMIC);
440 memset(*pkt, 0, IEEE80211_2ADDR_LEN);
441 if((*pkt) == NULL)
442 return 0;
443 ieee80211softmac_hdr_2addr(mac, (*pkt), type, net->bssid);
444 return IEEE80211_2ADDR_LEN;
445}
446
447
448/* Sends a control packet */
449static int
450ieee80211softmac_send_ctl_frame(struct ieee80211softmac_device *mac,
451 struct ieee80211softmac_network *net, u32 type, u32 arg)
452{
453 void *pkt = NULL;
454 u32 pkt_size = 0;
455
456 switch(type) {
457 case IEEE80211_STYPE_RTS:
458 case IEEE80211_STYPE_CTS:
459 pkt_size = ieee80211softmac_rts_cts((struct ieee80211_hdr_2addr **)(&pkt), mac, net, type);
460 break;
461 default:
462 printkl(KERN_DEBUG PFX "Unsupported Control Frame type: %i\n", type);
463 return -EINVAL;
464 }
465
466 if(pkt_size == 0)
467 return -ENOMEM;
468
469 /* Send the packet to the ieee80211 layer for tx */
470 ieee80211_tx_frame(mac->ieee, (struct ieee80211_hdr *) pkt, pkt_size);
471 464
472 kfree(pkt); 465 kfree(pkt);
473 return 0; 466 return 0;
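For reference, the shared-key branch of ieee80211softmac_auth() above writes a standard Challenge Text information element (ID byte, length byte, then the challenge itself). A minimal userspace sketch of that layout follows; buffer and function names are hypothetical stand-ins, and the element ID value 16 is assumed from ieee80211.h rather than taken from this hunk.

#include <stdint.h>
#include <string.h>

#define MFIE_TYPE_CHALLENGE 16          /* assumed 802.11 element ID for Challenge Text */

/* Append a challenge-text IE: ID, length, payload; returns bytes written.
 * Illustrative only - the real code writes into the management frame
 * allocated by ieee80211softmac_alloc_mgt(). */
static size_t put_challenge_ie(uint8_t *data, const uint8_t *challenge,
                               uint8_t challenge_len)
{
        uint8_t *p = data;

        *p++ = MFIE_TYPE_CHALLENGE;     /* element ID */
        *p++ = challenge_len;           /* element length */
        memcpy(p, challenge, challenge_len);
        p += challenge_len;

        return (size_t)(p - data);      /* 2 + challenge_len, as in the hunk */
}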
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 60f06a31f0d1..4b2e57d12418 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -26,6 +26,7 @@
26 26
27#include "ieee80211softmac_priv.h" 27#include "ieee80211softmac_priv.h"
28#include <linux/sort.h> 28#include <linux/sort.h>
29#include <linux/etherdevice.h>
29 30
30struct net_device *alloc_ieee80211softmac(int sizeof_priv) 31struct net_device *alloc_ieee80211softmac(int sizeof_priv)
31{ 32{
@@ -45,6 +46,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
45 softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc; 46 softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc;
46 softmac->scaninfo = NULL; 47 softmac->scaninfo = NULL;
47 48
49 softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
50
48 /* TODO: initialise all the other callbacks in the ieee struct 51 /* TODO: initialise all the other callbacks in the ieee struct
49 * (once they're written) 52 * (once they're written)
50 */ 53 */
@@ -59,14 +62,6 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
59 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; 62 softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
60 softmac->stop_scan = ieee80211softmac_stop_scan_implementation; 63 softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
61 64
62 //TODO: The mcast rate has to be assigned dynamically somewhere (in scanning, association. Not sure...)
63 // It has to be set to the highest rate all stations in the current network can handle.
64 softmac->txrates.mcast_rate = IEEE80211_CCK_RATE_1MB;
65 softmac->txrates.mcast_fallback = IEEE80211_CCK_RATE_1MB;
66 /* This is reassigned in ieee80211softmac_start to sane values. */
67 softmac->txrates.default_rate = IEEE80211_CCK_RATE_1MB;
68 softmac->txrates.default_fallback = IEEE80211_CCK_RATE_1MB;
69
70 /* to start with, we can't send anything ... */ 65 /* to start with, we can't send anything ... */
71 netif_carrier_off(dev); 66 netif_carrier_off(dev);
72 67
@@ -87,6 +82,8 @@ ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm)
87 ieee80211softmac_wait_for_scan(sm); 82 ieee80211softmac_wait_for_scan(sm);
88 83
89 spin_lock_irqsave(&sm->lock, flags); 84 spin_lock_irqsave(&sm->lock, flags);
85 sm->running = 0;
86
90 /* Free all pending assoc work items */ 87 /* Free all pending assoc work items */
91 cancel_delayed_work(&sm->associnfo.work); 88 cancel_delayed_work(&sm->associnfo.work);
92 89
@@ -166,15 +163,82 @@ static void ieee80211softmac_start_check_rates(struct ieee80211softmac_device *m
166 } 163 }
167} 164}
168 165
169void ieee80211softmac_start(struct net_device *dev) 166int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate)
167{
168 int search;
169 u8 search_rate;
170
171 for (search = 0; search < ri->count; search++) {
172 search_rate = ri->rates[search];
173 search_rate &= ~IEEE80211_BASIC_RATE_MASK;
174 if (rate == search_rate)
175 return 1;
176 }
177
178 return 0;
179}
180
181/* Finds the highest rate which is:
182 * 1. Present in ri (optionally a basic rate)
183 * 2. Supported by the device
184 * 3. Less than or equal to the user-defined rate
185 */
186static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
187 struct ieee80211softmac_ratesinfo *ri, int basic_only)
188{
189 u8 user_rate = mac->txrates.user_rate;
190 int i;
191
192 if (ri->count == 0) {
193 dprintk(KERN_ERR PFX "empty ratesinfo?\n");
194 return IEEE80211_CCK_RATE_1MB;
195 }
196
197 for (i = ri->count - 1; i >= 0; i--) {
198 u8 rate = ri->rates[i];
199 if (basic_only && !(rate & IEEE80211_BASIC_RATE_MASK))
200 continue;
201 rate &= ~IEEE80211_BASIC_RATE_MASK;
202 if (rate > user_rate)
203 continue;
204 if (ieee80211softmac_ratesinfo_rate_supported(&mac->ratesinfo, rate))
205 return rate;
206 }
207
208 /* If we haven't found a suitable rate by now, just trust the user */
209 return user_rate;
210}
211
212void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac)
213{
214 struct ieee80211softmac_txrates *txrates = &mac->txrates;
215 struct ieee80211softmac_txrates oldrates;
216 u32 change = 0;
217
218 if (mac->txrates_change)
219 oldrates = mac->txrates;
220
221 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
222 txrates->default_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 0);
223
224 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
225 txrates->default_fallback = lower_rate(mac, txrates->default_rate);
226
227 change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
228 txrates->mcast_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 1);
229
230 if (mac->txrates_change)
231 mac->txrates_change(mac->dev, change, &oldrates);
232
233}
234
235void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
170{ 236{
171 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
172 struct ieee80211_device *ieee = mac->ieee; 237 struct ieee80211_device *ieee = mac->ieee;
173 u32 change = 0; 238 u32 change = 0;
239 struct ieee80211softmac_txrates *txrates = &mac->txrates;
174 struct ieee80211softmac_txrates oldrates; 240 struct ieee80211softmac_txrates oldrates;
175 241
176 ieee80211softmac_start_check_rates(mac);
177
178 /* TODO: We need some kind of state machine to lower the default rates 242 /* TODO: We need some kind of state machine to lower the default rates
179 * if we lose too many packets. 243 * if we lose too many packets.
180 */ 244 */
@@ -189,19 +253,36 @@ void ieee80211softmac_start(struct net_device *dev)
189 more reliable. Note similar logic in 253 more reliable. Note similar logic in
190 ieee80211softmac_wx_set_rate() */ 254 ieee80211softmac_wx_set_rate() */
191 if (ieee->modulation & IEEE80211_CCK_MODULATION) { 255 if (ieee->modulation & IEEE80211_CCK_MODULATION) {
192 mac->txrates.default_rate = IEEE80211_CCK_RATE_11MB; 256 txrates->user_rate = IEEE80211_CCK_RATE_11MB;
193 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
194 mac->txrates.default_fallback = IEEE80211_CCK_RATE_5MB;
195 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
196 } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) { 257 } else if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
197 mac->txrates.default_rate = IEEE80211_OFDM_RATE_54MB; 258 txrates->user_rate = IEEE80211_OFDM_RATE_54MB;
198 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
199 mac->txrates.default_fallback = IEEE80211_OFDM_RATE_24MB;
200 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
201 } else 259 } else
202 assert(0); 260 assert(0);
261
262 txrates->default_rate = IEEE80211_CCK_RATE_1MB;
263 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
264
265 txrates->default_fallback = IEEE80211_CCK_RATE_1MB;
266 change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
267
268 txrates->mcast_rate = IEEE80211_CCK_RATE_1MB;
269 change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
270
271 txrates->mgt_mcast_rate = IEEE80211_CCK_RATE_1MB;
272 change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST;
273
203 if (mac->txrates_change) 274 if (mac->txrates_change)
204 mac->txrates_change(dev, change, &oldrates); 275 mac->txrates_change(mac->dev, change, &oldrates);
276
277 mac->running = 1;
278}
279
280void ieee80211softmac_start(struct net_device *dev)
281{
282 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
283
284 ieee80211softmac_start_check_rates(mac);
285 ieee80211softmac_init_txrates(mac);
205} 286}
206EXPORT_SYMBOL_GPL(ieee80211softmac_start); 287EXPORT_SYMBOL_GPL(ieee80211softmac_start);
207 288
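The new highest_supported_rate()/recalc_txrates() logic above can be exercised in isolation. Below is a small self-contained sketch of the same selection rule; the struct names are hypothetical, IEEE80211_BASIC_RATE_MASK is assumed to be 0x80, and the rate list is assumed to be sorted low-to-high as softmac keeps it.

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_BASIC_RATE_MASK 0x80   /* high bit marks a basic rate (assumed value) */

/* Hypothetical stand-ins for the softmac structures, only to run the
 * selection logic in isolation. */
struct ratesinfo { uint8_t count; uint8_t rates[8]; };

static int rate_supported(const struct ratesinfo *ri, uint8_t rate)
{
        int i;
        for (i = 0; i < ri->count; i++)
                if ((ri->rates[i] & ~IEEE80211_BASIC_RATE_MASK) == rate)
                        return 1;
        return 0;
}

/* Mirrors highest_supported_rate(): highest rate advertised by 'net'
 * (basic rates only if basic_only), supported by 'dev', and not above
 * user_rate; otherwise trust the user's choice. */
static uint8_t pick_rate(const struct ratesinfo *dev,
                         const struct ratesinfo *net,
                         uint8_t user_rate, int basic_only)
{
        int i;
        for (i = net->count - 1; i >= 0; i--) {
                uint8_t rate = net->rates[i];
                if (basic_only && !(rate & IEEE80211_BASIC_RATE_MASK))
                        continue;
                rate &= ~IEEE80211_BASIC_RATE_MASK;
                if (rate > user_rate)
                        continue;
                if (rate_supported(dev, rate))
                        return rate;
        }
        return user_rate;
}

int main(void)
{
        /* rates in 500 kbit/s units: 1, 2, 5.5, 11 Mbit/s; 1 and 2 marked basic */
        struct ratesinfo dev = { 4, { 0x02, 0x04, 0x0b, 0x16 } };
        struct ratesinfo net = { 4, { 0x82, 0x84, 0x0b, 0x16 } };

        printf("unicast: 0x%02x  mcast: 0x%02x\n",
               pick_rate(&dev, &net, 0x16, 0),   /* default_rate  -> 0x16 (11 Mb) */
               pick_rate(&dev, &net, 0x16, 1));  /* mcast_rate    -> 0x04 (2 Mb)  */
        return 0;
}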
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 65d9816c8ecc..fa1f8e3acfc0 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -116,7 +116,10 @@ ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
116 struct ieee80211softmac_essid *essid); 116 struct ieee80211softmac_essid *essid);
117 117
118/* Rates related */ 118/* Rates related */
119int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate);
119u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta); 120u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta);
121void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac);
122void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac);
120static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) { 123static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) {
121 return ieee80211softmac_lower_rate_delta(mac, rate, 1); 124 return ieee80211softmac_lower_rate_delta(mac, rate, 1);
122} 125}
@@ -150,6 +153,8 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
150int ieee80211softmac_handle_reassoc_req(struct net_device * dev, 153int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
151 struct ieee80211_reassoc_request * reassoc); 154 struct ieee80211_reassoc_request * reassoc);
152void ieee80211softmac_assoc_timeout(void *d); 155void ieee80211softmac_assoc_timeout(void *d);
156void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
157void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
153 158
154/* some helper functions */ 159/* some helper functions */
155static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm) 160static inline int ieee80211softmac_scan_handlers_check_self(struct ieee80211softmac_device *sm)
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index bb9ab8b45d09..d31cf77498c4 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -47,6 +47,7 @@ ieee80211softmac_start_scan(struct ieee80211softmac_device *sm)
47 sm->scanning = 1; 47 sm->scanning = 1;
48 spin_unlock_irqrestore(&sm->lock, flags); 48 spin_unlock_irqrestore(&sm->lock, flags);
49 49
50 netif_tx_disable(sm->ieee->dev);
50 ret = sm->start_scan(sm->dev); 51 ret = sm->start_scan(sm->dev);
51 if (ret) { 52 if (ret) {
52 spin_lock_irqsave(&sm->lock, flags); 53 spin_lock_irqsave(&sm->lock, flags);
@@ -114,7 +115,15 @@ void ieee80211softmac_scan(void *d)
114 // TODO: is this if correct, or should we do this only if scanning from assoc request? 115 // TODO: is this if correct, or should we do this only if scanning from assoc request?
115 if (sm->associnfo.req_essid.len) 116 if (sm->associnfo.req_essid.len)
116 ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0); 117 ieee80211softmac_send_mgt_frame(sm, &sm->associnfo.req_essid, IEEE80211_STYPE_PROBE_REQ, 0);
118
119 spin_lock_irqsave(&sm->lock, flags);
120 if (unlikely(!sm->running)) {
121 /* Prevent reschedule on workqueue flush */
122 spin_unlock_irqrestore(&sm->lock, flags);
123 break;
124 }
117 schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY); 125 schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY);
126 spin_unlock_irqrestore(&sm->lock, flags);
118 return; 127 return;
119 } else { 128 } else {
120 dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel); 129 dprintk(PFX "Not probing Channel %d (not allowed here)\n", si->channels[current_channel_idx].channel);
@@ -239,6 +248,7 @@ void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm)
239 if (net) 248 if (net)
240 sm->set_channel(sm->dev, net->channel); 249 sm->set_channel(sm->dev, net->channel);
241 } 250 }
251 netif_wake_queue(sm->ieee->dev);
242 ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL); 252 ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL);
243} 253}
244EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished); 254EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished);
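The scan changes above pair netif_tx_disable()/netif_wake_queue() around the scan and, more importantly, re-check sm->running under the lock before re-arming the delayed work, so a workqueue flush in clear_pending_work() cannot race with a reschedule. A toy pthread sketch of that flag-under-lock pattern; all names here are stand-ins, not kernel API.

#include <pthread.h>
#include <stdio.h>

/* Models the sm->running check: the worker only re-arms itself while the
 * device is still running, and the teardown path clears the flag under
 * the same lock before flushing, so no new work can slip in afterwards. */
struct softmac_model {
        pthread_mutex_t lock;
        int running;
};

static int try_reschedule(struct softmac_model *sm)
{
        int rearm;

        pthread_mutex_lock(&sm->lock);
        rearm = sm->running;            /* would call schedule_delayed_work() here */
        pthread_mutex_unlock(&sm->lock);
        return rearm;
}

int main(void)
{
        struct softmac_model sm = { PTHREAD_MUTEX_INITIALIZER, 1 };

        printf("running: reschedule=%d\n", try_reschedule(&sm));

        pthread_mutex_lock(&sm.lock);
        sm.running = 0;                 /* like ieee80211softmac_clear_pending_work() */
        pthread_mutex_unlock(&sm.lock);

        printf("stopped: reschedule=%d\n", try_reschedule(&sm));
        return 0;
}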
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index b559aa9b5507..22aa6199185b 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -27,7 +27,8 @@
27#include "ieee80211softmac_priv.h" 27#include "ieee80211softmac_priv.h"
28 28
29#include <net/iw_handler.h> 29#include <net/iw_handler.h>
30 30/* for is_broadcast_ether_addr and is_zero_ether_addr */
31#include <linux/etherdevice.h>
31 32
32int 33int
33ieee80211softmac_wx_trigger_scan(struct net_device *net_dev, 34ieee80211softmac_wx_trigger_scan(struct net_device *net_dev,
@@ -41,13 +42,23 @@ ieee80211softmac_wx_trigger_scan(struct net_device *net_dev,
41EXPORT_SYMBOL_GPL(ieee80211softmac_wx_trigger_scan); 42EXPORT_SYMBOL_GPL(ieee80211softmac_wx_trigger_scan);
42 43
43 44
45/* if we're still scanning, return -EAGAIN so that userspace tools
46 * can get the complete scan results, otherwise return 0. */
44int 47int
45ieee80211softmac_wx_get_scan_results(struct net_device *net_dev, 48ieee80211softmac_wx_get_scan_results(struct net_device *net_dev,
46 struct iw_request_info *info, 49 struct iw_request_info *info,
47 union iwreq_data *data, 50 union iwreq_data *data,
48 char *extra) 51 char *extra)
49{ 52{
53 unsigned long flags;
50 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); 54 struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
55
56 spin_lock_irqsave(&sm->lock, flags);
57 if (sm->scanning) {
58 spin_unlock_irqrestore(&sm->lock, flags);
59 return -EAGAIN;
60 }
61 spin_unlock_irqrestore(&sm->lock, flags);
51 return ieee80211_wx_get_scan(sm->ieee, info, data, extra); 62 return ieee80211_wx_get_scan(sm->ieee, info, data, extra);
52} 63}
53EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_scan_results); 64EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_scan_results);
@@ -73,7 +84,6 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
73 sm->associnfo.static_essid = 1; 84 sm->associnfo.static_essid = 1;
74 } 85 }
75 } 86 }
76 sm->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
77 87
78 /* set our requested ESSID length. 88 /* set our requested ESSID length.
79 * If applicable, we have already copied the data in */ 89 * If applicable, we have already copied the data in */
@@ -201,8 +211,8 @@ ieee80211softmac_wx_set_rate(struct net_device *net_dev,
201 if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION)) 211 if (is_ofdm && !(ieee->modulation & IEEE80211_OFDM_MODULATION))
202 goto out_unlock; 212 goto out_unlock;
203 213
204 mac->txrates.default_rate = rate; 214 mac->txrates.user_rate = rate;
205 mac->txrates.default_fallback = lower_rate(mac, rate); 215 ieee80211softmac_recalc_txrates(mac);
206 err = 0; 216 err = 0;
207 217
208out_unlock: 218out_unlock:
@@ -300,8 +310,6 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
300 char *extra) 310 char *extra)
301{ 311{
302 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); 312 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
303 static const unsigned char any[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
304 static const unsigned char off[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
305 unsigned long flags; 313 unsigned long flags;
306 314
307 /* sanity check */ 315 /* sanity check */
@@ -310,10 +318,17 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
310 } 318 }
311 319
312 spin_lock_irqsave(&mac->lock, flags); 320 spin_lock_irqsave(&mac->lock, flags);
313 if (!memcmp(any, data->ap_addr.sa_data, ETH_ALEN) || 321 if (is_broadcast_ether_addr(data->ap_addr.sa_data)) {
314 !memcmp(off, data->ap_addr.sa_data, ETH_ALEN)) { 322 /* the bssid we have is not to be fixed any longer,
315 schedule_work(&mac->associnfo.work); 323 * and we should reassociate to the best AP. */
316 goto out; 324 mac->associnfo.bssfixed = 0;
325 /* force reassociation */
326 mac->associnfo.bssvalid = 0;
327 if (mac->associated)
328 schedule_work(&mac->associnfo.work);
329 } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
330 /* the bssid we have is no longer fixed */
331 mac->associnfo.bssfixed = 0;
317 } else { 332 } else {
318 if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) { 333 if (!memcmp(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN)) {
319 if (mac->associnfo.associating || mac->associated) { 334 if (mac->associnfo.associating || mac->associated) {
@@ -323,12 +338,14 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
323 } else { 338 } else {
324 /* copy new value in data->ap_addr.sa_data to bssid */ 339 /* copy new value in data->ap_addr.sa_data to bssid */
325 memcpy(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN); 340 memcpy(mac->associnfo.bssid, data->ap_addr.sa_data, ETH_ALEN);
326 } 341 }
342 /* tell the other code that this bssid should be used no matter what */
343 mac->associnfo.bssfixed = 1;
327 /* queue associate if new bssid or (old one again and not associated) */ 344 /* queue associate if new bssid or (old one again and not associated) */
328 schedule_work(&mac->associnfo.work); 345 schedule_work(&mac->associnfo.work);
329 } 346 }
330 347
331out: 348 out:
332 spin_unlock_irqrestore(&mac->lock, flags); 349 spin_unlock_irqrestore(&mac->lock, flags);
333 return 0; 350 return 0;
334} 351}
@@ -414,3 +431,35 @@ ieee80211softmac_wx_get_genie(struct net_device *dev,
414} 431}
415EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie); 432EXPORT_SYMBOL_GPL(ieee80211softmac_wx_get_genie);
416 433
434int
435ieee80211softmac_wx_set_mlme(struct net_device *dev,
436 struct iw_request_info *info,
437 union iwreq_data *wrqu,
438 char *extra)
439{
440 struct ieee80211softmac_device *mac = ieee80211_priv(dev);
441 struct iw_mlme *mlme = (struct iw_mlme *)extra;
442 u16 reason = cpu_to_le16(mlme->reason_code);
443 struct ieee80211softmac_network *net;
444
445 if (memcmp(mac->associnfo.bssid, mlme->addr.sa_data, ETH_ALEN)) {
446 printk(KERN_DEBUG PFX "wx_set_mlme: requested operation on net we don't use\n");
447 return -EINVAL;
448 }
449
450 switch (mlme->cmd) {
451 case IW_MLME_DEAUTH:
452 net = ieee80211softmac_get_network_by_bssid_locked(mac, mlme->addr.sa_data);
453 if (!net) {
454 printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n");
455 return -EINVAL;
456 }
457 return ieee80211softmac_deauth_req(mac, net, reason);
458 case IW_MLME_DISASSOC:
459 ieee80211softmac_send_disassoc_req(mac, reason);
460 return 0;
461 default:
462 return -EOPNOTSUPP;
463 }
464}
465EXPORT_SYMBOL_GPL(ieee80211softmac_wx_set_mlme);
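ieee80211softmac_wx_set_wap() now relies on is_broadcast_ether_addr()/is_zero_ether_addr() from <linux/etherdevice.h> instead of local any[]/off[] arrays. A userspace approximation of those helpers, only to spell out the two special-case addresses the handler reacts to:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static int is_broadcast(const uint8_t *a)       /* ff:ff:ff:ff:ff:ff */
{
        int i;
        for (i = 0; i < ETH_ALEN; i++)
                if (a[i] != 0xff)
                        return 0;
        return 1;
}

static int is_zero(const uint8_t *a)            /* 00:00:00:00:00:00 */
{
        int i;
        for (i = 0; i < ETH_ALEN; i++)
                if (a[i] != 0x00)
                        return 0;
        return 1;
}

int main(void)
{
        uint8_t any[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        uint8_t off[ETH_ALEN] = { 0 };

        /* In the hunk above, "any" clears bssfixed and forces reassociation,
         * while "off" only clears bssfixed. */
        printf("broadcast=%d zero=%d\n", is_broadcast(any), is_zero(off));
        return 0;
}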
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dc206f1f914f..0a277453526b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1257,7 +1257,7 @@ out_unregister_udp_proto:
1257 goto out; 1257 goto out;
1258} 1258}
1259 1259
1260module_init(inet_init); 1260fs_initcall(inet_init);
1261 1261
1262/* ------------------------------------------------------------------------ */ 1262/* ------------------------------------------------------------------------ */
1263 1263
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 041dadde31af..4749d504c629 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -928,7 +928,8 @@ static void parp_redo(struct sk_buff *skb)
928 * Receive an arp request from the device layer. 928 * Receive an arp request from the device layer.
929 */ 929 */
930 930
931int arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 931static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
932 struct packet_type *pt, struct net_device *orig_dev)
932{ 933{
933 struct arphdr *arp; 934 struct arphdr *arp;
934 935
@@ -1417,7 +1418,6 @@ static int __init arp_proc_init(void)
1417 1418
1418EXPORT_SYMBOL(arp_broken_ops); 1419EXPORT_SYMBOL(arp_broken_ops);
1419EXPORT_SYMBOL(arp_find); 1420EXPORT_SYMBOL(arp_find);
1420EXPORT_SYMBOL(arp_rcv);
1421EXPORT_SYMBOL(arp_create); 1421EXPORT_SYMBOL(arp_create);
1422EXPORT_SYMBOL(arp_xmit); 1422EXPORT_SYMBOL(arp_xmit);
1423EXPORT_SYMBOL(arp_send); 1423EXPORT_SYMBOL(arp_send);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 81c2f7885292..54419b27686f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1556,7 +1556,6 @@ void __init devinet_init(void)
1556#endif 1556#endif
1557} 1557}
1558 1558
1559EXPORT_SYMBOL(devinet_ioctl);
1560EXPORT_SYMBOL(in_dev_finish_destroy); 1559EXPORT_SYMBOL(in_dev_finish_destroy);
1561EXPORT_SYMBOL(inet_select_addr); 1560EXPORT_SYMBOL(inet_select_addr);
1562EXPORT_SYMBOL(inetdev_by_index); 1561EXPORT_SYMBOL(inetdev_by_index);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4e3d3811dea2..cdde96390960 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -666,4 +666,3 @@ void __init ip_fib_init(void)
666} 666}
667 667
668EXPORT_SYMBOL(inet_addr_type); 668EXPORT_SYMBOL(inet_addr_type);
669EXPORT_SYMBOL(ip_rt_ioctl);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ccd3efc6a173..95a639f2e3db 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -50,7 +50,7 @@
50 * Patrick McHardy <kaber@trash.net> 50 * Patrick McHardy <kaber@trash.net>
51 */ 51 */
52 52
53#define VERSION "0.406" 53#define VERSION "0.407"
54 54
55#include <linux/config.h> 55#include <linux/config.h>
56#include <asm/uaccess.h> 56#include <asm/uaccess.h>
@@ -314,11 +314,6 @@ static void __leaf_free_rcu(struct rcu_head *head)
314 kfree(container_of(head, struct leaf, rcu)); 314 kfree(container_of(head, struct leaf, rcu));
315} 315}
316 316
317static inline void free_leaf(struct leaf *leaf)
318{
319 call_rcu(&leaf->rcu, __leaf_free_rcu);
320}
321
322static void __leaf_info_free_rcu(struct rcu_head *head) 317static void __leaf_info_free_rcu(struct rcu_head *head)
323{ 318{
324 kfree(container_of(head, struct leaf_info, rcu)); 319 kfree(container_of(head, struct leaf_info, rcu));
@@ -357,7 +352,12 @@ static void __tnode_free_rcu(struct rcu_head *head)
357 352
358static inline void tnode_free(struct tnode *tn) 353static inline void tnode_free(struct tnode *tn)
359{ 354{
360 call_rcu(&tn->rcu, __tnode_free_rcu); 355 if(IS_LEAF(tn)) {
356 struct leaf *l = (struct leaf *) tn;
357 call_rcu_bh(&l->rcu, __leaf_free_rcu);
358 }
359 else
360 call_rcu(&tn->rcu, __tnode_free_rcu);
361} 361}
362 362
363static struct leaf *leaf_new(void) 363static struct leaf *leaf_new(void)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 9831fd2c73a0..2a0455911ee0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops)
1107 struct inet_sock *inet; 1107 struct inet_sock *inet;
1108 int i; 1108 int i;
1109 1109
1110 for_each_cpu(i) { 1110 for_each_possible_cpu(i) {
1111 int err; 1111 int err;
1112 1112
1113 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, 1113 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index ef7366fc132f..ee9b5515b9ae 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -43,8 +43,6 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
43 return tb; 43 return tb;
44} 44}
45 45
46EXPORT_SYMBOL(inet_bind_bucket_create);
47
48/* 46/*
49 * Caller must hold hashbucket lock for this tb with local BH disabled 47 * Caller must hold hashbucket lock for this tb with local BH disabled
50 */ 48 */
@@ -64,8 +62,6 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
64 inet_csk(sk)->icsk_bind_hash = tb; 62 inet_csk(sk)->icsk_bind_hash = tb;
65} 63}
66 64
67EXPORT_SYMBOL(inet_bind_hash);
68
69/* 65/*
70 * Get rid of any references to a local port held by the given sock. 66 * Get rid of any references to a local port held by the given sock.
71 */ 67 */
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 0923add122b4..9f0bb529ab70 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -116,6 +116,7 @@ sr_failed:
116 116
117too_many_hops: 117too_many_hops:
118 /* Tell the sender its packet died... */ 118 /* Tell the sender its packet died... */
119 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
119 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); 120 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
120drop: 121drop:
121 kfree_skb(skb); 122 kfree_skb(skb);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 2a8adda15e11..da734c439179 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -304,13 +304,17 @@ out:
304 304
305/* Creation primitives. */ 305/* Creation primitives. */
306 306
307static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in) 307static struct ipq *ip_frag_intern(struct ipq *qp_in)
308{ 308{
309 struct ipq *qp; 309 struct ipq *qp;
310#ifdef CONFIG_SMP 310#ifdef CONFIG_SMP
311 struct hlist_node *n; 311 struct hlist_node *n;
312#endif 312#endif
313 unsigned int hash;
314
313 write_lock(&ipfrag_lock); 315 write_lock(&ipfrag_lock);
316 hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
317 qp_in->protocol);
314#ifdef CONFIG_SMP 318#ifdef CONFIG_SMP
315 /* With SMP race we have to recheck hash table, because 319 /* With SMP race we have to recheck hash table, because
316 * such entry could be created on other cpu, while we 320 * such entry could be created on other cpu, while we
@@ -345,7 +349,7 @@ static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
345} 349}
346 350
347/* Add an entry to the 'ipq' queue for a newly received IP datagram. */ 351/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
348static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user) 352static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
349{ 353{
350 struct ipq *qp; 354 struct ipq *qp;
351 355
@@ -371,7 +375,7 @@ static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
371 spin_lock_init(&qp->lock); 375 spin_lock_init(&qp->lock);
372 atomic_set(&qp->refcnt, 1); 376 atomic_set(&qp->refcnt, 1);
373 377
374 return ip_frag_intern(hash, qp); 378 return ip_frag_intern(qp);
375 379
376out_nomem: 380out_nomem:
377 LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n"); 381 LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
@@ -387,11 +391,12 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
387 __u32 saddr = iph->saddr; 391 __u32 saddr = iph->saddr;
388 __u32 daddr = iph->daddr; 392 __u32 daddr = iph->daddr;
389 __u8 protocol = iph->protocol; 393 __u8 protocol = iph->protocol;
390 unsigned int hash = ipqhashfn(id, saddr, daddr, protocol); 394 unsigned int hash;
391 struct ipq *qp; 395 struct ipq *qp;
392 struct hlist_node *n; 396 struct hlist_node *n;
393 397
394 read_lock(&ipfrag_lock); 398 read_lock(&ipfrag_lock);
399 hash = ipqhashfn(id, saddr, daddr, protocol);
395 hlist_for_each_entry(qp, n, &ipq_hash[hash], list) { 400 hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
396 if(qp->id == id && 401 if(qp->id == id &&
397 qp->saddr == saddr && 402 qp->saddr == saddr &&
@@ -405,7 +410,7 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
405 } 410 }
406 read_unlock(&ipfrag_lock); 411 read_unlock(&ipfrag_lock);
407 412
408 return ip_frag_create(hash, iph, user); 413 return ip_frag_create(iph, user);
409} 414}
410 415
411/* Is the fragment too far ahead to be part of ipq? */ 416/* Is the fragment too far ahead to be part of ipq? */
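The ip_fragment.c change moves the ipqhashfn() call inside ipfrag_lock. The likely reason (an assumption here, not stated in the hunk) is that the hash is salted with a secret that the periodic rehash rewrites under the write lock, so a bucket index computed before taking the lock can go stale. A toy model of computing the hash only after acquiring the lock; the mixing function and names are stand-ins.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t ipfrag_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint32_t ipfrag_hash_rnd = 0x12345678;   /* hypothetical seed, rewritten on rehash */

static unsigned int ipqhashfn(uint32_t id, uint32_t saddr,
                              uint32_t daddr, uint8_t prot)
{
        /* stand-in mix, not the kernel's jhash-based function */
        return (id ^ saddr ^ daddr ^ prot ^ ipfrag_hash_rnd) & 63;
}

static unsigned int find_bucket(uint32_t id, uint32_t saddr,
                                uint32_t daddr, uint8_t prot)
{
        unsigned int hash;

        pthread_rwlock_rdlock(&ipfrag_lock);
        hash = ipqhashfn(id, saddr, daddr, prot);   /* seed cannot change while held */
        /* ... walk ipq_hash[hash] here ... */
        pthread_rwlock_unlock(&ipfrag_lock);
        return hash;
}

int main(void)
{
        printf("bucket %u\n", find_bucket(1, 0x0a000001, 0x0a000002, 17));
        return 0;
}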
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9981dcd68f11..ab99bebdcdc8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -656,7 +656,7 @@ static int ipgre_rcv(struct sk_buff *skb)
656 read_unlock(&ipgre_lock); 656 read_unlock(&ipgre_lock);
657 return(0); 657 return(0);
658 } 658 }
659 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0); 659 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
660 660
661drop: 661drop:
662 read_unlock(&ipgre_lock); 662 read_unlock(&ipgre_lock);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 18d7fad474d7..c9026dbf4c93 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -337,7 +337,7 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
337 * Initialise the virtual path cache for the packet. It describes 337 * Initialise the virtual path cache for the packet. It describes
338 * how the packet travels inside Linux networking. 338 * how the packet travels inside Linux networking.
339 */ 339 */
340 if (likely(skb->dst == NULL)) { 340 if (skb->dst == NULL) {
341 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, 341 int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
342 skb->dev); 342 skb->dev);
343 if (unlikely(err)) { 343 if (unlikely(err)) {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 9bebad07bf2e..cbcae6544622 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -209,7 +209,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
209 209
210void ip_options_fragment(struct sk_buff * skb) 210void ip_options_fragment(struct sk_buff * skb)
211{ 211{
212 unsigned char * optptr = skb->nh.raw; 212 unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr);
213 struct ip_options * opt = &(IPCB(skb)->opt); 213 struct ip_options * opt = &(IPCB(skb)->opt);
214 int l = opt->optlen; 214 int l = opt->optlen;
215 int optlen; 215 int optlen;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f75ff1d96551..cff9c3a72daf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -86,8 +86,6 @@
86 86
87int sysctl_ip_default_ttl = IPDEFTTL; 87int sysctl_ip_default_ttl = IPDEFTTL;
88 88
89static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*));
90
91/* Generate a checksum for an outgoing IP datagram. */ 89/* Generate a checksum for an outgoing IP datagram. */
92__inline__ void ip_send_check(struct iphdr *iph) 90__inline__ void ip_send_check(struct iphdr *iph)
93{ 91{
@@ -421,7 +419,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
421 * single device frame, and queue such a frame for sending. 419 * single device frame, and queue such a frame for sending.
422 */ 420 */
423 421
424static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*)) 422int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
425{ 423{
426 struct iphdr *iph; 424 struct iphdr *iph;
427 int raw = 0; 425 int raw = 0;
@@ -673,6 +671,8 @@ fail:
673 return err; 671 return err;
674} 672}
675 673
674EXPORT_SYMBOL(ip_fragment);
675
676int 676int
677ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) 677ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
678{ 678{
@@ -904,7 +904,7 @@ alloc_new_skb:
904 * because we have no idea what fragment will be 904 * because we have no idea what fragment will be
905 * the last. 905 * the last.
906 */ 906 */
907 if (datalen == length) 907 if (datalen == length + fraggap)
908 alloclen += rt->u.dst.trailer_len; 908 alloclen += rt->u.dst.trailer_len;
909 909
910 if (transhdrlen) { 910 if (transhdrlen) {
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 0a1d86a0f632..95278b22b669 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -210,7 +210,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
210 skb->h.icmph->code != ICMP_FRAG_NEEDED) 210 skb->h.icmph->code != ICMP_FRAG_NEEDED)
211 return; 211 return;
212 212
213 spi = ntohl(ntohs(ipch->cpi)); 213 spi = htonl(ntohs(ipch->cpi));
214 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, 214 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
215 spi, IPPROTO_COMP, AF_INET); 215 spi, IPPROTO_COMP, AF_INET);
216 if (!x) 216 if (!x)
@@ -290,11 +290,8 @@ static void ipcomp_free_scratches(void)
290 if (!scratches) 290 if (!scratches)
291 return; 291 return;
292 292
293 for_each_cpu(i) { 293 for_each_possible_cpu(i)
294 void *scratch = *per_cpu_ptr(scratches, i); 294 vfree(*per_cpu_ptr(scratches, i));
295 if (scratch)
296 vfree(scratch);
297 }
298 295
299 free_percpu(scratches); 296 free_percpu(scratches);
300} 297}
@@ -313,7 +310,7 @@ static void **ipcomp_alloc_scratches(void)
313 310
314 ipcomp_scratches = scratches; 311 ipcomp_scratches = scratches;
315 312
316 for_each_cpu(i) { 313 for_each_possible_cpu(i) {
317 void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); 314 void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
318 if (!scratch) 315 if (!scratch)
319 return NULL; 316 return NULL;
@@ -344,7 +341,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
344 if (!tfms) 341 if (!tfms)
345 return; 342 return;
346 343
347 for_each_cpu(cpu) { 344 for_each_possible_cpu(cpu) {
348 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 345 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
349 crypto_free_tfm(tfm); 346 crypto_free_tfm(tfm);
350 } 347 }
@@ -384,7 +381,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
384 if (!tfms) 381 if (!tfms)
385 goto error; 382 goto error;
386 383
387 for_each_cpu(cpu) { 384 for_each_possible_cpu(cpu) {
388 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 385 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
389 if (!tfm) 386 if (!tfm)
390 goto error; 387 goto error;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eef07b0916a3..ea398ee43f28 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -474,9 +474,6 @@ static int ipip_rcv(struct sk_buff *skb)
474 struct iphdr *iph; 474 struct iphdr *iph;
475 struct ip_tunnel *tunnel; 475 struct ip_tunnel *tunnel;
476 476
477 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
478 goto out;
479
480 iph = skb->nh.iph; 477 iph = skb->nh.iph;
481 478
482 read_lock(&ipip_lock); 479 read_lock(&ipip_lock);
@@ -508,7 +505,6 @@ static int ipip_rcv(struct sk_buff *skb)
508 } 505 }
509 read_unlock(&ipip_lock); 506 read_unlock(&ipip_lock);
510 507
511out:
512 return -1; 508 return -1;
513} 509}
514 510
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index b5ad9ac2fbcc..6a9e34b794bc 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -133,7 +133,7 @@ struct ip_rt_info {
133 u_int8_t tos; 133 u_int8_t tos;
134}; 134};
135 135
136static void queue_save(const struct sk_buff *skb, struct nf_info *info) 136static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info)
137{ 137{
138 struct ip_rt_info *rt_info = nf_info_reroute(info); 138 struct ip_rt_info *rt_info = nf_info_reroute(info);
139 139
@@ -146,7 +146,7 @@ static void queue_save(const struct sk_buff *skb, struct nf_info *info)
146 } 146 }
147} 147}
148 148
149static int queue_reroute(struct sk_buff **pskb, const struct nf_info *info) 149static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info)
150{ 150{
151 const struct ip_rt_info *rt_info = nf_info_reroute(info); 151 const struct ip_rt_info *rt_info = nf_info_reroute(info);
152 152
@@ -161,20 +161,54 @@ static int queue_reroute(struct sk_buff **pskb, const struct nf_info *info)
161 return 0; 161 return 0;
162} 162}
163 163
164static struct nf_queue_rerouter ip_reroute = { 164unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
165 .rer_size = sizeof(struct ip_rt_info), 165 unsigned int dataoff, u_int8_t protocol)
166 .save = queue_save, 166{
167 .reroute = queue_reroute, 167 struct iphdr *iph = skb->nh.iph;
168 unsigned int csum = 0;
169
170 switch (skb->ip_summed) {
171 case CHECKSUM_HW:
172 if (hook != NF_IP_PRE_ROUTING && hook != NF_IP_LOCAL_IN)
173 break;
174 if ((protocol == 0 && !(u16)csum_fold(skb->csum)) ||
175 !csum_tcpudp_magic(iph->saddr, iph->daddr,
176 skb->len - dataoff, protocol,
177 skb->csum)) {
178 skb->ip_summed = CHECKSUM_UNNECESSARY;
179 break;
180 }
181 /* fall through */
182 case CHECKSUM_NONE:
183 if (protocol == 0)
184 skb->csum = 0;
185 else
186 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
187 skb->len - dataoff,
188 protocol, 0);
189 csum = __skb_checksum_complete(skb);
190 }
191 return csum;
192}
193
194EXPORT_SYMBOL(nf_ip_checksum);
195
196static struct nf_afinfo nf_ip_afinfo = {
197 .family = AF_INET,
198 .checksum = nf_ip_checksum,
199 .saveroute = nf_ip_saveroute,
200 .reroute = nf_ip_reroute,
201 .route_key_size = sizeof(struct ip_rt_info),
168}; 202};
169 203
170static int ipv4_netfilter_init(void) 204static int ipv4_netfilter_init(void)
171{ 205{
172 return nf_register_queue_rerouter(PF_INET, &ip_reroute); 206 return nf_register_afinfo(&nf_ip_afinfo);
173} 207}
174 208
175static void ipv4_netfilter_fini(void) 209static void ipv4_netfilter_fini(void)
176{ 210{
177 nf_unregister_queue_rerouter(PF_INET); 211 nf_unregister_afinfo(&nf_ip_afinfo);
178} 212}
179 213
180module_init(ipv4_netfilter_init); 214module_init(ipv4_netfilter_init);
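nf_ip_checksum() above decides, per skb->ip_summed state, whether a hardware checksum can be trusted or the pseudo-header sum must be seeded and completed in software. A simplified, self-contained version of the ones'-complement fold it ultimately depends on is shown below; byte handling and helper names are simplified and are not the kernel implementation.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Fold a 32-bit running sum to 16 bits and complement it: a packet whose
 * checksum field is correct verifies to zero. */
static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Sum 16-bit words (simplified big-endian assembly of bytes). */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
        size_t i;
        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;
        return sum;
}

int main(void)
{
        /* a 4-byte payload whose last two bytes carry its own checksum */
        uint8_t pkt[4] = { 0x12, 0x34, 0x00, 0x00 };
        uint16_t c = csum_fold(csum_partial(pkt, 4, 0));

        pkt[2] = c >> 8;
        pkt[3] = c & 0xff;
        printf("verifies to zero: %s\n",
               csum_fold(csum_partial(pkt, 4, 0)) == 0 ? "yes" : "no");
        return 0;
}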
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 77855ccd6b43..d4072533da21 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -69,6 +69,7 @@ config IP_NF_CONNTRACK_NETLINK
69 tristate 'Connection tracking netlink interface (EXPERIMENTAL)' 69 tristate 'Connection tracking netlink interface (EXPERIMENTAL)'
70 depends on EXPERIMENTAL && IP_NF_CONNTRACK && NETFILTER_NETLINK 70 depends on EXPERIMENTAL && IP_NF_CONNTRACK && NETFILTER_NETLINK
71 depends on IP_NF_CONNTRACK!=y || NETFILTER_NETLINK!=m 71 depends on IP_NF_CONNTRACK!=y || NETFILTER_NETLINK!=m
72 depends on IP_NF_NAT=n || IP_NF_NAT
72 help 73 help
73 This option enables support for a netlink-based userspace interface 74 This option enables support for a netlink-based userspace interface
74 75
@@ -169,8 +170,8 @@ config IP_NF_PPTP
169 Documentation/modules.txt. If unsure, say `N'. 170 Documentation/modules.txt. If unsure, say `N'.
170 171
171config IP_NF_H323 172config IP_NF_H323
172 tristate 'H.323 protocol support' 173 tristate 'H.323 protocol support (EXPERIMENTAL)'
173 depends on IP_NF_CONNTRACK 174 depends on IP_NF_CONNTRACK && EXPERIMENTAL
174 help 175 help
175 H.323 is a VoIP signalling protocol from ITU-T. As one of the most 176 H.323 is a VoIP signalling protocol from ITU-T. As one of the most
176 important VoIP protocols, it is widely used by voice hardware and 177 important VoIP protocols, it is widely used by voice hardware and
@@ -344,7 +345,7 @@ config IP_NF_TARGET_LOG
344 To compile it as a module, choose M here. If unsure, say N. 345 To compile it as a module, choose M here. If unsure, say N.
345 346
346config IP_NF_TARGET_ULOG 347config IP_NF_TARGET_ULOG
347 tristate "ULOG target support (OBSOLETE)" 348 tristate "ULOG target support"
348 depends on IP_NF_IPTABLES 349 depends on IP_NF_IPTABLES
349 ---help--- 350 ---help---
350 351
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a44a5d73457d..d0d19192026d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -646,7 +646,7 @@ static int translate_table(const char *name,
646 } 646 }
647 647
648 /* And one copy for every other CPU */ 648 /* And one copy for every other CPU */
649 for_each_cpu(i) { 649 for_each_possible_cpu(i) {
650 if (newinfo->entries[i] && newinfo->entries[i] != entry0) 650 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
651 memcpy(newinfo->entries[i], entry0, newinfo->size); 651 memcpy(newinfo->entries[i], entry0, newinfo->size);
652 } 652 }
@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t,
696 counters, 696 counters,
697 &i); 697 &i);
698 698
699 for_each_cpu(cpu) { 699 for_each_possible_cpu(cpu) {
700 if (cpu == curcpu) 700 if (cpu == curcpu)
701 continue; 701 continue;
702 i = 0; 702 i = 0;
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len)
948 948
949 write_lock_bh(&t->lock); 949 write_lock_bh(&t->lock);
950 private = t->private; 950 private = t->private;
951 if (private->number != paddc->num_counters) { 951 if (private->number != tmp.num_counters) {
952 ret = -EINVAL; 952 ret = -EINVAL;
953 goto unlock_up_free; 953 goto unlock_up_free;
954 } 954 }
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index d0d379c7df9a..d7c472faa53b 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -181,33 +181,26 @@ static struct nf_hook_ops arpt_ops[] = {
181 181
182static int __init arptable_filter_init(void) 182static int __init arptable_filter_init(void)
183{ 183{
184 int ret, i; 184 int ret;
185 185
186 /* Register table */ 186 /* Register table */
187 ret = arpt_register_table(&packet_filter, &initial_table.repl); 187 ret = arpt_register_table(&packet_filter, &initial_table.repl);
188 if (ret < 0) 188 if (ret < 0)
189 return ret; 189 return ret;
190 190
191 for (i = 0; i < ARRAY_SIZE(arpt_ops); i++) 191 ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
192 if ((ret = nf_register_hook(&arpt_ops[i])) < 0) 192 if (ret < 0)
193 goto cleanup_hooks; 193 goto cleanup_table;
194 return ret; 194 return ret;
195 195
196cleanup_hooks: 196cleanup_table:
197 while (--i >= 0)
198 nf_unregister_hook(&arpt_ops[i]);
199
200 arpt_unregister_table(&packet_filter); 197 arpt_unregister_table(&packet_filter);
201 return ret; 198 return ret;
202} 199}
203 200
204static void __exit arptable_filter_fini(void) 201static void __exit arptable_filter_fini(void)
205{ 202{
206 unsigned int i; 203 nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
207
208 for (i = 0; i < ARRAY_SIZE(arpt_ops); i++)
209 nf_unregister_hook(&arpt_ops[i]);
210
211 arpt_unregister_table(&packet_filter); 204 arpt_unregister_table(&packet_filter);
212} 205}
213 206
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index ceaabc18202b..a297da7bbef5 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void)
133 struct ip_conntrack_ecache *ecache; 133 struct ip_conntrack_ecache *ecache;
134 int cpu; 134 int cpu;
135 135
136 for_each_cpu(cpu) { 136 for_each_possible_cpu(cpu) {
137 ecache = &per_cpu(ip_conntrack_ecache, cpu); 137 ecache = &per_cpu(ip_conntrack_ecache, cpu);
138 if (ecache->ct) 138 if (ecache->ct)
139 ip_conntrack_put(ecache->ct); 139 ip_conntrack_put(ecache->ct);
@@ -1318,6 +1318,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
1318 .tuple.dst.u.tcp.port; 1318 .tuple.dst.u.tcp.port;
1319 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] 1319 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
1320 .tuple.dst.ip; 1320 .tuple.dst.ip;
1321 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
1321 1322
1322 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 1323 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
1323 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); 1324 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
index daeb1395faa4..518f581d39ec 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c
@@ -9,37 +9,6 @@
9 * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 9 * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
10 * 10 *
11 * For more information, please see http://nath323.sourceforge.net/ 11 * For more information, please see http://nath323.sourceforge.net/
12 *
13 * Changes:
14 * 2006-02-01 - initial version 0.1
15 *
16 * 2006-02-20 - version 0.2
17 * 1. Changed source format to follow kernel conventions
18 * 2. Deleted some unnecessary structures
19 * 3. Minor fixes
20 *
21 * 2006-03-10 - version 0.3
22 * 1. Added support for multiple TPKTs in one packet (suggested by
23 * Patrick McHardy)
24 * 2. Avoid excessive stack usage (based on Patrick McHardy's patch)
25 * 3. Added support for non-linear skb (based on Patrick McHardy's patch)
26 * 4. Fixed missing H.245 module owner (Patrick McHardy)
27 * 5. Avoid long RAS expectation chains (Patrick McHardy)
28 * 6. Fixed incorrect __exit attribute (Patrick McHardy)
29 * 7. Eliminated unnecessary return code
30 * 8. Fixed incorrect use of NAT data from conntrack code (suggested by
31 * Patrick McHardy)
32 * 9. Fixed TTL calculation error in RCF
33 * 10. Added TTL support in RRQ
34 * 11. Better support for separate TPKT header and data
35 *
36 * 2006-03-15 - version 0.4
37 * 1. Added support for T.120 channels
38 * 2. Added parameter gkrouted_only (suggested by Patrick McHardy)
39 * 3. Splitted ASN.1 code and data (suggested by Patrick McHardy)
40 * 4. Sort ASN.1 data to avoid forwarding declarations (suggested by
41 * Patrick McHardy)
42 * 5. Reset next TPKT data length in get_tpkt_data()
43 */ 12 */
44 13
45#include <linux/config.h> 14#include <linux/config.h>
@@ -54,8 +23,6 @@
54#include <linux/netfilter_ipv4/ip_conntrack_h323.h> 23#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
55#include <linux/moduleparam.h> 24#include <linux/moduleparam.h>
56 25
57#include "ip_conntrack_helper_h323_asn1.h"
58
59#if 0 26#if 0
60#define DEBUGP printk 27#define DEBUGP printk
61#else 28#else
@@ -63,6 +30,10 @@
63#endif 30#endif
64 31
65/* Parameters */ 32/* Parameters */
33static unsigned int default_rrq_ttl = 300;
34module_param(default_rrq_ttl, uint, 0600);
35MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ");
36
66static int gkrouted_only = 1; 37static int gkrouted_only = 1;
67module_param(gkrouted_only, int, 0600); 38module_param(gkrouted_only, int, 0600);
68MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); 39MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper");
@@ -191,6 +162,8 @@ static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct,
191 162
192 /* Validate TPKT length */ 163 /* Validate TPKT length */
193 tpktlen = tpkt[2] * 256 + tpkt[3]; 164 tpktlen = tpkt[2] * 256 + tpkt[3];
165 if (tpktlen < 4)
166 goto clear_out;
194 if (tpktlen > tcpdatalen) { 167 if (tpktlen > tcpdatalen) {
195 if (tcpdatalen == 4) { /* Separate TPKT header */ 168 if (tcpdatalen == 4) { /* Separate TPKT header */
196 /* Netmeeting sends TPKT header and data separately */ 169 /* Netmeeting sends TPKT header and data separately */
@@ -222,8 +195,8 @@ static int get_tpkt_data(struct sk_buff **pskb, struct ip_conntrack *ct,
222} 195}
223 196
224/****************************************************************************/ 197/****************************************************************************/
225int get_h245_addr(unsigned char *data, H245_TransportAddress * addr, 198static int get_h245_addr(unsigned char *data, H245_TransportAddress * addr,
226 u_int32_t * ip, u_int16_t * port) 199 u_int32_t * ip, u_int16_t * port)
227{ 200{
228 unsigned char *p; 201 unsigned char *p;
229 202
@@ -1302,7 +1275,7 @@ static int process_rrq(struct sk_buff **pskb, struct ip_conntrack *ct,
1302 DEBUGP("ip_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive); 1275 DEBUGP("ip_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive);
1303 info->timeout = rrq->timeToLive; 1276 info->timeout = rrq->timeToLive;
1304 } else 1277 } else
1305 info->timeout = 0; 1278 info->timeout = default_rrq_ttl;
1306 1279
1307 return 0; 1280 return 0;
1308} 1281}
@@ -1713,18 +1686,17 @@ static int __init init(void)
1713module_init(init); 1686module_init(init);
1714module_exit(fini); 1687module_exit(fini);
1715 1688
1716EXPORT_SYMBOL(get_h245_addr); 1689EXPORT_SYMBOL_GPL(get_h225_addr);
1717EXPORT_SYMBOL(get_h225_addr); 1690EXPORT_SYMBOL_GPL(ip_conntrack_h245_expect);
1718EXPORT_SYMBOL(ip_conntrack_h245_expect); 1691EXPORT_SYMBOL_GPL(ip_conntrack_q931_expect);
1719EXPORT_SYMBOL(ip_conntrack_q931_expect); 1692EXPORT_SYMBOL_GPL(set_h245_addr_hook);
1720EXPORT_SYMBOL(set_h245_addr_hook); 1693EXPORT_SYMBOL_GPL(set_h225_addr_hook);
1721EXPORT_SYMBOL(set_h225_addr_hook); 1694EXPORT_SYMBOL_GPL(set_sig_addr_hook);
1722EXPORT_SYMBOL(set_sig_addr_hook); 1695EXPORT_SYMBOL_GPL(set_ras_addr_hook);
1723EXPORT_SYMBOL(set_ras_addr_hook); 1696EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook);
1724EXPORT_SYMBOL(nat_rtp_rtcp_hook); 1697EXPORT_SYMBOL_GPL(nat_t120_hook);
1725EXPORT_SYMBOL(nat_t120_hook); 1698EXPORT_SYMBOL_GPL(nat_h245_hook);
1726EXPORT_SYMBOL(nat_h245_hook); 1699EXPORT_SYMBOL_GPL(nat_q931_hook);
1727EXPORT_SYMBOL(nat_q931_hook);
1728 1700
1729MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); 1701MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>");
1730MODULE_DESCRIPTION("H.323 connection tracking helper"); 1702MODULE_DESCRIPTION("H.323 connection tracking helper");
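The new "tpktlen < 4" check in get_tpkt_data() guards against a TPKT header that advertises a total length smaller than the 4-byte header itself. A small standalone sketch of TPKT parsing under that rule; the function name and caplen parameter are hypothetical, and the version byte 0x03 is the usual RFC 1006 value rather than something taken from this hunk.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* TPKT header: version, reserved, 16-bit big-endian total length that
 * includes the 4-byte header itself.  Returns payload length or -1. */
static int tpkt_payload_len(const uint8_t *tpkt, size_t caplen)
{
        unsigned int tpktlen;

        if (caplen < 4 || tpkt[0] != 0x03)
                return -1;
        tpktlen = tpkt[2] * 256 + tpkt[3];
        if (tpktlen < 4 || tpktlen > caplen)    /* the new lower-bound check */
                return -1;
        return (int)(tpktlen - 4);
}

int main(void)
{
        uint8_t good[] = { 0x03, 0x00, 0x00, 0x08, 0xde, 0xad, 0xbe, 0xef };
        uint8_t bad[]  = { 0x03, 0x00, 0x00, 0x02 };    /* length < header size */

        printf("good: %d bytes, bad: %d\n",
               tpkt_payload_len(good, sizeof(good)),
               tpkt_payload_len(bad, sizeof(bad)));
        return 0;
}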
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
index afa525129b51..26dfecadb335 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c
@@ -2,7 +2,7 @@
2 * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 2 * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323
3 * conntrack/NAT module. 3 * conntrack/NAT module.
4 * 4 *
5 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@hotmail.com> 5 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
6 * 6 *
7 * This source code is licensed under General Public License version 2. 7 * This source code is licensed under General Public License version 2.
8 * 8 *
@@ -15,7 +15,7 @@
15#else 15#else
16#include <stdio.h> 16#include <stdio.h>
17#endif 17#endif
18#include "ip_conntrack_helper_h323_asn1.h" 18#include <linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h>
19 19
20/* Trace Flag */ 20/* Trace Flag */
21#ifndef H323_TRACE 21#ifndef H323_TRACE
@@ -528,14 +528,15 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
528 528
529 /* Decode */ 529 /* Decode */
530 if ((err = (Decoders[son->type]) (bs, son, base, 530 if ((err = (Decoders[son->type]) (bs, son, base,
531 level + 1)) > 531 level + 1)) <
532 H323_ERROR_STOP) 532 H323_ERROR_NONE)
533 return err; 533 return err;
534 534
535 bs->cur = beg + len; 535 bs->cur = beg + len;
536 bs->bit = 0; 536 bs->bit = 0;
537 } else if ((err = (Decoders[son->type]) (bs, son, base, 537 } else if ((err = (Decoders[son->type]) (bs, son, base,
538 level + 1))) 538 level + 1)) <
539 H323_ERROR_NONE)
539 return err; 540 return err;
540 } 541 }
541 542
@@ -554,7 +555,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
554 555
555 /* Decode the extension components */ 556 /* Decode the extension components */
556 for (opt = 0; opt < bmp2_len; opt++, i++, son++) { 557 for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
557 if (son->attr & STOP) { 558 if (i < f->ub && son->attr & STOP) {
558 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 559 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
559 son->name); 560 son->name);
560 return H323_ERROR_STOP; 561 return H323_ERROR_STOP;
@@ -584,8 +585,8 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
584 beg = bs->cur; 585 beg = bs->cur;
585 586
586 if ((err = (Decoders[son->type]) (bs, son, base, 587 if ((err = (Decoders[son->type]) (bs, son, base,
587 level + 1)) > 588 level + 1)) <
588 H323_ERROR_STOP) 589 H323_ERROR_NONE)
589 return err; 590 return err;
590 591
591 bs->cur = beg + len; 592 bs->cur = beg + len;
@@ -660,18 +661,20 @@ int decode_seqof(bitstr_t * bs, field_t * f, char *base, int level)
660 i < 661 i <
661 effective_count ? 662 effective_count ?
662 base : NULL, 663 base : NULL,
663 level + 1)) > 664 level + 1)) <
664 H323_ERROR_STOP) 665 H323_ERROR_NONE)
665 return err; 666 return err;
666 667
667 bs->cur = beg + len; 668 bs->cur = beg + len;
668 bs->bit = 0; 669 bs->bit = 0;
669 } else 670 } else
670 if ((err = (Decoders[son->type]) (bs, son, 671 if ((err = (Decoders[son->type]) (bs, son,
671 i < effective_count ? 672 i <
672 base : NULL, 673 effective_count ?
673 level + 1))) 674 base : NULL,
674 return err; 675 level + 1)) <
676 H323_ERROR_NONE)
677 return err;
675 678
676 if (base) 679 if (base)
677 base += son->offset; 680 base += son->offset;
@@ -703,6 +706,10 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
703 type = get_bits(bs, f->sz); 706 type = get_bits(bs, f->sz);
704 } 707 }
705 708
709 /* Write Type */
710 if (base)
711 *(unsigned *) base = type;
712
706 /* Check Range */ 713 /* Check Range */
707 if (type >= f->ub) { /* Newer version? */ 714 if (type >= f->ub) { /* Newer version? */
708 BYTE_ALIGN(bs); 715 BYTE_ALIGN(bs);
@@ -712,10 +719,6 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
712 return H323_ERROR_NONE; 719 return H323_ERROR_NONE;
713 } 720 }
714 721
715 /* Write Type */
716 if (base)
717 *(unsigned *) base = type;
718
719 /* Transfer to son level */ 722 /* Transfer to son level */
720 son = &f->fields[type]; 723 son = &f->fields[type];
721 if (son->attr & STOP) { 724 if (son->attr & STOP) {
@@ -735,13 +738,14 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
735 } 738 }
736 beg = bs->cur; 739 beg = bs->cur;
737 740
738 if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) > 741 if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) <
739 H323_ERROR_STOP) 742 H323_ERROR_NONE)
740 return err; 743 return err;
741 744
742 bs->cur = beg + len; 745 bs->cur = beg + len;
743 bs->bit = 0; 746 bs->bit = 0;
744 } else if ((err = (Decoders[son->type]) (bs, son, base, level + 1))) 747 } else if ((err = (Decoders[son->type]) (bs, son, base, level + 1)) <
748 H323_ERROR_NONE)
745 return err; 749 return err;
746 750
747 return H323_ERROR_NONE; 751 return H323_ERROR_NONE;
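The flipped comparisons above (from "> H323_ERROR_STOP" to "< H323_ERROR_NONE") follow the error convention of the helper's ASN.1 header, reproduced in the removed file below: success is 0, STOP is a positive non-error, and real failures are negative, so only negative codes should abort a decode. A tiny classification sketch of that convention:

#include <stdio.h>

#define H323_ERROR_NONE   0
#define H323_ERROR_STOP   1
#define H323_ERROR_BOUND (-1)
#define H323_ERROR_RANGE (-2)

static const char *classify(int err)
{
        if (err < H323_ERROR_NONE)
                return "abort decoding";        /* what the new checks catch */
        if (err == H323_ERROR_STOP)
                return "stop, but not an error";
        return "continue";
}

int main(void)
{
        int codes[] = { H323_ERROR_NONE, H323_ERROR_STOP,
                        H323_ERROR_BOUND, H323_ERROR_RANGE };
        int i;

        for (i = 0; i < 4; i++)
                printf("%2d -> %s\n", codes[i], classify(codes[i]));
        return 0;
}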
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.h b/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.h
deleted file mode 100644
index 0bd828081c0c..000000000000
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.h
+++ /dev/null
@@ -1,98 +0,0 @@
1/****************************************************************************
2 * ip_conntrack_helper_h323_asn1.h - BER and PER decoding library for H.323
3 * conntrack/NAT module.
4 *
5 * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@hotmail.com>
6 *
7 * This source code is licensed under General Public License version 2.
8 *
9 *
10 * This library is based on H.225 version 4, H.235 version 2 and H.245
11 * version 7. It is extremely optimized to decode only the absolutely
12 * necessary objects in a signal for Linux kernel NAT module use, so don't
13 * expect it to be a full ASN.1 library.
14 *
15 * Features:
16 *
17 * 1. Small. The total size of code plus data is less than 20 KB (IA32).
18 * 2. Fast. Decoding Netmeeting's Setup signal 1 million times on a PIII 866
19 * takes only 3.9 seconds.
20 * 3. No memory allocation. It uses a static object. No need to initialize or
21 * cleanup.
22 * 4. Thread safe.
23 * 5. Support embedded architectures that has no misaligned memory access
24 * support.
25 *
26 * Limitations:
27 *
28 * 1. At most 30 faststart entries. Actually this is limited by ethernet's MTU.
29 * If a Setup signal contains more than 30 faststart, the packet size will
30 * very likely exceed the MTU size, then the TPKT will be fragmented. I
31 * don't know how to handle this in a Netfilter module. Anybody can help?
32 * Although I think 30 is enough for most of the cases.
33 * 2. IPv4 addresses only.
34 *
35 ****************************************************************************/
36
37#ifndef _IP_CONNTRACK_HELPER_H323_ASN1_H_
38#define _IP_CONNTRACK_HELPER_H323_ASN1_H_
39
40/*****************************************************************************
41 * H.323 Types
42 ****************************************************************************/
43#include "ip_conntrack_helper_h323_types.h"
44
45typedef struct {
46 enum {
47 Q931_NationalEscape = 0x00,
48 Q931_Alerting = 0x01,
49 Q931_CallProceeding = 0x02,
50 Q931_Connect = 0x07,
51 Q931_ConnectAck = 0x0F,
52 Q931_Progress = 0x03,
53 Q931_Setup = 0x05,
54 Q931_SetupAck = 0x0D,
55 Q931_Resume = 0x26,
56 Q931_ResumeAck = 0x2E,
57 Q931_ResumeReject = 0x22,
58 Q931_Suspend = 0x25,
59 Q931_SuspendAck = 0x2D,
60 Q931_SuspendReject = 0x21,
61 Q931_UserInformation = 0x20,
62 Q931_Disconnect = 0x45,
63 Q931_Release = 0x4D,
64 Q931_ReleaseComplete = 0x5A,
65 Q931_Restart = 0x46,
66 Q931_RestartAck = 0x4E,
67 Q931_Segment = 0x60,
68 Q931_CongestionCtrl = 0x79,
69 Q931_Information = 0x7B,
70 Q931_Notify = 0x6E,
71 Q931_Status = 0x7D,
72 Q931_StatusEnquiry = 0x75,
73 Q931_Facility = 0x62
74 } MessageType;
75 H323_UserInformation UUIE;
76} Q931;
77
78/*****************************************************************************
79 * Decode Functions Return Codes
80 ****************************************************************************/
81
82#define H323_ERROR_NONE 0 /* Decoded successfully */
83#define H323_ERROR_STOP 1 /* Decoding stopped, not really an error */
84#define H323_ERROR_BOUND -1
85#define H323_ERROR_RANGE -2
86
87
88/*****************************************************************************
89 * Decode Functions
90 ****************************************************************************/
91
92int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras);
93int DecodeQ931(unsigned char *buf, size_t sz, Q931 * q931);
94int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
95 MultimediaSystemControlMessage *
96 mscm);
97
98#endif
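For orientation, a rough sketch of how a caller consumes the decode interface declared above. This is illustrative only: the example function, its tpkt/tpktlen parameters and the early-return policy are assumptions rather than code from the patch; the type and member names are taken from the headers shown here.

/* Sketch of a caller; tpkt/tpktlen are assumed to hold one Q.931
 * payload already extracted from the TCP stream. */
static int example_process_q931(unsigned char *tpkt, size_t tpktlen)
{
	static Q931 q931;	/* decoder fills a caller-supplied object */
	int ret;

	ret = DecodeQ931(tpkt, tpktlen, &q931);
	if (ret < H323_ERROR_NONE)	/* negative codes are hard failures */
		return ret;
	if (q931.MessageType == Q931_Setup) {
		/* inspect q931.UUIE.h323_uu_pdu.h323_message_body.setup,
		 * e.g. its fastStart list and call signal addresses */
	}
	return 0;
}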
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_types.h b/net/ipv4/netfilter/ip_conntrack_helper_h323_types.h
deleted file mode 100644
index cc98f7aa5abe..000000000000
--- a/net/ipv4/netfilter/ip_conntrack_helper_h323_types.h
+++ /dev/null
@@ -1,938 +0,0 @@
1/* Generated by Jing Min Zhao's ASN.1 parser, Mar 15 2006
2 *
3 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
4 *
5 * This source code is licensed under General Public License version 2.
6 */
7
8typedef struct TransportAddress_ipAddress { /* SEQUENCE */
9 int options; /* No use */
10 unsigned ip;
11} TransportAddress_ipAddress;
12
13typedef struct TransportAddress { /* CHOICE */
14 enum {
15 eTransportAddress_ipAddress,
16 eTransportAddress_ipSourceRoute,
17 eTransportAddress_ipxAddress,
18 eTransportAddress_ip6Address,
19 eTransportAddress_netBios,
20 eTransportAddress_nsap,
21 eTransportAddress_nonStandardAddress,
22 } choice;
23 union {
24 TransportAddress_ipAddress ipAddress;
25 };
26} TransportAddress;
27
28typedef struct DataProtocolCapability { /* CHOICE */
29 enum {
30 eDataProtocolCapability_nonStandard,
31 eDataProtocolCapability_v14buffered,
32 eDataProtocolCapability_v42lapm,
33 eDataProtocolCapability_hdlcFrameTunnelling,
34 eDataProtocolCapability_h310SeparateVCStack,
35 eDataProtocolCapability_h310SingleVCStack,
36 eDataProtocolCapability_transparent,
37 eDataProtocolCapability_segmentationAndReassembly,
38 eDataProtocolCapability_hdlcFrameTunnelingwSAR,
39 eDataProtocolCapability_v120,
40 eDataProtocolCapability_separateLANStack,
41 eDataProtocolCapability_v76wCompression,
42 eDataProtocolCapability_tcp,
43 eDataProtocolCapability_udp,
44 } choice;
45} DataProtocolCapability;
46
47typedef struct DataApplicationCapability_application { /* CHOICE */
48 enum {
49 eDataApplicationCapability_application_nonStandard,
50 eDataApplicationCapability_application_t120,
51 eDataApplicationCapability_application_dsm_cc,
52 eDataApplicationCapability_application_userData,
53 eDataApplicationCapability_application_t84,
54 eDataApplicationCapability_application_t434,
55 eDataApplicationCapability_application_h224,
56 eDataApplicationCapability_application_nlpid,
57 eDataApplicationCapability_application_dsvdControl,
58 eDataApplicationCapability_application_h222DataPartitioning,
59 eDataApplicationCapability_application_t30fax,
60 eDataApplicationCapability_application_t140,
61 eDataApplicationCapability_application_t38fax,
62 eDataApplicationCapability_application_genericDataCapability,
63 } choice;
64 union {
65 DataProtocolCapability t120;
66 };
67} DataApplicationCapability_application;
68
69typedef struct DataApplicationCapability { /* SEQUENCE */
70 int options; /* No use */
71 DataApplicationCapability_application application;
72} DataApplicationCapability;
73
74typedef struct DataType { /* CHOICE */
75 enum {
76 eDataType_nonStandard,
77 eDataType_nullData,
78 eDataType_videoData,
79 eDataType_audioData,
80 eDataType_data,
81 eDataType_encryptionData,
82 eDataType_h235Control,
83 eDataType_h235Media,
84 eDataType_multiplexedStream,
85 } choice;
86 union {
87 DataApplicationCapability data;
88 };
89} DataType;
90
91typedef struct UnicastAddress_iPAddress { /* SEQUENCE */
92 int options; /* No use */
93 unsigned network;
94} UnicastAddress_iPAddress;
95
96typedef struct UnicastAddress { /* CHOICE */
97 enum {
98 eUnicastAddress_iPAddress,
99 eUnicastAddress_iPXAddress,
100 eUnicastAddress_iP6Address,
101 eUnicastAddress_netBios,
102 eUnicastAddress_iPSourceRouteAddress,
103 eUnicastAddress_nsap,
104 eUnicastAddress_nonStandardAddress,
105 } choice;
106 union {
107 UnicastAddress_iPAddress iPAddress;
108 };
109} UnicastAddress;
110
111typedef struct H245_TransportAddress { /* CHOICE */
112 enum {
113 eH245_TransportAddress_unicastAddress,
114 eH245_TransportAddress_multicastAddress,
115 } choice;
116 union {
117 UnicastAddress unicastAddress;
118 };
119} H245_TransportAddress;
120
121typedef struct H2250LogicalChannelParameters { /* SEQUENCE */
122 enum {
123 eH2250LogicalChannelParameters_nonStandard = (1 << 31),
124 eH2250LogicalChannelParameters_associatedSessionID =
125 (1 << 30),
126 eH2250LogicalChannelParameters_mediaChannel = (1 << 29),
127 eH2250LogicalChannelParameters_mediaGuaranteedDelivery =
128 (1 << 28),
129 eH2250LogicalChannelParameters_mediaControlChannel =
130 (1 << 27),
131 eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery
132 = (1 << 26),
133 eH2250LogicalChannelParameters_silenceSuppression = (1 << 25),
134 eH2250LogicalChannelParameters_destination = (1 << 24),
135 eH2250LogicalChannelParameters_dynamicRTPPayloadType =
136 (1 << 23),
137 eH2250LogicalChannelParameters_mediaPacketization = (1 << 22),
138 eH2250LogicalChannelParameters_transportCapability =
139 (1 << 21),
140 eH2250LogicalChannelParameters_redundancyEncoding = (1 << 20),
141 eH2250LogicalChannelParameters_source = (1 << 19),
142 } options;
143 H245_TransportAddress mediaChannel;
144 H245_TransportAddress mediaControlChannel;
145} H2250LogicalChannelParameters;
146
147typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters { /* CHOICE */
148 enum {
149 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters,
150 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters,
151 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters,
152 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
153 eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none,
154 } choice;
155 union {
156 H2250LogicalChannelParameters h2250LogicalChannelParameters;
157 };
158} OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters;
159
160typedef struct OpenLogicalChannel_forwardLogicalChannelParameters { /* SEQUENCE */
161 enum {
162 eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber
163 = (1 << 31),
164 eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency
165 = (1 << 30),
166 eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor
167 = (1 << 29),
168 } options;
169 DataType dataType;
170 OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters
171 multiplexParameters;
172} OpenLogicalChannel_forwardLogicalChannelParameters;
173
174typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */
175 enum {
176 eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters,
177 eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters,
178 eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
179 } choice;
180 union {
181 H2250LogicalChannelParameters h2250LogicalChannelParameters;
182 };
183} OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters;
184
185typedef struct OpenLogicalChannel_reverseLogicalChannelParameters { /* SEQUENCE */
186 enum {
187 eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters
188 = (1 << 31),
189 eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency
190 = (1 << 30),
191 eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor
192 = (1 << 29),
193 } options;
194 OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters
195 multiplexParameters;
196} OpenLogicalChannel_reverseLogicalChannelParameters;
197
198typedef struct NetworkAccessParameters_networkAddress { /* CHOICE */
199 enum {
200 eNetworkAccessParameters_networkAddress_q2931Address,
201 eNetworkAccessParameters_networkAddress_e164Address,
202 eNetworkAccessParameters_networkAddress_localAreaAddress,
203 } choice;
204 union {
205 H245_TransportAddress localAreaAddress;
206 };
207} NetworkAccessParameters_networkAddress;
208
209typedef struct NetworkAccessParameters { /* SEQUENCE */
210 enum {
211 eNetworkAccessParameters_distribution = (1 << 31),
212 eNetworkAccessParameters_externalReference = (1 << 30),
213 eNetworkAccessParameters_t120SetupProcedure = (1 << 29),
214 } options;
215 NetworkAccessParameters_networkAddress networkAddress;
216} NetworkAccessParameters;
217
218typedef struct OpenLogicalChannel { /* SEQUENCE */
219 enum {
220 eOpenLogicalChannel_reverseLogicalChannelParameters =
221 (1 << 31),
222 eOpenLogicalChannel_separateStack = (1 << 30),
223 eOpenLogicalChannel_encryptionSync = (1 << 29),
224 } options;
225 OpenLogicalChannel_forwardLogicalChannelParameters
226 forwardLogicalChannelParameters;
227 OpenLogicalChannel_reverseLogicalChannelParameters
228 reverseLogicalChannelParameters;
229 NetworkAccessParameters separateStack;
230} OpenLogicalChannel;
231
232typedef struct Setup_UUIE_fastStart { /* SEQUENCE OF */
233 int count;
234 OpenLogicalChannel item[30];
235} Setup_UUIE_fastStart;
236
237typedef struct Setup_UUIE { /* SEQUENCE */
238 enum {
239 eSetup_UUIE_h245Address = (1 << 31),
240 eSetup_UUIE_sourceAddress = (1 << 30),
241 eSetup_UUIE_destinationAddress = (1 << 29),
242 eSetup_UUIE_destCallSignalAddress = (1 << 28),
243 eSetup_UUIE_destExtraCallInfo = (1 << 27),
244 eSetup_UUIE_destExtraCRV = (1 << 26),
245 eSetup_UUIE_callServices = (1 << 25),
246 eSetup_UUIE_sourceCallSignalAddress = (1 << 24),
247 eSetup_UUIE_remoteExtensionAddress = (1 << 23),
248 eSetup_UUIE_callIdentifier = (1 << 22),
249 eSetup_UUIE_h245SecurityCapability = (1 << 21),
250 eSetup_UUIE_tokens = (1 << 20),
251 eSetup_UUIE_cryptoTokens = (1 << 19),
252 eSetup_UUIE_fastStart = (1 << 18),
253 eSetup_UUIE_mediaWaitForConnect = (1 << 17),
254 eSetup_UUIE_canOverlapSend = (1 << 16),
255 eSetup_UUIE_endpointIdentifier = (1 << 15),
256 eSetup_UUIE_multipleCalls = (1 << 14),
257 eSetup_UUIE_maintainConnection = (1 << 13),
258 eSetup_UUIE_connectionParameters = (1 << 12),
259 eSetup_UUIE_language = (1 << 11),
260 eSetup_UUIE_presentationIndicator = (1 << 10),
261 eSetup_UUIE_screeningIndicator = (1 << 9),
262 eSetup_UUIE_serviceControl = (1 << 8),
263 eSetup_UUIE_symmetricOperationRequired = (1 << 7),
264 eSetup_UUIE_capacity = (1 << 6),
265 eSetup_UUIE_circuitInfo = (1 << 5),
266 eSetup_UUIE_desiredProtocols = (1 << 4),
267 eSetup_UUIE_neededFeatures = (1 << 3),
268 eSetup_UUIE_desiredFeatures = (1 << 2),
269 eSetup_UUIE_supportedFeatures = (1 << 1),
270 eSetup_UUIE_parallelH245Control = (1 << 0),
271 } options;
272 TransportAddress h245Address;
273 TransportAddress destCallSignalAddress;
274 TransportAddress sourceCallSignalAddress;
275 Setup_UUIE_fastStart fastStart;
276} Setup_UUIE;
277
278typedef struct CallProceeding_UUIE_fastStart { /* SEQUENCE OF */
279 int count;
280 OpenLogicalChannel item[30];
281} CallProceeding_UUIE_fastStart;
282
283typedef struct CallProceeding_UUIE { /* SEQUENCE */
284 enum {
285 eCallProceeding_UUIE_h245Address = (1 << 31),
286 eCallProceeding_UUIE_callIdentifier = (1 << 30),
287 eCallProceeding_UUIE_h245SecurityMode = (1 << 29),
288 eCallProceeding_UUIE_tokens = (1 << 28),
289 eCallProceeding_UUIE_cryptoTokens = (1 << 27),
290 eCallProceeding_UUIE_fastStart = (1 << 26),
291 eCallProceeding_UUIE_multipleCalls = (1 << 25),
292 eCallProceeding_UUIE_maintainConnection = (1 << 24),
293 eCallProceeding_UUIE_fastConnectRefused = (1 << 23),
294 eCallProceeding_UUIE_featureSet = (1 << 22),
295 } options;
296 TransportAddress h245Address;
297 CallProceeding_UUIE_fastStart fastStart;
298} CallProceeding_UUIE;
299
300typedef struct Connect_UUIE_fastStart { /* SEQUENCE OF */
301 int count;
302 OpenLogicalChannel item[30];
303} Connect_UUIE_fastStart;
304
305typedef struct Connect_UUIE { /* SEQUENCE */
306 enum {
307 eConnect_UUIE_h245Address = (1 << 31),
308 eConnect_UUIE_callIdentifier = (1 << 30),
309 eConnect_UUIE_h245SecurityMode = (1 << 29),
310 eConnect_UUIE_tokens = (1 << 28),
311 eConnect_UUIE_cryptoTokens = (1 << 27),
312 eConnect_UUIE_fastStart = (1 << 26),
313 eConnect_UUIE_multipleCalls = (1 << 25),
314 eConnect_UUIE_maintainConnection = (1 << 24),
315 eConnect_UUIE_language = (1 << 23),
316 eConnect_UUIE_connectedAddress = (1 << 22),
317 eConnect_UUIE_presentationIndicator = (1 << 21),
318 eConnect_UUIE_screeningIndicator = (1 << 20),
319 eConnect_UUIE_fastConnectRefused = (1 << 19),
320 eConnect_UUIE_serviceControl = (1 << 18),
321 eConnect_UUIE_capacity = (1 << 17),
322 eConnect_UUIE_featureSet = (1 << 16),
323 } options;
324 TransportAddress h245Address;
325 Connect_UUIE_fastStart fastStart;
326} Connect_UUIE;
327
328typedef struct Alerting_UUIE_fastStart { /* SEQUENCE OF */
329 int count;
330 OpenLogicalChannel item[30];
331} Alerting_UUIE_fastStart;
332
333typedef struct Alerting_UUIE { /* SEQUENCE */
334 enum {
335 eAlerting_UUIE_h245Address = (1 << 31),
336 eAlerting_UUIE_callIdentifier = (1 << 30),
337 eAlerting_UUIE_h245SecurityMode = (1 << 29),
338 eAlerting_UUIE_tokens = (1 << 28),
339 eAlerting_UUIE_cryptoTokens = (1 << 27),
340 eAlerting_UUIE_fastStart = (1 << 26),
341 eAlerting_UUIE_multipleCalls = (1 << 25),
342 eAlerting_UUIE_maintainConnection = (1 << 24),
343 eAlerting_UUIE_alertingAddress = (1 << 23),
344 eAlerting_UUIE_presentationIndicator = (1 << 22),
345 eAlerting_UUIE_screeningIndicator = (1 << 21),
346 eAlerting_UUIE_fastConnectRefused = (1 << 20),
347 eAlerting_UUIE_serviceControl = (1 << 19),
348 eAlerting_UUIE_capacity = (1 << 18),
349 eAlerting_UUIE_featureSet = (1 << 17),
350 } options;
351 TransportAddress h245Address;
352 Alerting_UUIE_fastStart fastStart;
353} Alerting_UUIE;
354
355typedef struct Information_UUIE_fastStart { /* SEQUENCE OF */
356 int count;
357 OpenLogicalChannel item[30];
358} Information_UUIE_fastStart;
359
360typedef struct Information_UUIE { /* SEQUENCE */
361 enum {
362 eInformation_UUIE_callIdentifier = (1 << 31),
363 eInformation_UUIE_tokens = (1 << 30),
364 eInformation_UUIE_cryptoTokens = (1 << 29),
365 eInformation_UUIE_fastStart = (1 << 28),
366 eInformation_UUIE_fastConnectRefused = (1 << 27),
367 eInformation_UUIE_circuitInfo = (1 << 26),
368 } options;
369 Information_UUIE_fastStart fastStart;
370} Information_UUIE;
371
372typedef struct FacilityReason { /* CHOICE */
373 enum {
374 eFacilityReason_routeCallToGatekeeper,
375 eFacilityReason_callForwarded,
376 eFacilityReason_routeCallToMC,
377 eFacilityReason_undefinedReason,
378 eFacilityReason_conferenceListChoice,
379 eFacilityReason_startH245,
380 eFacilityReason_noH245,
381 eFacilityReason_newTokens,
382 eFacilityReason_featureSetUpdate,
383 eFacilityReason_forwardedElements,
384 eFacilityReason_transportedInformation,
385 } choice;
386} FacilityReason;
387
388typedef struct Facility_UUIE_fastStart { /* SEQUENCE OF */
389 int count;
390 OpenLogicalChannel item[30];
391} Facility_UUIE_fastStart;
392
393typedef struct Facility_UUIE { /* SEQUENCE */
394 enum {
395 eFacility_UUIE_alternativeAddress = (1 << 31),
396 eFacility_UUIE_alternativeAliasAddress = (1 << 30),
397 eFacility_UUIE_conferenceID = (1 << 29),
398 eFacility_UUIE_callIdentifier = (1 << 28),
399 eFacility_UUIE_destExtraCallInfo = (1 << 27),
400 eFacility_UUIE_remoteExtensionAddress = (1 << 26),
401 eFacility_UUIE_tokens = (1 << 25),
402 eFacility_UUIE_cryptoTokens = (1 << 24),
403 eFacility_UUIE_conferences = (1 << 23),
404 eFacility_UUIE_h245Address = (1 << 22),
405 eFacility_UUIE_fastStart = (1 << 21),
406 eFacility_UUIE_multipleCalls = (1 << 20),
407 eFacility_UUIE_maintainConnection = (1 << 19),
408 eFacility_UUIE_fastConnectRefused = (1 << 18),
409 eFacility_UUIE_serviceControl = (1 << 17),
410 eFacility_UUIE_circuitInfo = (1 << 16),
411 eFacility_UUIE_featureSet = (1 << 15),
412 eFacility_UUIE_destinationInfo = (1 << 14),
413 eFacility_UUIE_h245SecurityMode = (1 << 13),
414 } options;
415 FacilityReason reason;
416 TransportAddress h245Address;
417 Facility_UUIE_fastStart fastStart;
418} Facility_UUIE;
419
420typedef struct Progress_UUIE_fastStart { /* SEQUENCE OF */
421 int count;
422 OpenLogicalChannel item[30];
423} Progress_UUIE_fastStart;
424
425typedef struct Progress_UUIE { /* SEQUENCE */
426 enum {
427 eProgress_UUIE_h245Address = (1 << 31),
428 eProgress_UUIE_h245SecurityMode = (1 << 30),
429 eProgress_UUIE_tokens = (1 << 29),
430 eProgress_UUIE_cryptoTokens = (1 << 28),
431 eProgress_UUIE_fastStart = (1 << 27),
432 eProgress_UUIE_multipleCalls = (1 << 26),
433 eProgress_UUIE_maintainConnection = (1 << 25),
434 eProgress_UUIE_fastConnectRefused = (1 << 24),
435 } options;
436 TransportAddress h245Address;
437 Progress_UUIE_fastStart fastStart;
438} Progress_UUIE;
439
440typedef struct H323_UU_PDU_h323_message_body { /* CHOICE */
441 enum {
442 eH323_UU_PDU_h323_message_body_setup,
443 eH323_UU_PDU_h323_message_body_callProceeding,
444 eH323_UU_PDU_h323_message_body_connect,
445 eH323_UU_PDU_h323_message_body_alerting,
446 eH323_UU_PDU_h323_message_body_information,
447 eH323_UU_PDU_h323_message_body_releaseComplete,
448 eH323_UU_PDU_h323_message_body_facility,
449 eH323_UU_PDU_h323_message_body_progress,
450 eH323_UU_PDU_h323_message_body_empty,
451 eH323_UU_PDU_h323_message_body_status,
452 eH323_UU_PDU_h323_message_body_statusInquiry,
453 eH323_UU_PDU_h323_message_body_setupAcknowledge,
454 eH323_UU_PDU_h323_message_body_notify,
455 } choice;
456 union {
457 Setup_UUIE setup;
458 CallProceeding_UUIE callProceeding;
459 Connect_UUIE connect;
460 Alerting_UUIE alerting;
461 Information_UUIE information;
462 Facility_UUIE facility;
463 Progress_UUIE progress;
464 };
465} H323_UU_PDU_h323_message_body;
466
467typedef struct RequestMessage { /* CHOICE */
468 enum {
469 eRequestMessage_nonStandard,
470 eRequestMessage_masterSlaveDetermination,
471 eRequestMessage_terminalCapabilitySet,
472 eRequestMessage_openLogicalChannel,
473 eRequestMessage_closeLogicalChannel,
474 eRequestMessage_requestChannelClose,
475 eRequestMessage_multiplexEntrySend,
476 eRequestMessage_requestMultiplexEntry,
477 eRequestMessage_requestMode,
478 eRequestMessage_roundTripDelayRequest,
479 eRequestMessage_maintenanceLoopRequest,
480 eRequestMessage_communicationModeRequest,
481 eRequestMessage_conferenceRequest,
482 eRequestMessage_multilinkRequest,
483 eRequestMessage_logicalChannelRateRequest,
484 } choice;
485 union {
486 OpenLogicalChannel openLogicalChannel;
487 };
488} RequestMessage;
489
490typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */
491 enum {
492 eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters,
493 eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
494 } choice;
495 union {
496 H2250LogicalChannelParameters h2250LogicalChannelParameters;
497 };
498} OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters;
499
500typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters { /* SEQUENCE */
501 enum {
502 eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber
503 = (1 << 31),
504 eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters
505 = (1 << 30),
506 eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor
507 = (1 << 29),
508 } options;
509 OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters
510 multiplexParameters;
511} OpenLogicalChannelAck_reverseLogicalChannelParameters;
512
513typedef struct H2250LogicalChannelAckParameters { /* SEQUENCE */
514 enum {
515 eH2250LogicalChannelAckParameters_nonStandard = (1 << 31),
516 eH2250LogicalChannelAckParameters_sessionID = (1 << 30),
517 eH2250LogicalChannelAckParameters_mediaChannel = (1 << 29),
518 eH2250LogicalChannelAckParameters_mediaControlChannel =
519 (1 << 28),
520 eH2250LogicalChannelAckParameters_dynamicRTPPayloadType =
521 (1 << 27),
522 eH2250LogicalChannelAckParameters_flowControlToZero =
523 (1 << 26),
524 eH2250LogicalChannelAckParameters_portNumber = (1 << 25),
525 } options;
526 H245_TransportAddress mediaChannel;
527 H245_TransportAddress mediaControlChannel;
528} H2250LogicalChannelAckParameters;
529
530typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters { /* CHOICE */
531 enum {
532 eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters,
533 } choice;
534 union {
535 H2250LogicalChannelAckParameters
536 h2250LogicalChannelAckParameters;
537 };
538} OpenLogicalChannelAck_forwardMultiplexAckParameters;
539
540typedef struct OpenLogicalChannelAck { /* SEQUENCE */
541 enum {
542 eOpenLogicalChannelAck_reverseLogicalChannelParameters =
543 (1 << 31),
544 eOpenLogicalChannelAck_separateStack = (1 << 30),
545 eOpenLogicalChannelAck_forwardMultiplexAckParameters =
546 (1 << 29),
547 eOpenLogicalChannelAck_encryptionSync = (1 << 28),
548 } options;
549 OpenLogicalChannelAck_reverseLogicalChannelParameters
550 reverseLogicalChannelParameters;
551 OpenLogicalChannelAck_forwardMultiplexAckParameters
552 forwardMultiplexAckParameters;
553} OpenLogicalChannelAck;
554
555typedef struct ResponseMessage { /* CHOICE */
556 enum {
557 eResponseMessage_nonStandard,
558 eResponseMessage_masterSlaveDeterminationAck,
559 eResponseMessage_masterSlaveDeterminationReject,
560 eResponseMessage_terminalCapabilitySetAck,
561 eResponseMessage_terminalCapabilitySetReject,
562 eResponseMessage_openLogicalChannelAck,
563 eResponseMessage_openLogicalChannelReject,
564 eResponseMessage_closeLogicalChannelAck,
565 eResponseMessage_requestChannelCloseAck,
566 eResponseMessage_requestChannelCloseReject,
567 eResponseMessage_multiplexEntrySendAck,
568 eResponseMessage_multiplexEntrySendReject,
569 eResponseMessage_requestMultiplexEntryAck,
570 eResponseMessage_requestMultiplexEntryReject,
571 eResponseMessage_requestModeAck,
572 eResponseMessage_requestModeReject,
573 eResponseMessage_roundTripDelayResponse,
574 eResponseMessage_maintenanceLoopAck,
575 eResponseMessage_maintenanceLoopReject,
576 eResponseMessage_communicationModeResponse,
577 eResponseMessage_conferenceResponse,
578 eResponseMessage_multilinkResponse,
579 eResponseMessage_logicalChannelRateAcknowledge,
580 eResponseMessage_logicalChannelRateReject,
581 } choice;
582 union {
583 OpenLogicalChannelAck openLogicalChannelAck;
584 };
585} ResponseMessage;
586
587typedef struct MultimediaSystemControlMessage { /* CHOICE */
588 enum {
589 eMultimediaSystemControlMessage_request,
590 eMultimediaSystemControlMessage_response,
591 eMultimediaSystemControlMessage_command,
592 eMultimediaSystemControlMessage_indication,
593 } choice;
594 union {
595 RequestMessage request;
596 ResponseMessage response;
597 };
598} MultimediaSystemControlMessage;
599
600typedef struct H323_UU_PDU_h245Control { /* SEQUENCE OF */
601 int count;
602 MultimediaSystemControlMessage item[4];
603} H323_UU_PDU_h245Control;
604
605typedef struct H323_UU_PDU { /* SEQUENCE */
606 enum {
607 eH323_UU_PDU_nonStandardData = (1 << 31),
608 eH323_UU_PDU_h4501SupplementaryService = (1 << 30),
609 eH323_UU_PDU_h245Tunneling = (1 << 29),
610 eH323_UU_PDU_h245Control = (1 << 28),
611 eH323_UU_PDU_nonStandardControl = (1 << 27),
612 eH323_UU_PDU_callLinkage = (1 << 26),
613 eH323_UU_PDU_tunnelledSignallingMessage = (1 << 25),
614 eH323_UU_PDU_provisionalRespToH245Tunneling = (1 << 24),
615 eH323_UU_PDU_stimulusControl = (1 << 23),
616 eH323_UU_PDU_genericData = (1 << 22),
617 } options;
618 H323_UU_PDU_h323_message_body h323_message_body;
619 H323_UU_PDU_h245Control h245Control;
620} H323_UU_PDU;
621
622typedef struct H323_UserInformation { /* SEQUENCE */
623 enum {
624 eH323_UserInformation_user_data = (1 << 31),
625 } options;
626 H323_UU_PDU h323_uu_pdu;
627} H323_UserInformation;
628
629typedef struct GatekeeperRequest { /* SEQUENCE */
630 enum {
631 eGatekeeperRequest_nonStandardData = (1 << 31),
632 eGatekeeperRequest_gatekeeperIdentifier = (1 << 30),
633 eGatekeeperRequest_callServices = (1 << 29),
634 eGatekeeperRequest_endpointAlias = (1 << 28),
635 eGatekeeperRequest_alternateEndpoints = (1 << 27),
636 eGatekeeperRequest_tokens = (1 << 26),
637 eGatekeeperRequest_cryptoTokens = (1 << 25),
638 eGatekeeperRequest_authenticationCapability = (1 << 24),
639 eGatekeeperRequest_algorithmOIDs = (1 << 23),
640 eGatekeeperRequest_integrity = (1 << 22),
641 eGatekeeperRequest_integrityCheckValue = (1 << 21),
642 eGatekeeperRequest_supportsAltGK = (1 << 20),
643 eGatekeeperRequest_featureSet = (1 << 19),
644 eGatekeeperRequest_genericData = (1 << 18),
645 } options;
646 TransportAddress rasAddress;
647} GatekeeperRequest;
648
649typedef struct GatekeeperConfirm { /* SEQUENCE */
650 enum {
651 eGatekeeperConfirm_nonStandardData = (1 << 31),
652 eGatekeeperConfirm_gatekeeperIdentifier = (1 << 30),
653 eGatekeeperConfirm_alternateGatekeeper = (1 << 29),
654 eGatekeeperConfirm_authenticationMode = (1 << 28),
655 eGatekeeperConfirm_tokens = (1 << 27),
656 eGatekeeperConfirm_cryptoTokens = (1 << 26),
657 eGatekeeperConfirm_algorithmOID = (1 << 25),
658 eGatekeeperConfirm_integrity = (1 << 24),
659 eGatekeeperConfirm_integrityCheckValue = (1 << 23),
660 eGatekeeperConfirm_featureSet = (1 << 22),
661 eGatekeeperConfirm_genericData = (1 << 21),
662 } options;
663 TransportAddress rasAddress;
664} GatekeeperConfirm;
665
666typedef struct RegistrationRequest_callSignalAddress { /* SEQUENCE OF */
667 int count;
668 TransportAddress item[10];
669} RegistrationRequest_callSignalAddress;
670
671typedef struct RegistrationRequest_rasAddress { /* SEQUENCE OF */
672 int count;
673 TransportAddress item[10];
674} RegistrationRequest_rasAddress;
675
676typedef struct RegistrationRequest { /* SEQUENCE */
677 enum {
678 eRegistrationRequest_nonStandardData = (1 << 31),
679 eRegistrationRequest_terminalAlias = (1 << 30),
680 eRegistrationRequest_gatekeeperIdentifier = (1 << 29),
681 eRegistrationRequest_alternateEndpoints = (1 << 28),
682 eRegistrationRequest_timeToLive = (1 << 27),
683 eRegistrationRequest_tokens = (1 << 26),
684 eRegistrationRequest_cryptoTokens = (1 << 25),
685 eRegistrationRequest_integrityCheckValue = (1 << 24),
686 eRegistrationRequest_keepAlive = (1 << 23),
687 eRegistrationRequest_endpointIdentifier = (1 << 22),
688 eRegistrationRequest_willSupplyUUIEs = (1 << 21),
689 eRegistrationRequest_maintainConnection = (1 << 20),
690 eRegistrationRequest_alternateTransportAddresses = (1 << 19),
691 eRegistrationRequest_additiveRegistration = (1 << 18),
692 eRegistrationRequest_terminalAliasPattern = (1 << 17),
693 eRegistrationRequest_supportsAltGK = (1 << 16),
694 eRegistrationRequest_usageReportingCapability = (1 << 15),
695 eRegistrationRequest_multipleCalls = (1 << 14),
696 eRegistrationRequest_supportedH248Packages = (1 << 13),
697 eRegistrationRequest_callCreditCapability = (1 << 12),
698 eRegistrationRequest_capacityReportingCapability = (1 << 11),
699 eRegistrationRequest_capacity = (1 << 10),
700 eRegistrationRequest_featureSet = (1 << 9),
701 eRegistrationRequest_genericData = (1 << 8),
702 } options;
703 RegistrationRequest_callSignalAddress callSignalAddress;
704 RegistrationRequest_rasAddress rasAddress;
705 unsigned timeToLive;
706} RegistrationRequest;
707
708typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */
709 int count;
710 TransportAddress item[10];
711} RegistrationConfirm_callSignalAddress;
712
713typedef struct RegistrationConfirm { /* SEQUENCE */
714 enum {
715 eRegistrationConfirm_nonStandardData = (1 << 31),
716 eRegistrationConfirm_terminalAlias = (1 << 30),
717 eRegistrationConfirm_gatekeeperIdentifier = (1 << 29),
718 eRegistrationConfirm_alternateGatekeeper = (1 << 28),
719 eRegistrationConfirm_timeToLive = (1 << 27),
720 eRegistrationConfirm_tokens = (1 << 26),
721 eRegistrationConfirm_cryptoTokens = (1 << 25),
722 eRegistrationConfirm_integrityCheckValue = (1 << 24),
723 eRegistrationConfirm_willRespondToIRR = (1 << 23),
724 eRegistrationConfirm_preGrantedARQ = (1 << 22),
725 eRegistrationConfirm_maintainConnection = (1 << 21),
726 eRegistrationConfirm_serviceControl = (1 << 20),
727 eRegistrationConfirm_supportsAdditiveRegistration = (1 << 19),
728 eRegistrationConfirm_terminalAliasPattern = (1 << 18),
729 eRegistrationConfirm_supportedPrefixes = (1 << 17),
730 eRegistrationConfirm_usageSpec = (1 << 16),
731 eRegistrationConfirm_featureServerAlias = (1 << 15),
732 eRegistrationConfirm_capacityReportingSpec = (1 << 14),
733 eRegistrationConfirm_featureSet = (1 << 13),
734 eRegistrationConfirm_genericData = (1 << 12),
735 } options;
736 RegistrationConfirm_callSignalAddress callSignalAddress;
737 unsigned timeToLive;
738} RegistrationConfirm;
739
740typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */
741 int count;
742 TransportAddress item[10];
743} UnregistrationRequest_callSignalAddress;
744
745typedef struct UnregistrationRequest { /* SEQUENCE */
746 enum {
747 eUnregistrationRequest_endpointAlias = (1 << 31),
748 eUnregistrationRequest_nonStandardData = (1 << 30),
749 eUnregistrationRequest_endpointIdentifier = (1 << 29),
750 eUnregistrationRequest_alternateEndpoints = (1 << 28),
751 eUnregistrationRequest_gatekeeperIdentifier = (1 << 27),
752 eUnregistrationRequest_tokens = (1 << 26),
753 eUnregistrationRequest_cryptoTokens = (1 << 25),
754 eUnregistrationRequest_integrityCheckValue = (1 << 24),
755 eUnregistrationRequest_reason = (1 << 23),
756 eUnregistrationRequest_endpointAliasPattern = (1 << 22),
757 eUnregistrationRequest_supportedPrefixes = (1 << 21),
758 eUnregistrationRequest_alternateGatekeeper = (1 << 20),
759 eUnregistrationRequest_genericData = (1 << 19),
760 } options;
761 UnregistrationRequest_callSignalAddress callSignalAddress;
762} UnregistrationRequest;
763
764typedef struct AdmissionRequest { /* SEQUENCE */
765 enum {
766 eAdmissionRequest_callModel = (1 << 31),
767 eAdmissionRequest_destinationInfo = (1 << 30),
768 eAdmissionRequest_destCallSignalAddress = (1 << 29),
769 eAdmissionRequest_destExtraCallInfo = (1 << 28),
770 eAdmissionRequest_srcCallSignalAddress = (1 << 27),
771 eAdmissionRequest_nonStandardData = (1 << 26),
772 eAdmissionRequest_callServices = (1 << 25),
773 eAdmissionRequest_canMapAlias = (1 << 24),
774 eAdmissionRequest_callIdentifier = (1 << 23),
775 eAdmissionRequest_srcAlternatives = (1 << 22),
776 eAdmissionRequest_destAlternatives = (1 << 21),
777 eAdmissionRequest_gatekeeperIdentifier = (1 << 20),
778 eAdmissionRequest_tokens = (1 << 19),
779 eAdmissionRequest_cryptoTokens = (1 << 18),
780 eAdmissionRequest_integrityCheckValue = (1 << 17),
781 eAdmissionRequest_transportQOS = (1 << 16),
782 eAdmissionRequest_willSupplyUUIEs = (1 << 15),
783 eAdmissionRequest_callLinkage = (1 << 14),
784 eAdmissionRequest_gatewayDataRate = (1 << 13),
785 eAdmissionRequest_capacity = (1 << 12),
786 eAdmissionRequest_circuitInfo = (1 << 11),
787 eAdmissionRequest_desiredProtocols = (1 << 10),
788 eAdmissionRequest_desiredTunnelledProtocol = (1 << 9),
789 eAdmissionRequest_featureSet = (1 << 8),
790 eAdmissionRequest_genericData = (1 << 7),
791 } options;
792 TransportAddress destCallSignalAddress;
793 TransportAddress srcCallSignalAddress;
794} AdmissionRequest;
795
796typedef struct AdmissionConfirm { /* SEQUENCE */
797 enum {
798 eAdmissionConfirm_irrFrequency = (1 << 31),
799 eAdmissionConfirm_nonStandardData = (1 << 30),
800 eAdmissionConfirm_destinationInfo = (1 << 29),
801 eAdmissionConfirm_destExtraCallInfo = (1 << 28),
802 eAdmissionConfirm_destinationType = (1 << 27),
803 eAdmissionConfirm_remoteExtensionAddress = (1 << 26),
804 eAdmissionConfirm_alternateEndpoints = (1 << 25),
805 eAdmissionConfirm_tokens = (1 << 24),
806 eAdmissionConfirm_cryptoTokens = (1 << 23),
807 eAdmissionConfirm_integrityCheckValue = (1 << 22),
808 eAdmissionConfirm_transportQOS = (1 << 21),
809 eAdmissionConfirm_willRespondToIRR = (1 << 20),
810 eAdmissionConfirm_uuiesRequested = (1 << 19),
811 eAdmissionConfirm_language = (1 << 18),
812 eAdmissionConfirm_alternateTransportAddresses = (1 << 17),
813 eAdmissionConfirm_useSpecifiedTransport = (1 << 16),
814 eAdmissionConfirm_circuitInfo = (1 << 15),
815 eAdmissionConfirm_usageSpec = (1 << 14),
816 eAdmissionConfirm_supportedProtocols = (1 << 13),
817 eAdmissionConfirm_serviceControl = (1 << 12),
818 eAdmissionConfirm_multipleCalls = (1 << 11),
819 eAdmissionConfirm_featureSet = (1 << 10),
820 eAdmissionConfirm_genericData = (1 << 9),
821 } options;
822 TransportAddress destCallSignalAddress;
823} AdmissionConfirm;
824
825typedef struct LocationRequest { /* SEQUENCE */
826 enum {
827 eLocationRequest_endpointIdentifier = (1 << 31),
828 eLocationRequest_nonStandardData = (1 << 30),
829 eLocationRequest_sourceInfo = (1 << 29),
830 eLocationRequest_canMapAlias = (1 << 28),
831 eLocationRequest_gatekeeperIdentifier = (1 << 27),
832 eLocationRequest_tokens = (1 << 26),
833 eLocationRequest_cryptoTokens = (1 << 25),
834 eLocationRequest_integrityCheckValue = (1 << 24),
835 eLocationRequest_desiredProtocols = (1 << 23),
836 eLocationRequest_desiredTunnelledProtocol = (1 << 22),
837 eLocationRequest_featureSet = (1 << 21),
838 eLocationRequest_genericData = (1 << 20),
839 eLocationRequest_hopCount = (1 << 19),
840 eLocationRequest_circuitInfo = (1 << 18),
841 } options;
842 TransportAddress replyAddress;
843} LocationRequest;
844
845typedef struct LocationConfirm { /* SEQUENCE */
846 enum {
847 eLocationConfirm_nonStandardData = (1 << 31),
848 eLocationConfirm_destinationInfo = (1 << 30),
849 eLocationConfirm_destExtraCallInfo = (1 << 29),
850 eLocationConfirm_destinationType = (1 << 28),
851 eLocationConfirm_remoteExtensionAddress = (1 << 27),
852 eLocationConfirm_alternateEndpoints = (1 << 26),
853 eLocationConfirm_tokens = (1 << 25),
854 eLocationConfirm_cryptoTokens = (1 << 24),
855 eLocationConfirm_integrityCheckValue = (1 << 23),
856 eLocationConfirm_alternateTransportAddresses = (1 << 22),
857 eLocationConfirm_supportedProtocols = (1 << 21),
858 eLocationConfirm_multipleCalls = (1 << 20),
859 eLocationConfirm_featureSet = (1 << 19),
860 eLocationConfirm_genericData = (1 << 18),
861 eLocationConfirm_circuitInfo = (1 << 17),
862 eLocationConfirm_serviceControl = (1 << 16),
863 } options;
864 TransportAddress callSignalAddress;
865 TransportAddress rasAddress;
866} LocationConfirm;
867
868typedef struct InfoRequestResponse_callSignalAddress { /* SEQUENCE OF */
869 int count;
870 TransportAddress item[10];
871} InfoRequestResponse_callSignalAddress;
872
873typedef struct InfoRequestResponse { /* SEQUENCE */
874 enum {
875 eInfoRequestResponse_nonStandardData = (1 << 31),
876 eInfoRequestResponse_endpointAlias = (1 << 30),
877 eInfoRequestResponse_perCallInfo = (1 << 29),
878 eInfoRequestResponse_tokens = (1 << 28),
879 eInfoRequestResponse_cryptoTokens = (1 << 27),
880 eInfoRequestResponse_integrityCheckValue = (1 << 26),
881 eInfoRequestResponse_needResponse = (1 << 25),
882 eInfoRequestResponse_capacity = (1 << 24),
883 eInfoRequestResponse_irrStatus = (1 << 23),
884 eInfoRequestResponse_unsolicited = (1 << 22),
885 eInfoRequestResponse_genericData = (1 << 21),
886 } options;
887 TransportAddress rasAddress;
888 InfoRequestResponse_callSignalAddress callSignalAddress;
889} InfoRequestResponse;
890
891typedef struct RasMessage { /* CHOICE */
892 enum {
893 eRasMessage_gatekeeperRequest,
894 eRasMessage_gatekeeperConfirm,
895 eRasMessage_gatekeeperReject,
896 eRasMessage_registrationRequest,
897 eRasMessage_registrationConfirm,
898 eRasMessage_registrationReject,
899 eRasMessage_unregistrationRequest,
900 eRasMessage_unregistrationConfirm,
901 eRasMessage_unregistrationReject,
902 eRasMessage_admissionRequest,
903 eRasMessage_admissionConfirm,
904 eRasMessage_admissionReject,
905 eRasMessage_bandwidthRequest,
906 eRasMessage_bandwidthConfirm,
907 eRasMessage_bandwidthReject,
908 eRasMessage_disengageRequest,
909 eRasMessage_disengageConfirm,
910 eRasMessage_disengageReject,
911 eRasMessage_locationRequest,
912 eRasMessage_locationConfirm,
913 eRasMessage_locationReject,
914 eRasMessage_infoRequest,
915 eRasMessage_infoRequestResponse,
916 eRasMessage_nonStandardMessage,
917 eRasMessage_unknownMessageResponse,
918 eRasMessage_requestInProgress,
919 eRasMessage_resourcesAvailableIndicate,
920 eRasMessage_resourcesAvailableConfirm,
921 eRasMessage_infoRequestAck,
922 eRasMessage_infoRequestNak,
923 eRasMessage_serviceControlIndication,
924 eRasMessage_serviceControlResponse,
925 } choice;
926 union {
927 GatekeeperRequest gatekeeperRequest;
928 GatekeeperConfirm gatekeeperConfirm;
929 RegistrationRequest registrationRequest;
930 RegistrationConfirm registrationConfirm;
931 UnregistrationRequest unregistrationRequest;
932 AdmissionRequest admissionRequest;
933 AdmissionConfirm admissionConfirm;
934 LocationRequest locationRequest;
935 LocationConfirm locationConfirm;
936 InfoRequestResponse infoRequestResponse;
937 };
938} RasMessage;
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index 7d3ba4302e9e..8ccfe17bb253 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -469,8 +469,8 @@ pptp_inbound_pkt(struct sk_buff **pskb,
469 DEBUGP("%s but no session\n", pptp_msg_name[msg]); 469 DEBUGP("%s but no session\n", pptp_msg_name[msg]);
470 break; 470 break;
471 } 471 }
472 if (info->sstate != PPTP_CALL_IN_REP 472 if (info->cstate != PPTP_CALL_IN_REP
473 && info->sstate != PPTP_CALL_IN_CONF) { 473 && info->cstate != PPTP_CALL_IN_CONF) {
474 DEBUGP("%s but never sent IN_CALL_REPLY\n", 474 DEBUGP("%s but never sent IN_CALL_REPLY\n",
475 pptp_msg_name[msg]); 475 pptp_msg_name[msg]);
476 break; 476 break;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 3021af0910f1..d8b14a9010a6 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -224,25 +224,14 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
224 } 224 }
225 225
226 /* See ip_conntrack_proto_tcp.c */ 226 /* See ip_conntrack_proto_tcp.c */
227 if (hooknum != NF_IP_PRE_ROUTING) 227 if (hooknum == NF_IP_PRE_ROUTING &&
228 goto checksum_skipped; 228 nf_ip_checksum(skb, hooknum, skb->nh.iph->ihl * 4, 0)) {
229 229 if (LOG_INVALID(IPPROTO_ICMP))
230 switch (skb->ip_summed) { 230 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
231 case CHECKSUM_HW: 231 "ip_ct_icmp: bad ICMP checksum ");
232 if (!(u16)csum_fold(skb->csum)) 232 return -NF_ACCEPT;
233 break;
234 /* fall through */
235 case CHECKSUM_NONE:
236 skb->csum = 0;
237 if (__skb_checksum_complete(skb)) {
238 if (LOG_INVALID(IPPROTO_ICMP))
239 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
240 "ip_ct_icmp: bad ICMP checksum ");
241 return -NF_ACCEPT;
242 }
243 } 233 }
244 234
245checksum_skipped:
246 /* 235 /*
247 * 18 is the highest 'known' ICMP type. Anything else is a mystery 236 * 18 is the highest 'known' ICMP type. Anything else is a mystery
248 * 237 *
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index 5259abd0fb42..0416073c5600 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -235,12 +235,15 @@ static int do_basic_checks(struct ip_conntrack *conntrack,
235 flag = 1; 235 flag = 1;
236 } 236 }
237 237
238 /* Cookie Ack/Echo chunks not the first OR 238 /*
239 Init / Init Ack / Shutdown compl chunks not the only chunks */ 239 * Cookie Ack/Echo chunks not the first OR
240 if ((sch->type == SCTP_CID_COOKIE_ACK 240 * Init / Init Ack / Shutdown compl chunks not the only chunks
241 * OR zero-length.
242 */
243 if (((sch->type == SCTP_CID_COOKIE_ACK
241 || sch->type == SCTP_CID_COOKIE_ECHO 244 || sch->type == SCTP_CID_COOKIE_ECHO
242 || flag) 245 || flag)
243 && count !=0 ) { 246 && count !=0) || !sch->length) {
244 DEBUGP("Basic checks failed\n"); 247 DEBUGP("Basic checks failed\n");
245 return 1; 248 return 1;
246 } 249 }
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index e0dc37063545..062b252b58ad 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -870,11 +870,8 @@ static int tcp_error(struct sk_buff *skb,
870 * and moreover root might send raw packets. 870 * and moreover root might send raw packets.
871 */ 871 */
872 /* FIXME: Source route IP option packets --RR */ 872 /* FIXME: Source route IP option packets --RR */
873 if (hooknum == NF_IP_PRE_ROUTING 873 if (hooknum == NF_IP_PRE_ROUTING &&
874 && skb->ip_summed != CHECKSUM_UNNECESSARY 874 nf_ip_checksum(skb, hooknum, iph->ihl * 4, IPPROTO_TCP)) {
875 && csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP,
876 skb->ip_summed == CHECKSUM_HW ? skb->csum
877 : skb_checksum(skb, iph->ihl*4, tcplen, 0))) {
878 if (LOG_INVALID(IPPROTO_TCP)) 875 if (LOG_INVALID(IPPROTO_TCP))
879 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 876 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
880 "ip_ct_tcp: bad TCP checksum "); 877 "ip_ct_tcp: bad TCP checksum ");
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
index 55b7d3210adf..70899868783b 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
@@ -120,11 +120,8 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
120 * because the semantic of CHECKSUM_HW is different there 120 * because the semantic of CHECKSUM_HW is different there
121 * and moreover root might send raw packets. 121 * and moreover root might send raw packets.
122 * FIXME: Source route IP option packets --RR */ 122 * FIXME: Source route IP option packets --RR */
123 if (hooknum == NF_IP_PRE_ROUTING 123 if (hooknum == NF_IP_PRE_ROUTING &&
124 && skb->ip_summed != CHECKSUM_UNNECESSARY 124 nf_ip_checksum(skb, hooknum, iph->ihl * 4, IPPROTO_UDP)) {
125 && csum_tcpudp_magic(iph->saddr, iph->daddr, udplen, IPPROTO_UDP,
126 skb->ip_summed == CHECKSUM_HW ? skb->csum
127 : skb_checksum(skb, iph->ihl*4, udplen, 0))) {
128 if (LOG_INVALID(IPPROTO_UDP)) 125 if (LOG_INVALID(IPPROTO_UDP))
129 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 126 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
130 "ip_ct_udp: bad UDP checksum "); 127 "ip_ct_udp: bad UDP checksum ");
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 52076026db36..929d61f7be91 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -469,70 +469,63 @@ static unsigned int ip_conntrack_local(unsigned int hooknum,
469 469
470/* Connection tracking may drop packets, but never alters them, so 470/* Connection tracking may drop packets, but never alters them, so
471 make it the first hook. */ 471 make it the first hook. */
472static struct nf_hook_ops ip_conntrack_defrag_ops = { 472static struct nf_hook_ops ip_conntrack_ops[] = {
473 .hook = ip_conntrack_defrag, 473 {
474 .owner = THIS_MODULE, 474 .hook = ip_conntrack_defrag,
475 .pf = PF_INET, 475 .owner = THIS_MODULE,
476 .hooknum = NF_IP_PRE_ROUTING, 476 .pf = PF_INET,
477 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 477 .hooknum = NF_IP_PRE_ROUTING,
478}; 478 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
479 479 },
480static struct nf_hook_ops ip_conntrack_in_ops = { 480 {
481 .hook = ip_conntrack_in, 481 .hook = ip_conntrack_in,
482 .owner = THIS_MODULE, 482 .owner = THIS_MODULE,
483 .pf = PF_INET, 483 .pf = PF_INET,
484 .hooknum = NF_IP_PRE_ROUTING, 484 .hooknum = NF_IP_PRE_ROUTING,
485 .priority = NF_IP_PRI_CONNTRACK, 485 .priority = NF_IP_PRI_CONNTRACK,
486}; 486 },
487 487 {
488static struct nf_hook_ops ip_conntrack_defrag_local_out_ops = { 488 .hook = ip_conntrack_defrag,
489 .hook = ip_conntrack_defrag, 489 .owner = THIS_MODULE,
490 .owner = THIS_MODULE, 490 .pf = PF_INET,
491 .pf = PF_INET, 491 .hooknum = NF_IP_LOCAL_OUT,
492 .hooknum = NF_IP_LOCAL_OUT, 492 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
493 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 493 },
494}; 494 {
495 495 .hook = ip_conntrack_local,
496static struct nf_hook_ops ip_conntrack_local_out_ops = { 496 .owner = THIS_MODULE,
497 .hook = ip_conntrack_local, 497 .pf = PF_INET,
498 .owner = THIS_MODULE, 498 .hooknum = NF_IP_LOCAL_OUT,
499 .pf = PF_INET, 499 .priority = NF_IP_PRI_CONNTRACK,
500 .hooknum = NF_IP_LOCAL_OUT, 500 },
501 .priority = NF_IP_PRI_CONNTRACK, 501 {
502}; 502 .hook = ip_conntrack_help,
503 503 .owner = THIS_MODULE,
504/* helpers */ 504 .pf = PF_INET,
505static struct nf_hook_ops ip_conntrack_helper_out_ops = { 505 .hooknum = NF_IP_POST_ROUTING,
506 .hook = ip_conntrack_help, 506 .priority = NF_IP_PRI_CONNTRACK_HELPER,
507 .owner = THIS_MODULE, 507 },
508 .pf = PF_INET, 508 {
509 .hooknum = NF_IP_POST_ROUTING, 509 .hook = ip_conntrack_help,
510 .priority = NF_IP_PRI_CONNTRACK_HELPER, 510 .owner = THIS_MODULE,
511}; 511 .pf = PF_INET,
512 512 .hooknum = NF_IP_LOCAL_IN,
513static struct nf_hook_ops ip_conntrack_helper_in_ops = { 513 .priority = NF_IP_PRI_CONNTRACK_HELPER,
514 .hook = ip_conntrack_help, 514 },
515 .owner = THIS_MODULE, 515 {
516 .pf = PF_INET, 516 .hook = ip_confirm,
517 .hooknum = NF_IP_LOCAL_IN, 517 .owner = THIS_MODULE,
518 .priority = NF_IP_PRI_CONNTRACK_HELPER, 518 .pf = PF_INET,
519}; 519 .hooknum = NF_IP_POST_ROUTING,
520 520 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
521/* Refragmenter; last chance. */ 521 },
522static struct nf_hook_ops ip_conntrack_out_ops = { 522 {
523 .hook = ip_confirm, 523 .hook = ip_confirm,
524 .owner = THIS_MODULE, 524 .owner = THIS_MODULE,
525 .pf = PF_INET, 525 .pf = PF_INET,
526 .hooknum = NF_IP_POST_ROUTING, 526 .hooknum = NF_IP_LOCAL_IN,
527 .priority = NF_IP_PRI_CONNTRACK_CONFIRM, 527 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
528}; 528 },
529
530static struct nf_hook_ops ip_conntrack_local_in_ops = {
531 .hook = ip_confirm,
532 .owner = THIS_MODULE,
533 .pf = PF_INET,
534 .hooknum = NF_IP_LOCAL_IN,
535 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
536}; 529};
537 530
538/* Sysctl support */ 531/* Sysctl support */
@@ -783,18 +776,46 @@ static ctl_table ip_ct_net_table[] = {
783EXPORT_SYMBOL(ip_ct_log_invalid); 776EXPORT_SYMBOL(ip_ct_log_invalid);
784#endif /* CONFIG_SYSCTL */ 777#endif /* CONFIG_SYSCTL */
785 778
786static int init_or_cleanup(int init) 779/* FIXME: Allow NULL functions and sub in pointers to generic for
780 them. --RR */
781int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
782{
783 int ret = 0;
784
785 write_lock_bh(&ip_conntrack_lock);
786 if (ip_ct_protos[proto->proto] != &ip_conntrack_generic_protocol) {
787 ret = -EBUSY;
788 goto out;
789 }
790 ip_ct_protos[proto->proto] = proto;
791 out:
792 write_unlock_bh(&ip_conntrack_lock);
793 return ret;
794}
795
796void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
797{
798 write_lock_bh(&ip_conntrack_lock);
799 ip_ct_protos[proto->proto] = &ip_conntrack_generic_protocol;
800 write_unlock_bh(&ip_conntrack_lock);
801
802 /* Somebody could still be looking at the proto in bh. */
803 synchronize_net();
804
805 /* Remove all conntrack entries for this protocol */
806 ip_ct_iterate_cleanup(kill_proto, &proto->proto);
807}
808
809static int __init ip_conntrack_standalone_init(void)
787{ 810{
788#ifdef CONFIG_PROC_FS 811#ifdef CONFIG_PROC_FS
789 struct proc_dir_entry *proc, *proc_exp, *proc_stat; 812 struct proc_dir_entry *proc, *proc_exp, *proc_stat;
790#endif 813#endif
791 int ret = 0; 814 int ret = 0;
792 815
793 if (!init) goto cleanup;
794
795 ret = ip_conntrack_init(); 816 ret = ip_conntrack_init();
796 if (ret < 0) 817 if (ret < 0)
797 goto cleanup_nothing; 818 return ret;
798 819
799#ifdef CONFIG_PROC_FS 820#ifdef CONFIG_PROC_FS
800 ret = -ENOMEM; 821 ret = -ENOMEM;
@@ -813,78 +834,25 @@ static int init_or_cleanup(int init)
813 proc_stat->owner = THIS_MODULE; 834 proc_stat->owner = THIS_MODULE;
814#endif 835#endif
815 836
816 ret = nf_register_hook(&ip_conntrack_defrag_ops); 837 ret = nf_register_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops));
817 if (ret < 0) { 838 if (ret < 0) {
818 printk("ip_conntrack: can't register pre-routing defrag hook.\n"); 839 printk("ip_conntrack: can't register hooks.\n");
819 goto cleanup_proc_stat; 840 goto cleanup_proc_stat;
820 } 841 }
821 ret = nf_register_hook(&ip_conntrack_defrag_local_out_ops);
822 if (ret < 0) {
823 printk("ip_conntrack: can't register local_out defrag hook.\n");
824 goto cleanup_defragops;
825 }
826 ret = nf_register_hook(&ip_conntrack_in_ops);
827 if (ret < 0) {
828 printk("ip_conntrack: can't register pre-routing hook.\n");
829 goto cleanup_defraglocalops;
830 }
831 ret = nf_register_hook(&ip_conntrack_local_out_ops);
832 if (ret < 0) {
833 printk("ip_conntrack: can't register local out hook.\n");
834 goto cleanup_inops;
835 }
836 ret = nf_register_hook(&ip_conntrack_helper_in_ops);
837 if (ret < 0) {
838 printk("ip_conntrack: can't register local in helper hook.\n");
839 goto cleanup_inandlocalops;
840 }
841 ret = nf_register_hook(&ip_conntrack_helper_out_ops);
842 if (ret < 0) {
843 printk("ip_conntrack: can't register postrouting helper hook.\n");
844 goto cleanup_helperinops;
845 }
846 ret = nf_register_hook(&ip_conntrack_out_ops);
847 if (ret < 0) {
848 printk("ip_conntrack: can't register post-routing hook.\n");
849 goto cleanup_helperoutops;
850 }
851 ret = nf_register_hook(&ip_conntrack_local_in_ops);
852 if (ret < 0) {
853 printk("ip_conntrack: can't register local in hook.\n");
854 goto cleanup_inoutandlocalops;
855 }
856#ifdef CONFIG_SYSCTL 842#ifdef CONFIG_SYSCTL
857 ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0); 843 ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0);
858 if (ip_ct_sysctl_header == NULL) { 844 if (ip_ct_sysctl_header == NULL) {
859 printk("ip_conntrack: can't register to sysctl.\n"); 845 printk("ip_conntrack: can't register to sysctl.\n");
860 ret = -ENOMEM; 846 ret = -ENOMEM;
861 goto cleanup_localinops; 847 goto cleanup_hooks;
862 } 848 }
863#endif 849#endif
864
865 return ret; 850 return ret;
866 851
867 cleanup:
868 synchronize_net();
869#ifdef CONFIG_SYSCTL 852#ifdef CONFIG_SYSCTL
870 unregister_sysctl_table(ip_ct_sysctl_header); 853 cleanup_hooks:
871 cleanup_localinops: 854 nf_unregister_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops));
872#endif 855#endif
873 nf_unregister_hook(&ip_conntrack_local_in_ops);
874 cleanup_inoutandlocalops:
875 nf_unregister_hook(&ip_conntrack_out_ops);
876 cleanup_helperoutops:
877 nf_unregister_hook(&ip_conntrack_helper_out_ops);
878 cleanup_helperinops:
879 nf_unregister_hook(&ip_conntrack_helper_in_ops);
880 cleanup_inandlocalops:
881 nf_unregister_hook(&ip_conntrack_local_out_ops);
882 cleanup_inops:
883 nf_unregister_hook(&ip_conntrack_in_ops);
884 cleanup_defraglocalops:
885 nf_unregister_hook(&ip_conntrack_defrag_local_out_ops);
886 cleanup_defragops:
887 nf_unregister_hook(&ip_conntrack_defrag_ops);
888 cleanup_proc_stat: 856 cleanup_proc_stat:
889#ifdef CONFIG_PROC_FS 857#ifdef CONFIG_PROC_FS
890 remove_proc_entry("ip_conntrack", proc_net_stat); 858 remove_proc_entry("ip_conntrack", proc_net_stat);
@@ -895,48 +863,22 @@ static int init_or_cleanup(int init)
895 cleanup_init: 863 cleanup_init:
896#endif /* CONFIG_PROC_FS */ 864#endif /* CONFIG_PROC_FS */
897 ip_conntrack_cleanup(); 865 ip_conntrack_cleanup();
898 cleanup_nothing:
899 return ret;
900}
901
902/* FIXME: Allow NULL functions and sub in pointers to generic for
903 them. --RR */
904int ip_conntrack_protocol_register(struct ip_conntrack_protocol *proto)
905{
906 int ret = 0;
907
908 write_lock_bh(&ip_conntrack_lock);
909 if (ip_ct_protos[proto->proto] != &ip_conntrack_generic_protocol) {
910 ret = -EBUSY;
911 goto out;
912 }
913 ip_ct_protos[proto->proto] = proto;
914 out:
915 write_unlock_bh(&ip_conntrack_lock);
916 return ret; 866 return ret;
917} 867}
918 868
919void ip_conntrack_protocol_unregister(struct ip_conntrack_protocol *proto)
920{
921 write_lock_bh(&ip_conntrack_lock);
922 ip_ct_protos[proto->proto] = &ip_conntrack_generic_protocol;
923 write_unlock_bh(&ip_conntrack_lock);
924
925 /* Somebody could still be looking at the proto in bh. */
926 synchronize_net();
927
928 /* Remove all conntrack entries for this protocol */
929 ip_ct_iterate_cleanup(kill_proto, &proto->proto);
930}
931
932static int __init ip_conntrack_standalone_init(void)
933{
934 return init_or_cleanup(1);
935}
936
937static void __exit ip_conntrack_standalone_fini(void) 869static void __exit ip_conntrack_standalone_fini(void)
938{ 870{
939 init_or_cleanup(0); 871 synchronize_net();
872#ifdef CONFIG_SYSCTL
873 unregister_sysctl_table(ip_ct_sysctl_header);
874#endif
875 nf_unregister_hooks(ip_conntrack_ops, ARRAY_SIZE(ip_conntrack_ops));
876#ifdef CONFIG_PROC_FS
877 remove_proc_entry("ip_conntrack", proc_net_stat);
878 proc_net_remove("ip_conntrack_expect");
879 proc_net_remove("ip_conntrack");
880#endif /* CONFIG_PROC_FS */
881 ip_conntrack_cleanup();
940} 882}
941 883
942module_init(ip_conntrack_standalone_init); 884module_init(ip_conntrack_standalone_init);
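The ip_conntrack_protocol_register()/ip_conntrack_protocol_unregister() pair relocated above is the interface per-protocol trackers call from their module init/exit paths. A hedged sketch of such a caller follows; the structure name, the protocol number 254 and the "example" module are hypothetical and only illustrate the calling convention:

/* Hypothetical tracker for IP protocol 254, for illustration only. */
static struct ip_conntrack_protocol ip_conntrack_protocol_example = {
	.proto	= 254,
	.name	= "example",
	/* .pkt_to_tuple, .invert_tuple, .packet, ... as the protocol needs */
};

static int __init example_init(void)
{
	return ip_conntrack_protocol_register(&ip_conntrack_protocol_example);
}

static void __exit example_fini(void)
{
	ip_conntrack_protocol_unregister(&ip_conntrack_protocol_example);
}

module_init(example_init);
module_exit(example_fini);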
diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c
index a0bc883928c0..d45663d137a7 100644
--- a/net/ipv4/netfilter/ip_nat_helper_h323.c
+++ b/net/ipv4/netfilter/ip_nat_helper_h323.c
@@ -7,24 +7,6 @@
7 * 7 *
8 * Based on the 'brute force' H.323 NAT module by 8 * Based on the 'brute force' H.323 NAT module by
9 * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 9 * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
10 *
11 * Changes:
12 * 2006-02-01 - initial version 0.1
13 *
14 * 2006-02-20 - version 0.2
15 * 1. Changed source format to follow kernel conventions
16 * 2. Deleted some unnecessary structures
17 * 3. Minor fixes
18 *
19 * 2006-03-10 - version 0.3
20 * 1. Added support for multiple TPKTs in one packet (suggested by
21 * Patrick McHardy)
22 * 2. Added support for non-linear skb (based on Patrick McHardy's patch)
23 * 3. Eliminated unnecessary return code
24 *
25 * 2006-03-15 - version 0.4
26 * 1. Added support for T.120 channels
27 * 2. Added parameter gkrouted_only (suggested by Patrick McHardy)
28 */ 10 */
29 11
30#include <linux/module.h> 12#include <linux/module.h>
@@ -41,65 +23,12 @@
41#include <linux/netfilter_ipv4/ip_conntrack_h323.h> 23#include <linux/netfilter_ipv4/ip_conntrack_h323.h>
42#include <linux/netfilter_ipv4/ip_conntrack_helper.h> 24#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
43 25
44#include "ip_conntrack_helper_h323_asn1.h"
45
46#if 0 26#if 0
47#define DEBUGP printk 27#define DEBUGP printk
48#else 28#else
49#define DEBUGP(format, args...) 29#define DEBUGP(format, args...)
50#endif 30#endif
51 31
52extern int get_h245_addr(unsigned char *data, H245_TransportAddress * addr,
53 u_int32_t * ip, u_int16_t * port);
54extern int get_h225_addr(unsigned char *data, TransportAddress * addr,
55 u_int32_t * ip, u_int16_t * port);
56extern void ip_conntrack_h245_expect(struct ip_conntrack *new,
57 struct ip_conntrack_expect *this);
58extern void ip_conntrack_q931_expect(struct ip_conntrack *new,
59 struct ip_conntrack_expect *this);
60extern int (*set_h245_addr_hook) (struct sk_buff ** pskb,
61 unsigned char **data, int dataoff,
62 H245_TransportAddress * addr,
63 u_int32_t ip, u_int16_t port);
64extern int (*set_h225_addr_hook) (struct sk_buff ** pskb,
65 unsigned char **data, int dataoff,
66 TransportAddress * addr,
67 u_int32_t ip, u_int16_t port);
68extern int (*set_sig_addr_hook) (struct sk_buff ** pskb,
69 struct ip_conntrack * ct,
70 enum ip_conntrack_info ctinfo,
71 unsigned char **data,
72 TransportAddress * addr, int count);
73extern int (*set_ras_addr_hook) (struct sk_buff ** pskb,
74 struct ip_conntrack * ct,
75 enum ip_conntrack_info ctinfo,
76 unsigned char **data,
77 TransportAddress * addr, int count);
78extern int (*nat_rtp_rtcp_hook) (struct sk_buff ** pskb,
79 struct ip_conntrack * ct,
80 enum ip_conntrack_info ctinfo,
81 unsigned char **data, int dataoff,
82 H245_TransportAddress * addr,
83 u_int16_t port, u_int16_t rtp_port,
84 struct ip_conntrack_expect * rtp_exp,
85 struct ip_conntrack_expect * rtcp_exp);
86extern int (*nat_t120_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
87 enum ip_conntrack_info ctinfo,
88 unsigned char **data, int dataoff,
89 H245_TransportAddress * addr, u_int16_t port,
90 struct ip_conntrack_expect * exp);
91extern int (*nat_h245_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
92 enum ip_conntrack_info ctinfo,
93 unsigned char **data, int dataoff,
94 TransportAddress * addr, u_int16_t port,
95 struct ip_conntrack_expect * exp);
96extern int (*nat_q931_hook) (struct sk_buff ** pskb, struct ip_conntrack * ct,
97 enum ip_conntrack_info ctinfo,
98 unsigned char **data, TransportAddress * addr,
99 int idx, u_int16_t port,
100 struct ip_conntrack_expect * exp);
101
102
103/****************************************************************************/ 32/****************************************************************************/
104static int set_addr(struct sk_buff **pskb, 33static int set_addr(struct sk_buff **pskb,
105 unsigned char **data, int dataoff, 34 unsigned char **data, int dataoff,
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index 6c4899d8046a..96ceabaec402 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple,
49 const union ip_conntrack_manip_proto *min, 49 const union ip_conntrack_manip_proto *min,
50 const union ip_conntrack_manip_proto *max) 50 const union ip_conntrack_manip_proto *max)
51{ 51{
52 u_int32_t key; 52 __be16 key;
53 53
54 if (maniptype == IP_NAT_MANIP_SRC) 54 if (maniptype == IP_NAT_MANIP_SRC)
55 key = tuple->src.u.gre.key; 55 key = tuple->src.u.gre.key;
56 else 56 else
57 key = tuple->dst.u.gre.key; 57 key = tuple->dst.u.gre.key;
58 58
59 return ntohl(key) >= ntohl(min->gre.key) 59 return ntohs(key) >= ntohs(min->gre.key)
60 && ntohl(key) <= ntohl(max->gre.key); 60 && ntohs(key) <= ntohs(max->gre.key);
61} 61}
62 62
63/* generate unique tuple ... */ 63/* generate unique tuple ... */
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
81 min = 1; 81 min = 1;
82 range_size = 0xffff; 82 range_size = 0xffff;
83 } else { 83 } else {
84 min = ntohl(range->min.gre.key); 84 min = ntohs(range->min.gre.key);
85 range_size = ntohl(range->max.gre.key) - min + 1; 85 range_size = ntohs(range->max.gre.key) - min + 1;
86 } 86 }
87 87
88 DEBUGP("min = %u, range_size = %u\n", min, range_size); 88 DEBUGP("min = %u, range_size = %u\n", min, range_size);
89 89
90 for (i = 0; i < range_size; i++, key++) { 90 for (i = 0; i < range_size; i++, key++) {
91 *keyptr = htonl(min + key % range_size); 91 *keyptr = htons(min + key % range_size);
92 if (!ip_nat_used_tuple(tuple, conntrack)) 92 if (!ip_nat_used_tuple(tuple, conntrack))
93 return 1; 93 return 1;
94 } 94 }
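The hunk above narrows the GRE (PPTP call ID) key from a 32-bit to a 16-bit big-endian quantity, so all conversions become ntohs()/htons(). A small standalone C sketch of the same range test; key_in_range and the sample values are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int key_in_range(uint16_t key_be, uint16_t min_be, uint16_t max_be)
{
	/* Values stay big-endian (__be16-style); convert only to compare. */
	return ntohs(key_be) >= ntohs(min_be) &&
	       ntohs(key_be) <= ntohs(max_be);
}

int main(void)
{
	uint16_t min = htons(1000), max = htons(2000), key = htons(1500);

	printf("in range: %d\n", key_in_range(key, min, max)); /* prints 1 */
	return 0;
}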
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index efba8c4e42e0..1aba926c1cb0 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -279,7 +279,7 @@ static struct ipt_target ipt_dnat_reg = {
279 .target = ipt_dnat_target, 279 .target = ipt_dnat_target,
280 .targetsize = sizeof(struct ip_nat_multi_range_compat), 280 .targetsize = sizeof(struct ip_nat_multi_range_compat),
281 .table = "nat", 281 .table = "nat",
282 .hooks = 1 << NF_IP_PRE_ROUTING, 282 .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT),
283 .checkentry = ipt_dnat_checkentry, 283 .checkentry = ipt_dnat_checkentry,
284}; 284};
285 285
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c
index c62253845538..c33244263b90 100644
--- a/net/ipv4/netfilter/ip_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c
@@ -768,6 +768,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
768 len *= sizeof(unsigned long); 768 len *= sizeof(unsigned long);
769 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); 769 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
770 if (*obj == NULL) { 770 if (*obj == NULL) {
771 kfree(lp);
771 kfree(id); 772 kfree(id);
772 if (net_ratelimit()) 773 if (net_ratelimit())
773 printk("OOM in bsalg (%d)\n", __LINE__); 774 printk("OOM in bsalg (%d)\n", __LINE__);
@@ -1003,12 +1004,12 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
1003 1004
1004 return 1; 1005 return 1;
1005 1006
1007err_addr_free:
1008 kfree((unsigned long *)trap->ip_address);
1009
1006err_id_free: 1010err_id_free:
1007 kfree(trap->id); 1011 kfree(trap->id);
1008 1012
1009err_addr_free:
1010 kfree((unsigned long *)trap->ip_address);
1011
1012 return 0; 1013 return 0;
1013} 1014}
1014 1015
@@ -1126,11 +1127,10 @@ static int snmp_parse_mangle(unsigned char *msg,
1126 struct snmp_v1_trap trap; 1127 struct snmp_v1_trap trap;
1127 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); 1128 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check);
1128 1129
1129		/* Discard trap allocations regardless */
1130		kfree(trap.id);
1131		kfree((unsigned long *)trap.ip_address);
1132
1133		if (!ret)
1134			return ret;

1130		if (ret) {
1131			kfree(trap.id);
1132			kfree((unsigned long *)trap.ip_address);
1133		} else
1134			return ret;
1135 1135
1136 } else { 1136 } else {
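The label reordering above (err_addr_free ahead of err_id_free) makes the unwind path free the most recently allocated object first. A standalone sketch of that label ordering; build(), id and addr are illustrative names:

#include <stdlib.h>

static int build(int fail_late)
{
	char *id = malloc(16);
	if (!id)
		return -1;

	char *addr = malloc(4);
	if (!addr)
		goto err_id_free;	/* only 'id' exists at this point */

	if (fail_late)
		goto err_addr_free;	/* both allocations exist */

	free(addr);
	free(id);
	return 0;

err_addr_free:				/* labels run in reverse allocation order */
	free(addr);
err_id_free:
	free(id);
	return -1;
}

int main(void)
{
	return build(0);	/* build(1) exercises the full unwind path */
}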
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 3505b0de2e04..67e676783da9 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -219,8 +219,10 @@ ip_nat_out(unsigned int hooknum,
219 const struct net_device *out, 219 const struct net_device *out,
220 int (*okfn)(struct sk_buff *)) 220 int (*okfn)(struct sk_buff *))
221{ 221{
222#ifdef CONFIG_XFRM
222 struct ip_conntrack *ct; 223 struct ip_conntrack *ct;
223 enum ip_conntrack_info ctinfo; 224 enum ip_conntrack_info ctinfo;
225#endif
224 unsigned int ret; 226 unsigned int ret;
225 227
226 /* root is playing with raw sockets. */ 228 /* root is playing with raw sockets. */
@@ -299,69 +301,63 @@ ip_nat_adjust(unsigned int hooknum,
299 301
300/* We must be after connection tracking and before packet filtering. */ 302/* We must be after connection tracking and before packet filtering. */
301 303
302/* Before packet filtering, change destination */
303static struct nf_hook_ops ip_nat_in_ops = {
304	.hook		= ip_nat_in,
305	.owner		= THIS_MODULE,
306	.pf		= PF_INET,
307	.hooknum	= NF_IP_PRE_ROUTING,
308	.priority	= NF_IP_PRI_NAT_DST,

304static struct nf_hook_ops ip_nat_ops[] = {
305	/* Before packet filtering, change destination */
306	{
307		.hook		= ip_nat_in,
308		.owner		= THIS_MODULE,
309		.pf		= PF_INET,
310		.hooknum	= NF_IP_PRE_ROUTING,
311 .priority = NF_IP_PRI_NAT_DST,
312 },
313 /* After packet filtering, change source */
314 {
315 .hook = ip_nat_out,
316 .owner = THIS_MODULE,
317 .pf = PF_INET,
318 .hooknum = NF_IP_POST_ROUTING,
319 .priority = NF_IP_PRI_NAT_SRC,
320 },
321 /* After conntrack, adjust sequence number */
322 {
323 .hook = ip_nat_adjust,
324 .owner = THIS_MODULE,
325 .pf = PF_INET,
326 .hooknum = NF_IP_POST_ROUTING,
327 .priority = NF_IP_PRI_NAT_SEQ_ADJUST,
328 },
329 /* Before packet filtering, change destination */
330 {
331 .hook = ip_nat_local_fn,
332 .owner = THIS_MODULE,
333 .pf = PF_INET,
334 .hooknum = NF_IP_LOCAL_OUT,
335 .priority = NF_IP_PRI_NAT_DST,
336 },
337 /* After packet filtering, change source */
338 {
339 .hook = ip_nat_fn,
340 .owner = THIS_MODULE,
341 .pf = PF_INET,
342 .hooknum = NF_IP_LOCAL_IN,
343 .priority = NF_IP_PRI_NAT_SRC,
344 },
345 /* After conntrack, adjust sequence number */
346 {
347 .hook = ip_nat_adjust,
348 .owner = THIS_MODULE,
349 .pf = PF_INET,
350 .hooknum = NF_IP_LOCAL_IN,
351 .priority = NF_IP_PRI_NAT_SEQ_ADJUST,
352 },
309}; 353};
310 354
311/* After packet filtering, change source */
355static int __init ip_nat_standalone_init(void)
312static struct nf_hook_ops ip_nat_out_ops = {
313 .hook = ip_nat_out,
314 .owner = THIS_MODULE,
315 .pf = PF_INET,
316 .hooknum = NF_IP_POST_ROUTING,
317 .priority = NF_IP_PRI_NAT_SRC,
318};
319
320/* After conntrack, adjust sequence number */
321static struct nf_hook_ops ip_nat_adjust_out_ops = {
322 .hook = ip_nat_adjust,
323 .owner = THIS_MODULE,
324 .pf = PF_INET,
325 .hooknum = NF_IP_POST_ROUTING,
326 .priority = NF_IP_PRI_NAT_SEQ_ADJUST,
327};
328
329/* Before packet filtering, change destination */
330static struct nf_hook_ops ip_nat_local_out_ops = {
331 .hook = ip_nat_local_fn,
332 .owner = THIS_MODULE,
333 .pf = PF_INET,
334 .hooknum = NF_IP_LOCAL_OUT,
335 .priority = NF_IP_PRI_NAT_DST,
336};
337
338/* After packet filtering, change source for reply packets of LOCAL_OUT DNAT */
339static struct nf_hook_ops ip_nat_local_in_ops = {
340 .hook = ip_nat_fn,
341 .owner = THIS_MODULE,
342 .pf = PF_INET,
343 .hooknum = NF_IP_LOCAL_IN,
344 .priority = NF_IP_PRI_NAT_SRC,
345};
346
347/* After conntrack, adjust sequence number */
348static struct nf_hook_ops ip_nat_adjust_in_ops = {
349 .hook = ip_nat_adjust,
350 .owner = THIS_MODULE,
351 .pf = PF_INET,
352 .hooknum = NF_IP_LOCAL_IN,
353 .priority = NF_IP_PRI_NAT_SEQ_ADJUST,
354};
355
356
357static int init_or_cleanup(int init)
358{ 356{
359 int ret = 0; 357 int ret = 0;
360 358
361 need_conntrack(); 359 need_conntrack();
362 360
363 if (!init) goto cleanup;
364
365#ifdef CONFIG_XFRM 361#ifdef CONFIG_XFRM
366 BUG_ON(ip_nat_decode_session != NULL); 362 BUG_ON(ip_nat_decode_session != NULL);
367 ip_nat_decode_session = nat_decode_session; 363 ip_nat_decode_session = nat_decode_session;
@@ -371,50 +367,13 @@ static int init_or_cleanup(int init)
371 printk("ip_nat_init: can't setup rules.\n"); 367 printk("ip_nat_init: can't setup rules.\n");
372 goto cleanup_decode_session; 368 goto cleanup_decode_session;
373 } 369 }
374 ret = nf_register_hook(&ip_nat_in_ops); 370 ret = nf_register_hooks(ip_nat_ops, ARRAY_SIZE(ip_nat_ops));
375 if (ret < 0) { 371 if (ret < 0) {
376 printk("ip_nat_init: can't register in hook.\n"); 372 printk("ip_nat_init: can't register hooks.\n");
377 goto cleanup_rule_init; 373 goto cleanup_rule_init;
378 } 374 }
379 ret = nf_register_hook(&ip_nat_out_ops);
380 if (ret < 0) {
381 printk("ip_nat_init: can't register out hook.\n");
382 goto cleanup_inops;
383 }
384 ret = nf_register_hook(&ip_nat_adjust_in_ops);
385 if (ret < 0) {
386 printk("ip_nat_init: can't register adjust in hook.\n");
387 goto cleanup_outops;
388 }
389 ret = nf_register_hook(&ip_nat_adjust_out_ops);
390 if (ret < 0) {
391 printk("ip_nat_init: can't register adjust out hook.\n");
392 goto cleanup_adjustin_ops;
393 }
394 ret = nf_register_hook(&ip_nat_local_out_ops);
395 if (ret < 0) {
396 printk("ip_nat_init: can't register local out hook.\n");
397 goto cleanup_adjustout_ops;
398 }
399 ret = nf_register_hook(&ip_nat_local_in_ops);
400 if (ret < 0) {
401 printk("ip_nat_init: can't register local in hook.\n");
402 goto cleanup_localoutops;
403 }
404 return ret; 375 return ret;
405 376
406 cleanup:
407 nf_unregister_hook(&ip_nat_local_in_ops);
408 cleanup_localoutops:
409 nf_unregister_hook(&ip_nat_local_out_ops);
410 cleanup_adjustout_ops:
411 nf_unregister_hook(&ip_nat_adjust_out_ops);
412 cleanup_adjustin_ops:
413 nf_unregister_hook(&ip_nat_adjust_in_ops);
414 cleanup_outops:
415 nf_unregister_hook(&ip_nat_out_ops);
416 cleanup_inops:
417 nf_unregister_hook(&ip_nat_in_ops);
418 cleanup_rule_init: 377 cleanup_rule_init:
419 ip_nat_rule_cleanup(); 378 ip_nat_rule_cleanup();
420 cleanup_decode_session: 379 cleanup_decode_session:
@@ -425,14 +384,14 @@ static int init_or_cleanup(int init)
425 return ret; 384 return ret;
426} 385}
427 386
428static int __init ip_nat_standalone_init(void)
429{
430 return init_or_cleanup(1);
431}
432
433static void __exit ip_nat_standalone_fini(void) 387static void __exit ip_nat_standalone_fini(void)
434{ 388{
435 init_or_cleanup(0); 389 nf_unregister_hooks(ip_nat_ops, ARRAY_SIZE(ip_nat_ops));
390 ip_nat_rule_cleanup();
391#ifdef CONFIG_XFRM
392 ip_nat_decode_session = NULL;
393 synchronize_net();
394#endif
436} 395}
437 396
438module_init(ip_nat_standalone_init); 397module_init(ip_nat_standalone_init);
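The consolidation above replaces six individually registered nf_hook_ops with one array handled by nf_register_hooks()/nf_unregister_hooks(). A minimal sketch of that pattern; my_hook, my_ops and the chosen hook points and priorities are placeholders:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static unsigned int my_hook(unsigned int hooknum, struct sk_buff **pskb,
			    const struct net_device *in,
			    const struct net_device *out,
			    int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;	/* placeholder: pass everything through */
}

static struct nf_hook_ops my_ops[] = {
	{
		.hook		= my_hook,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_IP_PRE_ROUTING,
		.priority	= NF_IP_PRI_FIRST,
	},
	{
		.hook		= my_hook,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_IP_POST_ROUTING,
		.priority	= NF_IP_PRI_LAST,
	},
};

static int __init my_init(void)
{
	/* Registers every entry; on partial failure it unwinds itself. */
	return nf_register_hooks(my_ops, ARRAY_SIZE(my_ops));
}

static void __exit my_fini(void)
{
	nf_unregister_hooks(my_ops, ARRAY_SIZE(my_ops));
}

module_init(my_init);
module_exit(my_fini);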
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 896a244f8f91..b93f0494362f 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -662,15 +662,11 @@ static struct nf_queue_handler nfqh = {
662 .outfn = &ipq_enqueue_packet, 662 .outfn = &ipq_enqueue_packet,
663}; 663};
664 664
665static int 665static int __init ip_queue_init(void)
666init_or_cleanup(int init)
667{ 666{
668 int status = -ENOMEM; 667 int status = -ENOMEM;
669 struct proc_dir_entry *proc; 668 struct proc_dir_entry *proc;
670 669
671 if (!init)
672 goto cleanup;
673
674 netlink_register_notifier(&ipq_nl_notifier); 670 netlink_register_notifier(&ipq_nl_notifier);
675 ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk, 671 ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
676 THIS_MODULE); 672 THIS_MODULE);
@@ -697,11 +693,6 @@ init_or_cleanup(int init)
697 } 693 }
698 return status; 694 return status;
699 695
700cleanup:
701 nf_unregister_queue_handlers(&nfqh);
702 synchronize_net();
703 ipq_flush(NF_DROP);
704
705cleanup_sysctl: 696cleanup_sysctl:
706 unregister_sysctl_table(ipq_sysctl_header); 697 unregister_sysctl_table(ipq_sysctl_header);
707 unregister_netdevice_notifier(&ipq_dev_notifier); 698 unregister_netdevice_notifier(&ipq_dev_notifier);
@@ -717,15 +708,21 @@ cleanup_netlink_notifier:
717 return status; 708 return status;
718} 709}
719 710
720static int __init ip_queue_init(void)
721{
722
723 return init_or_cleanup(1);
724}
725
726static void __exit ip_queue_fini(void) 711static void __exit ip_queue_fini(void)
727{ 712{
728 init_or_cleanup(0); 713 nf_unregister_queue_handlers(&nfqh);
714 synchronize_net();
715 ipq_flush(NF_DROP);
716
717 unregister_sysctl_table(ipq_sysctl_header);
718 unregister_netdevice_notifier(&ipq_dev_notifier);
719 proc_net_remove(IPQ_PROC_FS_NAME);
720
721 sock_release(ipqnl->sk_socket);
722 mutex_lock(&ipqnl_mutex);
723 mutex_unlock(&ipqnl_mutex);
724
725 netlink_unregister_notifier(&ipq_nl_notifier);
729} 726}
730 727
731MODULE_DESCRIPTION("IPv4 packet queue handler"); 728MODULE_DESCRIPTION("IPv4 packet queue handler");
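As in the other files touched here, the two-way init_or_cleanup(int) helper is split into a plain __init constructor and an __exit destructor that undoes each step in reverse. A sketch of the resulting shape; the setup_/teardown_ helpers are placeholders:

#include <linux/init.h>
#include <linux/module.h>

static int setup_resource_a(void)     { return 0; }	/* placeholder */
static int setup_resource_b(void)     { return 0; }	/* placeholder */
static void teardown_resource_a(void) { }		/* placeholder */
static void teardown_resource_b(void) { }		/* placeholder */

static int __init my_module_init(void)
{
	int err;

	err = setup_resource_a();
	if (err)
		return err;

	err = setup_resource_b();
	if (err)
		goto undo_a;		/* unwind only what succeeded */

	return 0;

undo_a:
	teardown_resource_a();
	return err;
}

static void __exit my_module_exit(void)
{
	/* Mirror of the init path, newest resource first. */
	teardown_resource_b();
	teardown_resource_a();
}

module_init(my_module_init);
module_exit(my_module_exit);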
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d5b8cdd361ce..cee3397ec277 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -735,7 +735,7 @@ translate_table(const char *name,
735 } 735 }
736 736
737 /* And one copy for every other CPU */ 737 /* And one copy for every other CPU */
738 for_each_cpu(i) { 738 for_each_possible_cpu(i) {
739 if (newinfo->entries[i] && newinfo->entries[i] != entry0) 739 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
740 memcpy(newinfo->entries[i], entry0, newinfo->size); 740 memcpy(newinfo->entries[i], entry0, newinfo->size);
741 } 741 }
@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t,
788 counters, 788 counters,
789 &i); 789 &i);
790 790
791 for_each_cpu(cpu) { 791 for_each_possible_cpu(cpu) {
792 if (cpu == curcpu) 792 if (cpu == curcpu)
793 continue; 793 continue;
794 i = 0; 794 i = 0;
@@ -956,15 +956,16 @@ struct compat_ipt_standard_target
956 compat_int_t verdict; 956 compat_int_t verdict;
957}; 957};
958 958
959#define IPT_ST_OFFSET (sizeof(struct ipt_standard_target) - \
960 sizeof(struct compat_ipt_standard_target))
961
962struct compat_ipt_standard 959struct compat_ipt_standard
963{ 960{
964 struct compat_ipt_entry entry; 961 struct compat_ipt_entry entry;
965 struct compat_ipt_standard_target target; 962 struct compat_ipt_standard_target target;
966}; 963};
967 964
965#define IPT_ST_LEN XT_ALIGN(sizeof(struct ipt_standard_target))
966#define IPT_ST_COMPAT_LEN COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
967#define IPT_ST_OFFSET (IPT_ST_LEN - IPT_ST_COMPAT_LEN)
968
968static int compat_ipt_standard_fn(void *target, 969static int compat_ipt_standard_fn(void *target,
969 void **dstptr, int *size, int convert) 970 void **dstptr, int *size, int convert)
970{ 971{
@@ -975,35 +976,29 @@ static int compat_ipt_standard_fn(void *target,
975 ret = 0; 976 ret = 0;
976 switch (convert) { 977 switch (convert) {
977 case COMPAT_TO_USER: 978 case COMPAT_TO_USER:
978 pst = (struct ipt_standard_target *)target; 979 pst = target;
979 memcpy(&compat_st.target, &pst->target, 980 memcpy(&compat_st.target, &pst->target,
980 sizeof(struct ipt_entry_target)); 981 sizeof(compat_st.target));
981 compat_st.verdict = pst->verdict; 982 compat_st.verdict = pst->verdict;
982 if (compat_st.verdict > 0) 983 if (compat_st.verdict > 0)
983 compat_st.verdict -= 984 compat_st.verdict -=
984 compat_calc_jump(compat_st.verdict); 985 compat_calc_jump(compat_st.verdict);
985 compat_st.target.u.user.target_size = 986 compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN;
986 sizeof(struct compat_ipt_standard_target); 987 if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN))
987 if (__copy_to_user(*dstptr, &compat_st,
988 sizeof(struct compat_ipt_standard_target)))
989 ret = -EFAULT; 988 ret = -EFAULT;
990 *size -= IPT_ST_OFFSET; 989 *size -= IPT_ST_OFFSET;
991 *dstptr += sizeof(struct compat_ipt_standard_target); 990 *dstptr += IPT_ST_COMPAT_LEN;
992 break; 991 break;
993 case COMPAT_FROM_USER: 992 case COMPAT_FROM_USER:
994 pcompat_st = 993 pcompat_st = target;
995 (struct compat_ipt_standard_target *)target; 994 memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN);
996 memcpy(&st.target, &pcompat_st->target,
997 sizeof(struct ipt_entry_target));
998 st.verdict = pcompat_st->verdict; 995 st.verdict = pcompat_st->verdict;
999 if (st.verdict > 0) 996 if (st.verdict > 0)
1000 st.verdict += compat_calc_jump(st.verdict); 997 st.verdict += compat_calc_jump(st.verdict);
1001 st.target.u.user.target_size = 998 st.target.u.user.target_size = IPT_ST_LEN;
1002 sizeof(struct ipt_standard_target); 999 memcpy(*dstptr, &st, IPT_ST_LEN);
1003 memcpy(*dstptr, &st,
1004 sizeof(struct ipt_standard_target));
1005 *size += IPT_ST_OFFSET; 1000 *size += IPT_ST_OFFSET;
1006 *dstptr += sizeof(struct ipt_standard_target); 1001 *dstptr += IPT_ST_LEN;
1007 break; 1002 break;
1008 case COMPAT_CALC_SIZE: 1003 case COMPAT_CALC_SIZE:
1009 *size += IPT_ST_OFFSET; 1004 *size += IPT_ST_OFFSET;
@@ -1446,7 +1441,7 @@ static int compat_copy_entry_to_user(struct ipt_entry *e,
1446 ret = -EFAULT; 1441 ret = -EFAULT;
1447 origsize = *size; 1442 origsize = *size;
1448 ce = (struct compat_ipt_entry __user *)*dstptr; 1443 ce = (struct compat_ipt_entry __user *)*dstptr;
1449 if (__copy_to_user(ce, e, sizeof(struct ipt_entry))) 1444 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1450 goto out; 1445 goto out;
1451 1446
1452 *dstptr += sizeof(struct compat_ipt_entry); 1447 *dstptr += sizeof(struct compat_ipt_entry);
@@ -1464,9 +1459,9 @@ static int compat_copy_entry_to_user(struct ipt_entry *e,
1464 goto out; 1459 goto out;
1465 ret = -EFAULT; 1460 ret = -EFAULT;
1466 next_offset = e->next_offset - (origsize - *size); 1461 next_offset = e->next_offset - (origsize - *size);
1467 if (__put_user(target_offset, &ce->target_offset)) 1462 if (put_user(target_offset, &ce->target_offset))
1468 goto out; 1463 goto out;
1469 if (__put_user(next_offset, &ce->next_offset)) 1464 if (put_user(next_offset, &ce->next_offset))
1470 goto out; 1465 goto out;
1471 return 0; 1466 return 0;
1472out: 1467out:
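The compat hunk above also drops the unchecked __copy_to_user()/__put_user() variants in favour of copy_to_user()/put_user(), which validate the user pointer themselves. A short sketch of the checked calls; export_value, uptr and val are illustrative:

#include <linux/errno.h>
#include <asm/uaccess.h>

static int export_value(int __user *uptr, int val)
{
	/* copy_to_user() returns the number of bytes it could not copy;
	 * any nonzero result means the user pointer was bad. */
	if (copy_to_user(uptr, &val, sizeof(val)))
		return -EFAULT;

	/* put_user() is the single-value form with the same checking. */
	if (put_user(val, uptr))
		return -EFAULT;

	return 0;
}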
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index e4768a31718b..aad9d28c8d71 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -725,22 +725,17 @@ static struct file_operations clusterip_proc_fops = {
725 725
726#endif /* CONFIG_PROC_FS */ 726#endif /* CONFIG_PROC_FS */
727 727
728static int init_or_cleanup(int fini) 728static int __init ipt_clusterip_init(void)
729{ 729{
730 int ret; 730 int ret;
731 731
732	if (fini)
733		goto cleanup;
734
735	if (ipt_register_target(&clusterip_tgt)) {
736		ret = -EINVAL;
737		goto cleanup_none;
738	}
739
740	if (nf_register_hook(&cip_arp_ops) < 0) {
741		ret = -EINVAL;
742		goto cleanup_target;
743	}

732	ret = ipt_register_target(&clusterip_tgt);
733	if (ret < 0)
734		return ret;
735
736	ret = nf_register_hook(&cip_arp_ops);
737	if (ret < 0)
738		goto cleanup_target;
744 739
745#ifdef CONFIG_PROC_FS 740#ifdef CONFIG_PROC_FS
746 clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", proc_net); 741 clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", proc_net);
@@ -753,31 +748,24 @@ static int init_or_cleanup(int fini)
753 748
754 printk(KERN_NOTICE "ClusterIP Version %s loaded successfully\n", 749 printk(KERN_NOTICE "ClusterIP Version %s loaded successfully\n",
755 CLUSTERIP_VERSION); 750 CLUSTERIP_VERSION);
756
757 return 0; 751 return 0;
758 752
759cleanup:
760 printk(KERN_NOTICE "ClusterIP Version %s unloading\n",
761 CLUSTERIP_VERSION);
762#ifdef CONFIG_PROC_FS
763 remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
764#endif
765cleanup_hook: 753cleanup_hook:
766 nf_unregister_hook(&cip_arp_ops); 754 nf_unregister_hook(&cip_arp_ops);
767cleanup_target: 755cleanup_target:
768 ipt_unregister_target(&clusterip_tgt); 756 ipt_unregister_target(&clusterip_tgt);
769cleanup_none: 757 return ret;
770 return -EINVAL;
771}
772
773static int __init ipt_clusterip_init(void)
774{
775 return init_or_cleanup(0);
776} 758}
777 759
778static void __exit ipt_clusterip_fini(void) 760static void __exit ipt_clusterip_fini(void)
779{ 761{
780	init_or_cleanup(1);
762	printk(KERN_NOTICE "ClusterIP Version %s unloading\n",
763 CLUSTERIP_VERSION);
764#ifdef CONFIG_PROC_FS
765 remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
766#endif
767 nf_unregister_hook(&cip_arp_ops);
768 ipt_unregister_target(&clusterip_tgt);
781} 769}
782 770
783module_init(ipt_clusterip_init); 771module_init(ipt_clusterip_init);
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 39fd4c2a2386..b98f7b08b084 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb,
428 428
429 if (loginfo->logflags & IPT_LOG_NFLOG) 429 if (loginfo->logflags & IPT_LOG_NFLOG)
430 nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, 430 nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
431 loginfo->prefix); 431 "%s", loginfo->prefix);
432 else 432 else
433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, 433 ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
434 loginfo->prefix); 434 loginfo->prefix);
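The one-line change above stops passing the user-supplied log prefix as the format string itself; any '%' sequences in it would otherwise be interpreted as conversions. The same idea in plain user-space C; log_prefix is illustrative:

#include <stdio.h>

static void log_prefix(const char *prefix)
{
	/* Unsafe pattern (what the old call amounted to):
	 *	printf(prefix);
	 * a stray "%s" or "%n" in prefix would be parsed as a conversion. */

	/* Safe pattern, matching the patched nf_log_packet() call: */
	printf("%s", prefix);
}

int main(void)
{
	log_prefix("prefix with a literal %s inside\n");
	return 0;
}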
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 4269a5440d43..0bba3c2bb786 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -106,7 +106,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
106 struct rtable *rt; 106 struct rtable *rt;
107 u_int16_t tmp_port; 107 u_int16_t tmp_port;
108 u_int32_t tmp_addr; 108 u_int32_t tmp_addr;
109 unsigned int tcplen;
110 int needs_ack; 109 int needs_ack;
111 int hh_len; 110 int hh_len;
112 111
@@ -124,13 +123,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
124 return; 123 return;
125 124
126 /* Check checksum */ 125 /* Check checksum */
127	tcplen = oldskb->len - iph->ihl * 4;
126	if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP))
128 if (((hook != NF_IP_LOCAL_IN && oldskb->ip_summed != CHECKSUM_HW) ||
129 (hook == NF_IP_LOCAL_IN &&
130 oldskb->ip_summed != CHECKSUM_UNNECESSARY)) &&
131 csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP,
132 oldskb->ip_summed == CHECKSUM_HW ? oldskb->csum :
133 skb_checksum(oldskb, iph->ihl * 4, tcplen, 0)))
134 return; 127 return;
135 128
136 if ((rt = route_reverse(oldskb, oth, hook)) == NULL) 129 if ((rt = route_reverse(oldskb, oth, hook)) == NULL)
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 143843285702..b847ee409efb 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -821,6 +821,7 @@ checkentry(const char *tablename,
821 /* Create our proc 'status' entry. */ 821 /* Create our proc 'status' entry. */
822 curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent); 822 curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
823 if (!curr_table->status_proc) { 823 if (!curr_table->status_proc) {
824 vfree(hold);
824 printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n"); 825 printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
825 /* Destroy the created table */ 826 /* Destroy the created table */
826 spin_lock_bh(&recent_lock); 827 spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@ checkentry(const char *tablename,
845 spin_unlock_bh(&recent_lock); 846 spin_unlock_bh(&recent_lock);
846 vfree(curr_table->time_info); 847 vfree(curr_table->time_info);
847 vfree(curr_table->hash_table); 848 vfree(curr_table->hash_table);
848 vfree(hold);
849 vfree(curr_table->table); 849 vfree(curr_table->table);
850 vfree(curr_table); 850 vfree(curr_table);
851 return 0; 851 return 0;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 3d80aefe9cfa..7f417484bfbf 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -157,37 +157,20 @@ static int __init iptable_filter_init(void)
157 return ret; 157 return ret;
158 158
159 /* Register hooks */ 159 /* Register hooks */
160 ret = nf_register_hook(&ipt_ops[0]); 160 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
161 if (ret < 0) 161 if (ret < 0)
162 goto cleanup_table; 162 goto cleanup_table;
163 163
164 ret = nf_register_hook(&ipt_ops[1]);
165 if (ret < 0)
166 goto cleanup_hook0;
167
168 ret = nf_register_hook(&ipt_ops[2]);
169 if (ret < 0)
170 goto cleanup_hook1;
171
172 return ret; 164 return ret;
173 165
174 cleanup_hook1:
175 nf_unregister_hook(&ipt_ops[1]);
176 cleanup_hook0:
177 nf_unregister_hook(&ipt_ops[0]);
178 cleanup_table: 166 cleanup_table:
179 ipt_unregister_table(&packet_filter); 167 ipt_unregister_table(&packet_filter);
180
181 return ret; 168 return ret;
182} 169}
183 170
184static void __exit iptable_filter_fini(void) 171static void __exit iptable_filter_fini(void)
185{ 172{
186 unsigned int i; 173 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
187
188 for (i = 0; i < sizeof(ipt_ops)/sizeof(struct nf_hook_ops); i++)
189 nf_unregister_hook(&ipt_ops[i]);
190
191 ipt_unregister_table(&packet_filter); 174 ipt_unregister_table(&packet_filter);
192} 175}
193 176
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 412fc96cc896..397b95cc026b 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -211,49 +211,20 @@ static int __init iptable_mangle_init(void)
211 return ret; 211 return ret;
212 212
213 /* Register hooks */ 213 /* Register hooks */
214 ret = nf_register_hook(&ipt_ops[0]); 214 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
215 if (ret < 0) 215 if (ret < 0)
216 goto cleanup_table; 216 goto cleanup_table;
217 217
218 ret = nf_register_hook(&ipt_ops[1]);
219 if (ret < 0)
220 goto cleanup_hook0;
221
222 ret = nf_register_hook(&ipt_ops[2]);
223 if (ret < 0)
224 goto cleanup_hook1;
225
226 ret = nf_register_hook(&ipt_ops[3]);
227 if (ret < 0)
228 goto cleanup_hook2;
229
230 ret = nf_register_hook(&ipt_ops[4]);
231 if (ret < 0)
232 goto cleanup_hook3;
233
234 return ret; 218 return ret;
235 219
236 cleanup_hook3:
237 nf_unregister_hook(&ipt_ops[3]);
238 cleanup_hook2:
239 nf_unregister_hook(&ipt_ops[2]);
240 cleanup_hook1:
241 nf_unregister_hook(&ipt_ops[1]);
242 cleanup_hook0:
243 nf_unregister_hook(&ipt_ops[0]);
244 cleanup_table: 220 cleanup_table:
245 ipt_unregister_table(&packet_mangler); 221 ipt_unregister_table(&packet_mangler);
246
247 return ret; 222 return ret;
248} 223}
249 224
250static void __exit iptable_mangle_fini(void) 225static void __exit iptable_mangle_fini(void)
251{ 226{
252 unsigned int i; 227 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
253
254 for (i = 0; i < sizeof(ipt_ops)/sizeof(struct nf_hook_ops); i++)
255 nf_unregister_hook(&ipt_ops[i]);
256
257 ipt_unregister_table(&packet_mangler); 228 ipt_unregister_table(&packet_mangler);
258} 229}
259 230
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 03cc79a6160a..7912cce1e1b8 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -101,18 +101,18 @@ ipt_hook(unsigned int hook,
101/* 'raw' is the very first table. */ 101/* 'raw' is the very first table. */
102static struct nf_hook_ops ipt_ops[] = { 102static struct nf_hook_ops ipt_ops[] = {
103 { 103 {
104 .hook = ipt_hook, 104 .hook = ipt_hook,
105 .pf = PF_INET, 105 .pf = PF_INET,
106 .hooknum = NF_IP_PRE_ROUTING, 106 .hooknum = NF_IP_PRE_ROUTING,
107 .priority = NF_IP_PRI_RAW, 107 .priority = NF_IP_PRI_RAW,
108 .owner = THIS_MODULE, 108 .owner = THIS_MODULE,
109 }, 109 },
110 { 110 {
111 .hook = ipt_hook, 111 .hook = ipt_hook,
112 .pf = PF_INET, 112 .pf = PF_INET,
113 .hooknum = NF_IP_LOCAL_OUT, 113 .hooknum = NF_IP_LOCAL_OUT,
114 .priority = NF_IP_PRI_RAW, 114 .priority = NF_IP_PRI_RAW,
115 .owner = THIS_MODULE, 115 .owner = THIS_MODULE,
116 }, 116 },
117}; 117};
118 118
@@ -126,31 +126,20 @@ static int __init iptable_raw_init(void)
126 return ret; 126 return ret;
127 127
128 /* Register hooks */ 128 /* Register hooks */
129 ret = nf_register_hook(&ipt_ops[0]); 129 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
130 if (ret < 0) 130 if (ret < 0)
131 goto cleanup_table; 131 goto cleanup_table;
132 132
133 ret = nf_register_hook(&ipt_ops[1]);
134 if (ret < 0)
135 goto cleanup_hook0;
136
137 return ret; 133 return ret;
138 134
139 cleanup_hook0:
140 nf_unregister_hook(&ipt_ops[0]);
141 cleanup_table: 135 cleanup_table:
142 ipt_unregister_table(&packet_raw); 136 ipt_unregister_table(&packet_raw);
143
144 return ret; 137 return ret;
145} 138}
146 139
147static void __exit iptable_raw_fini(void) 140static void __exit iptable_raw_fini(void)
148{ 141{
149 unsigned int i; 142 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
150
151 for (i = 0; i < sizeof(ipt_ops)/sizeof(struct nf_hook_ops); i++)
152 nf_unregister_hook(&ipt_ops[i]);
153
154 ipt_unregister_table(&packet_raw); 143 ipt_unregister_table(&packet_raw);
155} 144}
156 145
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 4afbc699d3ba..77d974443c7b 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,71 +210,63 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
210 210
211/* Connection tracking may drop packets, but never alters them, so 211/* Connection tracking may drop packets, but never alters them, so
212 make it the first hook. */ 212 make it the first hook. */
213static struct nf_hook_ops ipv4_conntrack_defrag_ops = { 213static struct nf_hook_ops ipv4_conntrack_ops[] = {
214 .hook = ipv4_conntrack_defrag, 214 {
215 .owner = THIS_MODULE, 215 .hook = ipv4_conntrack_defrag,
216 .pf = PF_INET, 216 .owner = THIS_MODULE,
217 .hooknum = NF_IP_PRE_ROUTING, 217 .pf = PF_INET,
218 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 218 .hooknum = NF_IP_PRE_ROUTING,
219}; 219 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
220 220 },
221static struct nf_hook_ops ipv4_conntrack_in_ops = { 221 {
222 .hook = ipv4_conntrack_in, 222 .hook = ipv4_conntrack_in,
223 .owner = THIS_MODULE, 223 .owner = THIS_MODULE,
224 .pf = PF_INET, 224 .pf = PF_INET,
225 .hooknum = NF_IP_PRE_ROUTING, 225 .hooknum = NF_IP_PRE_ROUTING,
226 .priority = NF_IP_PRI_CONNTRACK, 226 .priority = NF_IP_PRI_CONNTRACK,
227}; 227 },
228 228 {
229static struct nf_hook_ops ipv4_conntrack_defrag_local_out_ops = { 229 .hook = ipv4_conntrack_defrag,
230 .hook = ipv4_conntrack_defrag, 230 .owner = THIS_MODULE,
231 .owner = THIS_MODULE, 231 .pf = PF_INET,
232 .pf = PF_INET, 232 .hooknum = NF_IP_LOCAL_OUT,
233 .hooknum = NF_IP_LOCAL_OUT, 233 .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
234 .priority = NF_IP_PRI_CONNTRACK_DEFRAG, 234 },
235}; 235 {
236 236 .hook = ipv4_conntrack_local,
237static struct nf_hook_ops ipv4_conntrack_local_out_ops = { 237 .owner = THIS_MODULE,
238 .hook = ipv4_conntrack_local, 238 .pf = PF_INET,
239 .owner = THIS_MODULE, 239 .hooknum = NF_IP_LOCAL_OUT,
240 .pf = PF_INET, 240 .priority = NF_IP_PRI_CONNTRACK,
241 .hooknum = NF_IP_LOCAL_OUT, 241 },
242 .priority = NF_IP_PRI_CONNTRACK, 242 {
243}; 243 .hook = ipv4_conntrack_help,
244 244 .owner = THIS_MODULE,
245/* helpers */ 245 .pf = PF_INET,
246static struct nf_hook_ops ipv4_conntrack_helper_out_ops = { 246 .hooknum = NF_IP_POST_ROUTING,
247 .hook = ipv4_conntrack_help, 247 .priority = NF_IP_PRI_CONNTRACK_HELPER,
248 .owner = THIS_MODULE, 248 },
249 .pf = PF_INET, 249 {
250 .hooknum = NF_IP_POST_ROUTING, 250 .hook = ipv4_conntrack_help,
251 .priority = NF_IP_PRI_CONNTRACK_HELPER, 251 .owner = THIS_MODULE,
252}; 252 .pf = PF_INET,
253 253 .hooknum = NF_IP_LOCAL_IN,
254static struct nf_hook_ops ipv4_conntrack_helper_in_ops = { 254 .priority = NF_IP_PRI_CONNTRACK_HELPER,
255 .hook = ipv4_conntrack_help, 255 },
256 .owner = THIS_MODULE, 256 {
257 .pf = PF_INET, 257 .hook = ipv4_confirm,
258 .hooknum = NF_IP_LOCAL_IN, 258 .owner = THIS_MODULE,
259 .priority = NF_IP_PRI_CONNTRACK_HELPER, 259 .pf = PF_INET,
260}; 260 .hooknum = NF_IP_POST_ROUTING,
261 261 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
262 262 },
263/* Refragmenter; last chance. */ 263 {
264static struct nf_hook_ops ipv4_conntrack_out_ops = { 264 .hook = ipv4_confirm,
265 .hook = ipv4_confirm, 265 .owner = THIS_MODULE,
266 .owner = THIS_MODULE, 266 .pf = PF_INET,
267 .pf = PF_INET, 267 .hooknum = NF_IP_LOCAL_IN,
268 .hooknum = NF_IP_POST_ROUTING, 268 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
269 .priority = NF_IP_PRI_CONNTRACK_CONFIRM, 269 },
270};
271
272static struct nf_hook_ops ipv4_conntrack_local_in_ops = {
273 .hook = ipv4_confirm,
274 .owner = THIS_MODULE,
275 .pf = PF_INET,
276 .hooknum = NF_IP_LOCAL_IN,
277 .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
278}; 270};
279 271
280#ifdef CONFIG_SYSCTL 272#ifdef CONFIG_SYSCTL
@@ -356,6 +348,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
356 .tuple.dst.u.tcp.port; 348 .tuple.dst.u.tcp.port;
357 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] 349 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
358 .tuple.dst.u3.ip; 350 .tuple.dst.u3.ip;
351 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
359 352
360 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 353 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
361 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); 354 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
@@ -440,16 +433,20 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
440extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp4; 433extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp4;
441extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4; 434extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
442extern struct nf_conntrack_protocol nf_conntrack_protocol_icmp; 435extern struct nf_conntrack_protocol nf_conntrack_protocol_icmp;
443static int init_or_cleanup(int init) 436
437MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
438MODULE_LICENSE("GPL");
439
440static int __init nf_conntrack_l3proto_ipv4_init(void)
444{ 441{
445 int ret = 0; 442 int ret = 0;
446 443
447 if (!init) goto cleanup; 444 need_conntrack();
448 445
449 ret = nf_register_sockopt(&so_getorigdst); 446 ret = nf_register_sockopt(&so_getorigdst);
450 if (ret < 0) { 447 if (ret < 0) {
451 printk(KERN_ERR "Unable to register netfilter socket option\n"); 448 printk(KERN_ERR "Unable to register netfilter socket option\n");
452 goto cleanup_nothing; 449 return ret;
453 } 450 }
454 451
455 ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp4); 452 ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp4);
@@ -476,84 +473,26 @@ static int init_or_cleanup(int init)
476 goto cleanup_icmp; 473 goto cleanup_icmp;
477 } 474 }
478 475
479 ret = nf_register_hook(&ipv4_conntrack_defrag_ops); 476 ret = nf_register_hooks(ipv4_conntrack_ops,
477 ARRAY_SIZE(ipv4_conntrack_ops));
480 if (ret < 0) { 478 if (ret < 0) {
481 printk("nf_conntrack_ipv4: can't register pre-routing defrag hook.\n"); 479 printk("nf_conntrack_ipv4: can't register hooks.\n");
482 goto cleanup_ipv4; 480 goto cleanup_ipv4;
483 } 481 }
484 ret = nf_register_hook(&ipv4_conntrack_defrag_local_out_ops);
485 if (ret < 0) {
486 printk("nf_conntrack_ipv4: can't register local_out defrag hook.\n");
487 goto cleanup_defragops;
488 }
489
490 ret = nf_register_hook(&ipv4_conntrack_in_ops);
491 if (ret < 0) {
492 printk("nf_conntrack_ipv4: can't register pre-routing hook.\n");
493 goto cleanup_defraglocalops;
494 }
495
496 ret = nf_register_hook(&ipv4_conntrack_local_out_ops);
497 if (ret < 0) {
498 printk("nf_conntrack_ipv4: can't register local out hook.\n");
499 goto cleanup_inops;
500 }
501
502 ret = nf_register_hook(&ipv4_conntrack_helper_in_ops);
503 if (ret < 0) {
504 printk("nf_conntrack_ipv4: can't register local helper hook.\n");
505 goto cleanup_inandlocalops;
506 }
507
508 ret = nf_register_hook(&ipv4_conntrack_helper_out_ops);
509 if (ret < 0) {
510 printk("nf_conntrack_ipv4: can't register postrouting helper hook.\n");
511 goto cleanup_helperinops;
512 }
513
514 ret = nf_register_hook(&ipv4_conntrack_out_ops);
515 if (ret < 0) {
516 printk("nf_conntrack_ipv4: can't register post-routing hook.\n");
517 goto cleanup_helperoutops;
518 }
519
520 ret = nf_register_hook(&ipv4_conntrack_local_in_ops);
521 if (ret < 0) {
522 printk("nf_conntrack_ipv4: can't register local in hook.\n");
523 goto cleanup_inoutandlocalops;
524 }
525
526#ifdef CONFIG_SYSCTL 482#ifdef CONFIG_SYSCTL
527 nf_ct_ipv4_sysctl_header = register_sysctl_table(nf_ct_net_table, 0); 483 nf_ct_ipv4_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
528 if (nf_ct_ipv4_sysctl_header == NULL) { 484 if (nf_ct_ipv4_sysctl_header == NULL) {
529 printk("nf_conntrack: can't register to sysctl.\n"); 485 printk("nf_conntrack: can't register to sysctl.\n");
530 ret = -ENOMEM; 486 ret = -ENOMEM;
531 goto cleanup_localinops; 487 goto cleanup_hooks;
532 } 488 }
533#endif 489#endif
534 return ret; 490 return ret;
535 491
536 cleanup:
537 synchronize_net();
538#ifdef CONFIG_SYSCTL 492#ifdef CONFIG_SYSCTL
539 unregister_sysctl_table(nf_ct_ipv4_sysctl_header); 493 cleanup_hooks:
540 cleanup_localinops: 494 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
541#endif 495#endif
542 nf_unregister_hook(&ipv4_conntrack_local_in_ops);
543 cleanup_inoutandlocalops:
544 nf_unregister_hook(&ipv4_conntrack_out_ops);
545 cleanup_helperoutops:
546 nf_unregister_hook(&ipv4_conntrack_helper_out_ops);
547 cleanup_helperinops:
548 nf_unregister_hook(&ipv4_conntrack_helper_in_ops);
549 cleanup_inandlocalops:
550 nf_unregister_hook(&ipv4_conntrack_local_out_ops);
551 cleanup_inops:
552 nf_unregister_hook(&ipv4_conntrack_in_ops);
553 cleanup_defraglocalops:
554 nf_unregister_hook(&ipv4_conntrack_defrag_local_out_ops);
555 cleanup_defragops:
556 nf_unregister_hook(&ipv4_conntrack_defrag_ops);
557 cleanup_ipv4: 496 cleanup_ipv4:
558 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); 497 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
559 cleanup_icmp: 498 cleanup_icmp:
@@ -564,22 +503,21 @@ static int init_or_cleanup(int init)
564 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4); 503 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4);
565 cleanup_sockopt: 504 cleanup_sockopt:
566 nf_unregister_sockopt(&so_getorigdst); 505 nf_unregister_sockopt(&so_getorigdst);
567 cleanup_nothing:
568 return ret; 506 return ret;
569} 507}
570 508
571MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
572MODULE_LICENSE("GPL");
573
574static int __init nf_conntrack_l3proto_ipv4_init(void)
575{
576 need_conntrack();
577 return init_or_cleanup(1);
578}
579
580static void __exit nf_conntrack_l3proto_ipv4_fini(void) 509static void __exit nf_conntrack_l3proto_ipv4_fini(void)
581{ 510{
582	init_or_cleanup(0);
511	synchronize_net();
512#ifdef CONFIG_SYSCTL
513 unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
514#endif
515 nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
516 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
517 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp);
518 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4);
519 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4);
520 nf_unregister_sockopt(&so_getorigdst);
583} 521}
584 522
585module_init(nf_conntrack_l3proto_ipv4_init); 523module_init(nf_conntrack_l3proto_ipv4_init);
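The added memset() above clears sockaddr_in.sin_zero before the structure is copied to user space, so no stale kernel stack bytes leak out. A user-space style sketch of the same habit; fill_addr is illustrative:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void fill_addr(struct sockaddr_in *sin, uint32_t ip_be, uint16_t port_be)
{
	sin->sin_family = AF_INET;
	sin->sin_port = port_be;
	sin->sin_addr.s_addr = ip_be;
	/* Without this the padding keeps whatever was on the stack. */
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}

int main(void)
{
	struct sockaddr_in sin;

	fill_addr(&sin, htonl(0x7f000001), htons(80));	/* 127.0.0.1:80 */
	return 0;
}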
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 52dc175be39a..4b0d361cc6e6 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -235,30 +235,14 @@ icmp_error(struct sk_buff *skb, unsigned int dataoff,
235 } 235 }
236 236
237 /* See ip_conntrack_proto_tcp.c */ 237 /* See ip_conntrack_proto_tcp.c */
238 if (hooknum != NF_IP_PRE_ROUTING) 238 if (hooknum == NF_IP_PRE_ROUTING &&
239 goto checksum_skipped; 239 nf_ip_checksum(skb, hooknum, dataoff, 0)) {
240
241 switch (skb->ip_summed) {
242 case CHECKSUM_HW:
243 if (!(u16)csum_fold(skb->csum))
244 break;
245 if (LOG_INVALID(IPPROTO_ICMP)) 240 if (LOG_INVALID(IPPROTO_ICMP))
246 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 241 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
247 "nf_ct_icmp: bad HW ICMP checksum "); 242 "nf_ct_icmp: bad HW ICMP checksum ");
248 return -NF_ACCEPT; 243 return -NF_ACCEPT;
249 case CHECKSUM_NONE:
250 if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) {
251 if (LOG_INVALID(IPPROTO_ICMP))
252 nf_log_packet(PF_INET, 0, skb, NULL, NULL,
253 NULL,
254 "nf_ct_icmp: bad ICMP checksum ");
255 return -NF_ACCEPT;
256 }
257 default:
258 break;
259 } 244 }
260 245
261checksum_skipped:
262 /* 246 /*
263 * 18 is the highest 'known' ICMP type. Anything else is a mystery 247 * 18 is the highest 'known' ICMP type. Anything else is a mystery
264 * 248 *
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 1b167c4bb3be..d61e2a9d394d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
49 int res = 0; 49 int res = 0;
50 int cpu; 50 int cpu;
51 51
52 for_each_cpu(cpu) 52 for_each_possible_cpu(cpu)
53 res += proto->stats[cpu].inuse; 53 res += proto->stats[cpu].inuse;
54 54
55 return res; 55 return res;
@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt)
91 unsigned long res = 0; 91 unsigned long res = 0;
92 int i; 92 int i;
93 93
94 for_each_cpu(i) { 94 for_each_possible_cpu(i) {
95 res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); 95 res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
96 res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); 96 res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
97 } 97 }
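for_each_possible_cpu() is the renamed iterator used by these folding loops: it walks every CPU that may ever come online, so counts accumulated on a currently offline CPU are still included. A minimal sketch of the pattern; my_counter and fold_my_counter are illustrative:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static unsigned long fold_my_counter(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Sum over all possible CPUs, not just the ones online right now. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(my_counter, cpu);

	return sum;
}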
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 94fcbc5e5a1b..cc9423de7311 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2741,7 +2741,10 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2741 /* Reserve room for dummy headers, this skb can pass 2741 /* Reserve room for dummy headers, this skb can pass
2742 through good chunk of routing engine. 2742 through good chunk of routing engine.
2743 */ 2743 */
2744	skb->mac.raw = skb->data;
2744	skb->mac.raw = skb->nh.raw = skb->data;
2745
2746 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2747 skb->nh.iph->protocol = IPPROTO_ICMP;
2745 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); 2748 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2746 2749
2747 if (rta[RTA_SRC - 1]) 2750 if (rta[RTA_SRC - 1])
@@ -3083,7 +3086,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
3083 memcpy(dst, src, length); 3086 memcpy(dst, src, length);
3084 3087
3085 /* Add the other cpus in, one int at a time */ 3088 /* Add the other cpus in, one int at a time */
3086 for_each_cpu(i) { 3089 for_each_possible_cpu(i) {
3087 unsigned int j; 3090 unsigned int j;
3088 3091
3089 src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; 3092 src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 87f68e787d0c..e2b7b8055037 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1468,6 +1468,7 @@ void tcp_close(struct sock *sk, long timeout)
1468{ 1468{
1469 struct sk_buff *skb; 1469 struct sk_buff *skb;
1470 int data_was_unread = 0; 1470 int data_was_unread = 0;
1471 int state;
1471 1472
1472 lock_sock(sk); 1473 lock_sock(sk);
1473 sk->sk_shutdown = SHUTDOWN_MASK; 1474 sk->sk_shutdown = SHUTDOWN_MASK;
@@ -1544,6 +1545,11 @@ void tcp_close(struct sock *sk, long timeout)
1544 sk_stream_wait_close(sk, timeout); 1545 sk_stream_wait_close(sk, timeout);
1545 1546
1546adjudge_to_death: 1547adjudge_to_death:
1548 state = sk->sk_state;
1549 sock_hold(sk);
1550 sock_orphan(sk);
1551 atomic_inc(sk->sk_prot->orphan_count);
1552
1547 /* It is the last release_sock in its life. It will remove backlog. */ 1553 /* It is the last release_sock in its life. It will remove backlog. */
1548 release_sock(sk); 1554 release_sock(sk);
1549 1555
@@ -1555,8 +1561,9 @@ adjudge_to_death:
1555 bh_lock_sock(sk); 1561 bh_lock_sock(sk);
1556 BUG_TRAP(!sock_owned_by_user(sk)); 1562 BUG_TRAP(!sock_owned_by_user(sk));
1557 1563
1558	sock_hold(sk);
1559	sock_orphan(sk);

1564	/* Have we already been destroyed by a softirq or backlog? */
1565	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1566 goto out;
1560 1567
1561 /* This is a (useful) BSD violating of the RFC. There is a 1568 /* This is a (useful) BSD violating of the RFC. There is a
1562 * problem with TCP as specified in that the other end could 1569 * problem with TCP as specified in that the other end could
@@ -1584,7 +1591,6 @@ adjudge_to_death:
1584 if (tmo > TCP_TIMEWAIT_LEN) { 1591 if (tmo > TCP_TIMEWAIT_LEN) {
1585 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); 1592 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
1586 } else { 1593 } else {
1587 atomic_inc(sk->sk_prot->orphan_count);
1588 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1594 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1589 goto out; 1595 goto out;
1590 } 1596 }
@@ -1603,7 +1609,6 @@ adjudge_to_death:
1603 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 1609 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1604 } 1610 }
1605 } 1611 }
1606 atomic_inc(sk->sk_prot->orphan_count);
1607 1612
1608 if (sk->sk_state == TCP_CLOSE) 1613 if (sk->sk_state == TCP_CLOSE)
1609 inet_csk_destroy_sock(sk); 1614 inet_csk_destroy_sock(sk);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index e0e9d1383c7c..ba7c63ca5bb1 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -135,10 +135,11 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
135 135
136 /* Do additive increase */ 136 /* Do additive increase */
137 if (tp->snd_cwnd < tp->snd_cwnd_clamp) { 137 if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
138		tp->snd_cwnd_cnt += ca->ai;
139		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
140			tp->snd_cwnd++;
141			tp->snd_cwnd_cnt -= tp->snd_cwnd;

138		/* cwnd = cwnd + a(w) / cwnd */
139		tp->snd_cwnd_cnt += ca->ai + 1;
140		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
141			tp->snd_cwnd_cnt -= tp->snd_cwnd;
142			tp->snd_cwnd++;
142 } 143 }
143 } 144 }
144 } 145 }
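The HSTCP hunk above implements cwnd = cwnd + a(w)/cwnd with an integer accumulator: an increment is added per ACK and cwnd grows by one whenever the count reaches cwnd, with the subtraction done before the increment so the comparison uses the old window. A standalone simulation of that accumulator; the increment value is illustrative, not taken from the HSTCP table:

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 38, cwnd_cnt = 0;
	unsigned int increment = 2;	/* stands in for a(w) */

	for (int ack = 0; ack < 100; ack++) {
		cwnd_cnt += increment;
		if (cwnd_cnt >= cwnd) {
			cwnd_cnt -= cwnd;	/* subtract before growing, as in the fix */
			cwnd++;
		}
	}
	printf("cwnd after 100 acks: %u\n", cwnd);
	return 0;
}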
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 195d83584558..b5521a9d3dc1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1649 * Hence, we can detect timed out packets during fast 1649 * Hence, we can detect timed out packets during fast
1650 * retransmit without falling to slow start. 1650 * retransmit without falling to slow start.
1651 */ 1651 */
1652 if (tcp_head_timedout(sk, tp)) { 1652 if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
1653 struct sk_buff *skb; 1653 struct sk_buff *skb;
1654 1654
1655 skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint 1655 skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -4559,7 +4559,6 @@ discard:
4559 4559
4560EXPORT_SYMBOL(sysctl_tcp_ecn); 4560EXPORT_SYMBOL(sysctl_tcp_ecn);
4561EXPORT_SYMBOL(sysctl_tcp_reordering); 4561EXPORT_SYMBOL(sysctl_tcp_reordering);
4562EXPORT_SYMBOL(sysctl_tcp_abc);
4563EXPORT_SYMBOL(tcp_parse_options); 4562EXPORT_SYMBOL(tcp_parse_options);
4564EXPORT_SYMBOL(tcp_rcv_established); 4563EXPORT_SYMBOL(tcp_rcv_established);
4565EXPORT_SYMBOL(tcp_rcv_state_process); 4564EXPORT_SYMBOL(tcp_rcv_state_process);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9e85c0416109..672950e54c49 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1859,5 +1859,4 @@ EXPORT_SYMBOL(tcp_proc_unregister);
1859#endif 1859#endif
1860EXPORT_SYMBOL(sysctl_local_port_range); 1860EXPORT_SYMBOL(sysctl_local_port_range);
1861EXPORT_SYMBOL(sysctl_tcp_low_latency); 1861EXPORT_SYMBOL(sysctl_tcp_low_latency);
1862EXPORT_SYMBOL(sysctl_tcp_tw_reuse);
1863 1862
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9d79546d384e..f33c9dddaa12 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -59,9 +59,6 @@ int sysctl_tcp_tso_win_divisor = 3;
59int sysctl_tcp_mtu_probing = 0; 59int sysctl_tcp_mtu_probing = 0;
60int sysctl_tcp_base_mss = 512; 60int sysctl_tcp_base_mss = 512;
61 61
62EXPORT_SYMBOL(sysctl_tcp_mtu_probing);
63EXPORT_SYMBOL(sysctl_tcp_base_mss);
64
65static void update_send_head(struct sock *sk, struct tcp_sock *tp, 62static void update_send_head(struct sock *sk, struct tcp_sock *tp,
66 struct sk_buff *skb) 63 struct sk_buff *skb)
67{ 64{
@@ -468,7 +465,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
468 TCP_INC_STATS(TCP_MIB_OUTSEGS); 465 TCP_INC_STATS(TCP_MIB_OUTSEGS);
469 466
470 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 467 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
471 if (unlikely(err <= 0)) 468 if (likely(err <= 0))
472 return err; 469 return err;
473 470
474 tcp_enter_cwr(sk); 471 tcp_enter_cwr(sk);
@@ -536,6 +533,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
536 struct tcp_sock *tp = tcp_sk(sk); 533 struct tcp_sock *tp = tcp_sk(sk);
537 struct sk_buff *buff; 534 struct sk_buff *buff;
538 int nsize, old_factor; 535 int nsize, old_factor;
536 int nlen;
539 u16 flags; 537 u16 flags;
540 538
541 BUG_ON(len > skb->len); 539 BUG_ON(len > skb->len);
@@ -554,7 +552,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
554 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 552 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
555 if (buff == NULL) 553 if (buff == NULL)
556 return -ENOMEM; /* We'll just try again later. */ 554 return -ENOMEM; /* We'll just try again later. */
555
557 sk_charge_skb(sk, buff); 556 sk_charge_skb(sk, buff);
557 nlen = skb->len - len - nsize;
558 buff->truesize += nlen;
559 skb->truesize -= nlen;
558 560
559 /* Correct the sequence numbers. */ 561 /* Correct the sequence numbers. */
560 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 562 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
@@ -640,7 +642,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
640 * eventually). The difference is that pulled data not copied, but 642 * eventually). The difference is that pulled data not copied, but
641 * immediately discarded. 643 * immediately discarded.
642 */ 644 */
643static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len) 645static void __pskb_trim_head(struct sk_buff *skb, int len)
644{ 646{
645 int i, k, eat; 647 int i, k, eat;
646 648
@@ -665,7 +667,6 @@ static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
665 skb->tail = skb->data; 667 skb->tail = skb->data;
666 skb->data_len -= len; 668 skb->data_len -= len;
667 skb->len = skb->data_len; 669 skb->len = skb->data_len;
668 return skb->tail;
669} 670}
670 671
671int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 672int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
@@ -674,12 +675,11 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
674 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 675 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
675 return -ENOMEM; 676 return -ENOMEM;
676 677
677	if (len <= skb_headlen(skb)) {
678		__skb_pull(skb, len);
679	} else {
680		if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL)
681			return -ENOMEM;
682	}

678	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
679	if (unlikely(len < skb_headlen(skb)))
680		__skb_pull(skb, len);
681	else
682		__pskb_trim_head(skb, len - skb_headlen(skb));
683 683
684 TCP_SKB_CB(skb)->seq += len; 684 TCP_SKB_CB(skb)->seq += len;
685 skb->ip_summed = CHECKSUM_HW; 685 skb->ip_summed = CHECKSUM_HW;
@@ -1040,7 +1040,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1040 if (unlikely(buff == NULL)) 1040 if (unlikely(buff == NULL))
1041 return -ENOMEM; 1041 return -ENOMEM;
1042 1042
1043 buff->truesize = nlen; 1043 sk_charge_skb(sk, buff);
1044 buff->truesize += nlen;
1044 skb->truesize -= nlen; 1045 skb->truesize -= nlen;
1045 1046
1046 /* Correct the sequence numbers. */ 1047 /* Correct the sequence numbers. */
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 0d7d386dac22..8d30c48f090e 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -8,6 +8,8 @@
8#include <linux/mutex.h> 8#include <linux/mutex.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/skbuff.h> 10#include <linux/skbuff.h>
11#include <net/icmp.h>
12#include <net/ip.h>
11#include <net/protocol.h> 13#include <net/protocol.h>
12#include <net/xfrm.h> 14#include <net/xfrm.h>
13 15
@@ -70,10 +72,16 @@ static int tunnel4_rcv(struct sk_buff *skb)
70{ 72{
71 struct xfrm_tunnel *handler; 73 struct xfrm_tunnel *handler;
72 74
75 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
76 goto drop;
77
73 for (handler = tunnel4_handlers; handler; handler = handler->next) 78 for (handler = tunnel4_handlers; handler; handler = handler->next)
74 if (!handler->handler(skb)) 79 if (!handler->handler(skb))
75 return 0; 80 return 0;
76 81
82 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
83
84drop:
77 kfree_skb(skb); 85 kfree_skb(skb);
78 return 0; 86 return 0;
79} 87}
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index e1b8f4b90d80..3e174c83bfe7 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -37,8 +37,6 @@ static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
37{ 37{
38 switch (nexthdr) { 38 switch (nexthdr) {
39 case IPPROTO_IPIP: 39 case IPPROTO_IPIP:
40 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
41 return -EINVAL;
42 *spi = skb->nh.iph->saddr; 40 *spi = skb->nh.iph->saddr;
43 *seq = 0; 41 *seq = 0;
44 return 0; 42 return 0;
@@ -90,7 +88,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
90 if (unlikely(x->km.state != XFRM_STATE_VALID)) 88 if (unlikely(x->km.state != XFRM_STATE_VALID))
91 goto drop_unlock; 89 goto drop_unlock;
92 90
93 if (x->encap->encap_type != encap_type) 91 if ((x->encap ? x->encap->encap_type : 0) != encap_type)
94 goto drop_unlock; 92 goto drop_unlock;
95 93
96 if (x->props.replay_window && xfrm_replay_check(x, seq)) 94 if (x->props.replay_window && xfrm_replay_check(x, seq))
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 32ad229b4fed..4ef8efaf6a67 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -62,7 +62,7 @@ static void xfrm4_encap(struct sk_buff *skb)
62 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? 62 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
63 0 : (iph->frag_off & htons(IP_DF)); 63 0 : (iph->frag_off & htons(IP_DF));
64 if (!top_iph->frag_off) 64 if (!top_iph->frag_off)
65 __ip_select_ident(top_iph, dst, 0); 65 __ip_select_ident(top_iph, dst->child, 0);
66 66
67 top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT); 67 top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT);
68 68
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index f285bbf296e2..8604c747bca5 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -221,7 +221,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl)
221 if (pskb_may_pull(skb, xprth + 4 - skb->data)) { 221 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
222 u16 *ipcomp_hdr = (u16 *)xprth; 222 u16 *ipcomp_hdr = (u16 *)xprth;
223 223
224 fl->fl_ipsec_spi = ntohl(ntohs(ipcomp_hdr[1])); 224 fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
225 } 225 }
226 break; 226 break;
227 default: 227 default:
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 2a1e7e45b890..a18d4256372c 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -485,15 +485,27 @@ static struct tlvtype_proc tlvprochopopt_lst[] = {
485 { -1, } 485 { -1, }
486}; 486};
487 487
488int ipv6_parse_hopopts(struct sk_buff *skb, int nhoff) 488int ipv6_parse_hopopts(struct sk_buff *skb)
489{ 489{
490 struct inet6_skb_parm *opt = IP6CB(skb); 490 struct inet6_skb_parm *opt = IP6CB(skb);
491 491
492 /*
493 * skb->nh.raw is equal to skb->data, and
494 * skb->h.raw - skb->nh.raw is always equal to
495 * sizeof(struct ipv6hdr) by definition of
496 * hop-by-hop options.
497 */
498 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
499 !pskb_may_pull(skb, sizeof(struct ipv6hdr) + ((skb->h.raw[1] + 1) << 3))) {
500 kfree_skb(skb);
501 return -1;
502 }
503
492 opt->hop = sizeof(struct ipv6hdr); 504 opt->hop = sizeof(struct ipv6hdr);
493 if (ip6_parse_tlv(tlvprochopopt_lst, skb)) { 505 if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
494 skb->h.raw += (skb->h.raw[1]+1)<<3; 506 skb->h.raw += (skb->h.raw[1]+1)<<3;
495 opt->nhoff = sizeof(struct ipv6hdr); 507 opt->nhoff = sizeof(struct ipv6hdr);
496 return sizeof(struct ipv6hdr); 508 return 1;
497 } 509 }
498 return -1; 510 return -1;
499} 511}
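
The two pskb_may_pull() calls added to ipv6_parse_hopopts() encode the Hop-by-Hop length rule: the header's second byte (skb->h.raw[1]) counts 8-octet units beyond the mandatory first 8 octets, so the full options area spans ((hdrlen + 1) << 3) bytes. Worked numbers, purely illustrative:

        /* first pull: 40 (IPv6 header) + 8 bytes, just enough to read raw[1] safely;
         * second pull, for raw[1] == 2: 40 + ((2 + 1) << 3) = 40 + 24 = 64 bytes */
        unsigned int hbh_len = (skb->h.raw[1] + 1) << 3;
        unsigned int need    = sizeof(struct ipv6hdr) + hbh_len;
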
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 21eb725e885f..1044b6fce0d5 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
717 struct sock *sk; 717 struct sock *sk;
718 int err, i, j; 718 int err, i, j;
719 719
720 for_each_cpu(i) { 720 for_each_possible_cpu(i) {
721 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, 721 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
722 &per_cpu(__icmpv6_socket, i)); 722 &per_cpu(__icmpv6_socket, i));
723 if (err < 0) { 723 if (err < 0) {
@@ -763,7 +763,7 @@ void icmpv6_cleanup(void)
763{ 763{
764 int i; 764 int i;
765 765
766 for_each_cpu(i) { 766 for_each_possible_cpu(i) {
767 sock_release(per_cpu(__icmpv6_socket, i)); 767 sock_release(per_cpu(__icmpv6_socket, i));
768 } 768 }
769 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); 769 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
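
The for_each_cpu() to for_each_possible_cpu() renames that recur through this series do not change behaviour; they spell out that per-CPU resources allocated up front must be walked over every CPU that can ever come online, not only the currently online ones. A minimal sketch with a hypothetical per-CPU counter (name and counter are illustrative):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, pkt_count);        /* hypothetical counter */

static unsigned long pkt_count_total(void)
{
        unsigned long sum = 0;
        int cpu;

        /* include CPUs that are possible but currently offline, otherwise
         * counts accumulated before a CPU went down would be lost */
        for_each_possible_cpu(cpu)
                sum += per_cpu(pkt_count, cpu);
        return sum;
}
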
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index f8f3a37a1494..eb2865d5ae28 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -173,6 +173,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
173 173
174 if (err) { 174 if (err) {
175 sk->sk_err_soft = -err; 175 sk->sk_err_soft = -err;
176 kfree_skb(skb);
176 return err; 177 return err;
177 } 178 }
178 179
@@ -181,6 +182,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
181 182
182 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) { 183 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
183 sk->sk_route_caps = 0; 184 sk->sk_route_caps = 0;
185 kfree_skb(skb);
184 return err; 186 return err;
185 } 187 }
186 188
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index bb8ffb8a14c5..2ae84c961678 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -23,6 +23,86 @@
23#include <net/inet6_hashtables.h> 23#include <net/inet6_hashtables.h>
24#include <net/ip.h> 24#include <net/ip.h>
25 25
26void __inet6_hash(struct inet_hashinfo *hashinfo,
27 struct sock *sk)
28{
29 struct hlist_head *list;
30 rwlock_t *lock;
31
32 BUG_TRAP(sk_unhashed(sk));
33
34 if (sk->sk_state == TCP_LISTEN) {
35 list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
36 lock = &hashinfo->lhash_lock;
37 inet_listen_wlock(hashinfo);
38 } else {
39 unsigned int hash;
40 sk->sk_hash = hash = inet6_sk_ehashfn(sk);
41 hash &= (hashinfo->ehash_size - 1);
42 list = &hashinfo->ehash[hash].chain;
43 lock = &hashinfo->ehash[hash].lock;
44 write_lock(lock);
45 }
46
47 __sk_add_node(sk, list);
48 sock_prot_inc_use(sk->sk_prot);
49 write_unlock(lock);
50}
51EXPORT_SYMBOL(__inet6_hash);
52
53/*
54 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
55 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
56 *
57 * The sockhash lock must be held as a reader here.
58 */
59struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
60 const struct in6_addr *saddr,
61 const u16 sport,
62 const struct in6_addr *daddr,
63 const u16 hnum,
64 const int dif)
65{
66 struct sock *sk;
67 const struct hlist_node *node;
68 const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
69 /* Optimize here for direct hit, only listening connections can
70 * have wildcards anyways.
71 */
72 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
73 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
74
75 prefetch(head->chain.first);
76 read_lock(&head->lock);
77 sk_for_each(sk, node, &head->chain) {
78 /* For IPV6 do the cheaper port and family tests first. */
79 if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
80 goto hit; /* You sunk my battleship! */
81 }
82 /* Must check for a TIME_WAIT'er before going to listener hash. */
83 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
84 const struct inet_timewait_sock *tw = inet_twsk(sk);
85
86 if(*((__u32 *)&(tw->tw_dport)) == ports &&
87 sk->sk_family == PF_INET6) {
88 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
89
90 if (ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
91 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
92 (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
93 goto hit;
94 }
95 }
96 read_unlock(&head->lock);
97 return NULL;
98
99hit:
100 sock_hold(sk);
101 read_unlock(&head->lock);
102 return sk;
103}
104EXPORT_SYMBOL(__inet6_lookup_established);
105
26struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo, 106struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
27 const struct in6_addr *daddr, 107 const struct in6_addr *daddr,
28 const unsigned short hnum, const int dif) 108 const unsigned short hnum, const int dif)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 29f73592e68e..aceee252503d 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -114,11 +114,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
114 } 114 }
115 115
116 if (hdr->nexthdr == NEXTHDR_HOP) { 116 if (hdr->nexthdr == NEXTHDR_HOP) {
117 if (ipv6_parse_hopopts(skb, IP6CB(skb)->nhoff) < 0) { 117 if (ipv6_parse_hopopts(skb) < 0) {
118 IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 118 IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
119 return 0; 119 return 0;
120 } 120 }
121 hdr = skb->nh.ipv6h;
122 } 121 }
123 122
124 return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); 123 return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index ff9040c92556..a995796b5a57 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -519,9 +519,6 @@ ip6ip6_rcv(struct sk_buff *skb)
519 struct ipv6hdr *ipv6h; 519 struct ipv6hdr *ipv6h;
520 struct ip6_tnl *t; 520 struct ip6_tnl *t;
521 521
522 if (!pskb_may_pull(skb, sizeof (*ipv6h)))
523 goto discard;
524
525 ipv6h = skb->nh.ipv6h; 522 ipv6h = skb->nh.ipv6h;
526 523
527 read_lock(&ip6ip6_lock); 524 read_lock(&ip6ip6_lock);
@@ -529,8 +526,7 @@ ip6ip6_rcv(struct sk_buff *skb)
529 if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) { 526 if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
530 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 527 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
531 read_unlock(&ip6ip6_lock); 528 read_unlock(&ip6ip6_lock);
532 kfree_skb(skb); 529 goto discard;
533 return 0;
534 } 530 }
535 531
536 if (!(t->parms.flags & IP6_TNL_F_CAP_RCV)) { 532 if (!(t->parms.flags & IP6_TNL_F_CAP_RCV)) {
@@ -557,9 +553,11 @@ ip6ip6_rcv(struct sk_buff *skb)
557 return 0; 553 return 0;
558 } 554 }
559 read_unlock(&ip6ip6_lock); 555 read_unlock(&ip6ip6_lock);
560 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
561discard:
562 return 1; 556 return 1;
557
558discard:
559 kfree_skb(skb);
560 return 0;
563} 561}
564 562
565static inline struct ipv6_txoptions *create_tel(__u8 encap_limit) 563static inline struct ipv6_txoptions *create_tel(__u8 encap_limit)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 00f3fadfcca7..48636436028a 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -208,7 +208,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
208 if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG) 208 if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
209 return; 209 return;
210 210
211 spi = ntohl(ntohs(ipcomph->cpi)); 211 spi = htonl(ntohs(ipcomph->cpi));
212 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 212 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
213 if (!x) 213 if (!x)
214 return; 214 return;
@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void)
290 if (!scratches) 290 if (!scratches)
291 return; 291 return;
292 292
293 for_each_cpu(i) { 293 for_each_possible_cpu(i) {
294 void *scratch = *per_cpu_ptr(scratches, i); 294 void *scratch = *per_cpu_ptr(scratches, i);
295 295
296 vfree(scratch); 296 vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void)
313 313
314 ipcomp6_scratches = scratches; 314 ipcomp6_scratches = scratches;
315 315
316 for_each_cpu(i) { 316 for_each_possible_cpu(i) {
317 void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); 317 void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
318 if (!scratch) 318 if (!scratch)
319 return NULL; 319 return NULL;
@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
344 if (!tfms) 344 if (!tfms)
345 return; 345 return;
346 346
347 for_each_cpu(cpu) { 347 for_each_possible_cpu(cpu) {
348 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); 348 struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
349 crypto_free_tfm(tfm); 349 crypto_free_tfm(tfm);
350 } 350 }
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
384 if (!tfms) 384 if (!tfms)
385 goto error; 385 goto error;
386 386
387 for_each_cpu(cpu) { 387 for_each_possible_cpu(cpu) {
388 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); 388 struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
389 if (!tfm) 389 if (!tfm)
390 goto error; 390 goto error;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d750cfc019dc..395a417ba955 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -7,6 +7,7 @@
7#include <net/ipv6.h> 7#include <net/ipv6.h>
8#include <net/ip6_route.h> 8#include <net/ip6_route.h>
9#include <net/xfrm.h> 9#include <net/xfrm.h>
10#include <net/ip6_checksum.h>
10 11
11int ip6_route_me_harder(struct sk_buff *skb) 12int ip6_route_me_harder(struct sk_buff *skb)
12{ 13{
@@ -54,7 +55,7 @@ struct ip6_rt_info {
54 struct in6_addr saddr; 55 struct in6_addr saddr;
55}; 56};
56 57
57static void save(const struct sk_buff *skb, struct nf_info *info) 58static void nf_ip6_saveroute(const struct sk_buff *skb, struct nf_info *info)
58{ 59{
59 struct ip6_rt_info *rt_info = nf_info_reroute(info); 60 struct ip6_rt_info *rt_info = nf_info_reroute(info);
60 61
@@ -66,7 +67,7 @@ static void save(const struct sk_buff *skb, struct nf_info *info)
66 } 67 }
67} 68}
68 69
69static int reroute(struct sk_buff **pskb, const struct nf_info *info) 70static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info)
70{ 71{
71 struct ip6_rt_info *rt_info = nf_info_reroute(info); 72 struct ip6_rt_info *rt_info = nf_info_reroute(info);
72 73
@@ -79,15 +80,50 @@ static int reroute(struct sk_buff **pskb, const struct nf_info *info)
79 return 0; 80 return 0;
80} 81}
81 82
82static struct nf_queue_rerouter ip6_reroute = { 83unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
83 .rer_size = sizeof(struct ip6_rt_info), 84 unsigned int dataoff, u_int8_t protocol)
84 .save = &save, 85{
85 .reroute = &reroute, 86 struct ipv6hdr *ip6h = skb->nh.ipv6h;
87 unsigned int csum = 0;
88
89 switch (skb->ip_summed) {
90 case CHECKSUM_HW:
91 if (hook != NF_IP6_PRE_ROUTING && hook != NF_IP6_LOCAL_IN)
92 break;
93 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
94 skb->len - dataoff, protocol,
95 csum_sub(skb->csum,
96 skb_checksum(skb, 0,
97 dataoff, 0)))) {
98 skb->ip_summed = CHECKSUM_UNNECESSARY;
99 break;
100 }
101 /* fall through */
102 case CHECKSUM_NONE:
103 skb->csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
104 skb->len - dataoff,
105 protocol,
106 csum_sub(0,
107 skb_checksum(skb, 0,
108 dataoff, 0)));
109 csum = __skb_checksum_complete(skb);
110 }
111 return csum;
112}
113
114EXPORT_SYMBOL(nf_ip6_checksum);
115
116static struct nf_afinfo nf_ip6_afinfo = {
117 .family = AF_INET6,
118 .checksum = nf_ip6_checksum,
119 .saveroute = nf_ip6_saveroute,
120 .reroute = nf_ip6_reroute,
121 .route_key_size = sizeof(struct ip6_rt_info),
86}; 122};
87 123
88int __init ipv6_netfilter_init(void) 124int __init ipv6_netfilter_init(void)
89{ 125{
90 return nf_register_queue_rerouter(PF_INET6, &ip6_reroute); 126 return nf_register_afinfo(&nf_ip6_afinfo);
91} 127}
92 128
93/* This can be called from inet6_init() on errors, so it cannot 129/* This can be called from inet6_init() on errors, so it cannot
@@ -95,5 +131,5 @@ int __init ipv6_netfilter_init(void)
95 */ 131 */
96void ipv6_netfilter_fini(void) 132void ipv6_netfilter_fini(void)
97{ 133{
98 nf_unregister_queue_rerouter(PF_INET6); 134 nf_unregister_afinfo(&nf_ip6_afinfo);
99} 135}
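
In nf_ip6_checksum() above, the CHECKSUM_HW branch reuses the checksum the NIC computed over everything from skb->data onward (held in skb->csum); only the transport part starting at dataoff is wanted, so the checksum of the leading dataoff bytes is subtracted before folding in the IPv6 pseudo-header. A condensed sketch of just that arithmetic (verification only, without the fall-through to software checksumming):

static int hw_csum_ok_sketch(struct sk_buff *skb, unsigned int dataoff,
                             u_int8_t protocol)
{
        struct ipv6hdr *ip6h = skb->nh.ipv6h;
        unsigned int csum;

        /* checksum of data[dataoff..len) = skb->csum minus checksum of data[0..dataoff) */
        csum = csum_sub(skb->csum, skb_checksum(skb, 0, dataoff, 0));

        /* valid iff folding in the pseudo-header yields zero */
        return csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                               skb->len - dataoff, protocol, csum) == 0;
}
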
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index e81c6a9dab81..b4b7d441af25 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -658,15 +658,11 @@ static struct nf_queue_handler nfqh = {
658 .outfn = &ipq_enqueue_packet, 658 .outfn = &ipq_enqueue_packet,
659}; 659};
660 660
661static int 661static int __init ip6_queue_init(void)
662init_or_cleanup(int init)
663{ 662{
664 int status = -ENOMEM; 663 int status = -ENOMEM;
665 struct proc_dir_entry *proc; 664 struct proc_dir_entry *proc;
666 665
667 if (!init)
668 goto cleanup;
669
670 netlink_register_notifier(&ipq_nl_notifier); 666 netlink_register_notifier(&ipq_nl_notifier);
671 ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk, 667 ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk,
672 THIS_MODULE); 668 THIS_MODULE);
@@ -693,11 +689,6 @@ init_or_cleanup(int init)
693 } 689 }
694 return status; 690 return status;
695 691
696cleanup:
697 nf_unregister_queue_handlers(&nfqh);
698 synchronize_net();
699 ipq_flush(NF_DROP);
700
701cleanup_sysctl: 692cleanup_sysctl:
702 unregister_sysctl_table(ipq_sysctl_header); 693 unregister_sysctl_table(ipq_sysctl_header);
703 unregister_netdevice_notifier(&ipq_dev_notifier); 694 unregister_netdevice_notifier(&ipq_dev_notifier);
@@ -713,15 +704,21 @@ cleanup_netlink_notifier:
713 return status; 704 return status;
714} 705}
715 706
716static int __init ip6_queue_init(void)
717{
718
719 return init_or_cleanup(1);
720}
721
722static void __exit ip6_queue_fini(void) 707static void __exit ip6_queue_fini(void)
723{ 708{
724 init_or_cleanup(0); 709 nf_unregister_queue_handlers(&nfqh);
710 synchronize_net();
711 ipq_flush(NF_DROP);
712
713 unregister_sysctl_table(ipq_sysctl_header);
714 unregister_netdevice_notifier(&ipq_dev_notifier);
715 proc_net_remove(IPQ_PROC_FS_NAME);
716
717 sock_release(ipqnl->sk_socket);
718 mutex_lock(&ipqnl_mutex);
719 mutex_unlock(&ipqnl_mutex);
720
721 netlink_unregister_notifier(&ipq_nl_notifier);
725} 722}
726 723
727MODULE_DESCRIPTION("IPv6 packet queue handler"); 724MODULE_DESCRIPTION("IPv6 packet queue handler");
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3ecf2db841f8..2e72f89a7019 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -288,19 +288,6 @@ ip6t_do_table(struct sk_buff **pskb,
288 table_base = (void *)private->entries[smp_processor_id()]; 288 table_base = (void *)private->entries[smp_processor_id()];
289 e = get_entry(table_base, private->hook_entry[hook]); 289 e = get_entry(table_base, private->hook_entry[hook]);
290 290
291#ifdef CONFIG_NETFILTER_DEBUG
292 /* Check noone else using our table */
293 if (((struct ip6t_entry *)table_base)->comefrom != 0xdead57ac
294 && ((struct ip6t_entry *)table_base)->comefrom != 0xeeeeeeec) {
295 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
296 smp_processor_id(),
297 table->name,
298 &((struct ip6t_entry *)table_base)->comefrom,
299 ((struct ip6t_entry *)table_base)->comefrom);
300 }
301 ((struct ip6t_entry *)table_base)->comefrom = 0x57acc001;
302#endif
303
304 /* For return from builtin chain */ 291 /* For return from builtin chain */
305 back = get_entry(table_base, private->underflow[hook]); 292 back = get_entry(table_base, private->underflow[hook]);
306 293
@@ -788,7 +775,7 @@ translate_table(const char *name,
788 } 775 }
789 776
790 /* And one copy for every other CPU */ 777 /* And one copy for every other CPU */
791 for_each_cpu(i) { 778 for_each_possible_cpu(i) {
792 if (newinfo->entries[i] && newinfo->entries[i] != entry0) 779 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
793 memcpy(newinfo->entries[i], entry0, newinfo->size); 780 memcpy(newinfo->entries[i], entry0, newinfo->size);
794 } 781 }
@@ -841,7 +828,7 @@ get_counters(const struct xt_table_info *t,
841 counters, 828 counters,
842 &i); 829 &i);
843 830
844 for_each_cpu(cpu) { 831 for_each_possible_cpu(cpu) {
845 if (cpu == curcpu) 832 if (cpu == curcpu)
846 continue; 833 continue;
847 i = 0; 834 i = 0;
@@ -1116,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len)
1116 1103
1117 write_lock_bh(&t->lock); 1104 write_lock_bh(&t->lock);
1118 private = t->private; 1105 private = t->private;
1119 if (private->number != paddc->num_counters) { 1106 if (private->number != tmp.num_counters) {
1120 ret = -EINVAL; 1107 ret = -EINVAL;
1121 goto unlock_up_free; 1108 goto unlock_up_free;
1122 } 1109 }
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a96c0de14b00..73c6300109d6 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb,
439 439
440 if (loginfo->logflags & IP6T_LOG_NFLOG) 440 if (loginfo->logflags & IP6T_LOG_NFLOG)
441 nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, 441 nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
442 loginfo->prefix); 442 "%s", loginfo->prefix);
443 else 443 else
444 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, 444 ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
445 loginfo->prefix); 445 loginfo->prefix);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 94dbdb8b458d..4f6b84c8f4ab 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -40,7 +40,7 @@ match(const struct sk_buff *skb,
40 40
41 memset(eui64, 0, sizeof(eui64)); 41 memset(eui64, 0, sizeof(eui64));
42 42
43 if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { 43 if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
44 if (skb->nh.ipv6h->version == 0x6) { 44 if (skb->nh.ipv6h->version == 0x6) {
45 memcpy(eui64, eth_hdr(skb)->h_source, 3); 45 memcpy(eui64, eth_hdr(skb)->h_source, 3);
46 memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); 46 memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index e5e724d9ee60..60976c0c58e8 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -177,37 +177,20 @@ static int __init ip6table_filter_init(void)
177 return ret; 177 return ret;
178 178
179 /* Register hooks */ 179 /* Register hooks */
180 ret = nf_register_hook(&ip6t_ops[0]); 180 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
181 if (ret < 0) 181 if (ret < 0)
182 goto cleanup_table; 182 goto cleanup_table;
183 183
184 ret = nf_register_hook(&ip6t_ops[1]);
185 if (ret < 0)
186 goto cleanup_hook0;
187
188 ret = nf_register_hook(&ip6t_ops[2]);
189 if (ret < 0)
190 goto cleanup_hook1;
191
192 return ret; 184 return ret;
193 185
194 cleanup_hook1:
195 nf_unregister_hook(&ip6t_ops[1]);
196 cleanup_hook0:
197 nf_unregister_hook(&ip6t_ops[0]);
198 cleanup_table: 186 cleanup_table:
199 ip6t_unregister_table(&packet_filter); 187 ip6t_unregister_table(&packet_filter);
200
201 return ret; 188 return ret;
202} 189}
203 190
204static void __exit ip6table_filter_fini(void) 191static void __exit ip6table_filter_fini(void)
205{ 192{
206 unsigned int i; 193 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
207
208 for (i = 0; i < sizeof(ip6t_ops)/sizeof(struct nf_hook_ops); i++)
209 nf_unregister_hook(&ip6t_ops[i]);
210
211 ip6t_unregister_table(&packet_filter); 194 ip6t_unregister_table(&packet_filter);
212} 195}
213 196
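
The ip6t_ops array referenced above is declared earlier in ip6table_filter.c and is unchanged by this hunk; the conversion only replaces the per-entry register/unregister ladder with the batched helpers added in net/netfilter/core.c further down. A sketch of the pattern with illustrative entries (handler, hook points and priorities here are placeholders, not the file's actual initializer):

static unsigned int sketch_hook(unsigned int hooknum, struct sk_buff **pskb,
                                const struct net_device *in,
                                const struct net_device *out,
                                int (*okfn)(struct sk_buff *));

static struct nf_hook_ops sketch_ops[] = {
        {
                .hook     = sketch_hook,
                .owner    = THIS_MODULE,
                .pf       = PF_INET6,
                .hooknum  = NF_IP6_LOCAL_IN,
                .priority = NF_IP6_PRI_FILTER,
        },
        {
                .hook     = sketch_hook,
                .owner    = THIS_MODULE,
                .pf       = PF_INET6,
                .hooknum  = NF_IP6_FORWARD,
                .priority = NF_IP6_PRI_FILTER,
        },
};

static int __init sketch_init(void)
{
        /* registers every entry; on failure the helper unregisters the
         * ones it already added, so a single error label suffices */
        return nf_register_hooks(sketch_ops, ARRAY_SIZE(sketch_ops));
}

static void __exit sketch_exit(void)
{
        nf_unregister_hooks(sketch_ops, ARRAY_SIZE(sketch_ops));
}
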
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index e1f0f6ae9841..03a13eab1dae 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -238,49 +238,20 @@ static int __init ip6table_mangle_init(void)
238 return ret; 238 return ret;
239 239
240 /* Register hooks */ 240 /* Register hooks */
241 ret = nf_register_hook(&ip6t_ops[0]); 241 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
242 if (ret < 0) 242 if (ret < 0)
243 goto cleanup_table; 243 goto cleanup_table;
244 244
245 ret = nf_register_hook(&ip6t_ops[1]);
246 if (ret < 0)
247 goto cleanup_hook0;
248
249 ret = nf_register_hook(&ip6t_ops[2]);
250 if (ret < 0)
251 goto cleanup_hook1;
252
253 ret = nf_register_hook(&ip6t_ops[3]);
254 if (ret < 0)
255 goto cleanup_hook2;
256
257 ret = nf_register_hook(&ip6t_ops[4]);
258 if (ret < 0)
259 goto cleanup_hook3;
260
261 return ret; 245 return ret;
262 246
263 cleanup_hook3:
264 nf_unregister_hook(&ip6t_ops[3]);
265 cleanup_hook2:
266 nf_unregister_hook(&ip6t_ops[2]);
267 cleanup_hook1:
268 nf_unregister_hook(&ip6t_ops[1]);
269 cleanup_hook0:
270 nf_unregister_hook(&ip6t_ops[0]);
271 cleanup_table: 247 cleanup_table:
272 ip6t_unregister_table(&packet_mangler); 248 ip6t_unregister_table(&packet_mangler);
273
274 return ret; 249 return ret;
275} 250}
276 251
277static void __exit ip6table_mangle_fini(void) 252static void __exit ip6table_mangle_fini(void)
278{ 253{
279 unsigned int i; 254 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
280
281 for (i = 0; i < sizeof(ip6t_ops)/sizeof(struct nf_hook_ops); i++)
282 nf_unregister_hook(&ip6t_ops[i]);
283
284 ip6t_unregister_table(&packet_mangler); 255 ip6t_unregister_table(&packet_mangler);
285} 256}
286 257
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 54d1fffd62ba..61a7c58e99f8 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -152,31 +152,20 @@ static int __init ip6table_raw_init(void)
152 return ret; 152 return ret;
153 153
154 /* Register hooks */ 154 /* Register hooks */
155 ret = nf_register_hook(&ip6t_ops[0]); 155 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
156 if (ret < 0) 156 if (ret < 0)
157 goto cleanup_table; 157 goto cleanup_table;
158 158
159 ret = nf_register_hook(&ip6t_ops[1]);
160 if (ret < 0)
161 goto cleanup_hook0;
162
163 return ret; 159 return ret;
164 160
165 cleanup_hook0:
166 nf_unregister_hook(&ip6t_ops[0]);
167 cleanup_table: 161 cleanup_table:
168 ip6t_unregister_table(&packet_raw); 162 ip6t_unregister_table(&packet_raw);
169
170 return ret; 163 return ret;
171} 164}
172 165
173static void __exit ip6table_raw_fini(void) 166static void __exit ip6table_raw_fini(void)
174{ 167{
175 unsigned int i; 168 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
176
177 for (i = 0; i < sizeof(ip6t_ops)/sizeof(struct nf_hook_ops); i++)
178 nf_unregister_hook(&ip6t_ops[i]);
179
180 ip6t_unregister_table(&packet_raw); 169 ip6t_unregister_table(&packet_raw);
181} 170}
182 171
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c8b5a96cbb0f..93bae36f2663 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -286,55 +286,49 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
286 return ipv6_conntrack_in(hooknum, pskb, in, out, okfn); 286 return ipv6_conntrack_in(hooknum, pskb, in, out, okfn);
287} 287}
288 288
289/* Connection tracking may drop packets, but never alters them, so 289static struct nf_hook_ops ipv6_conntrack_ops[] = {
290 make it the first hook. */ 290 {
291static struct nf_hook_ops ipv6_conntrack_defrag_ops = { 291 .hook = ipv6_defrag,
292 .hook = ipv6_defrag, 292 .owner = THIS_MODULE,
293 .owner = THIS_MODULE, 293 .pf = PF_INET6,
294 .pf = PF_INET6, 294 .hooknum = NF_IP6_PRE_ROUTING,
295 .hooknum = NF_IP6_PRE_ROUTING, 295 .priority = NF_IP6_PRI_CONNTRACK_DEFRAG,
296 .priority = NF_IP6_PRI_CONNTRACK_DEFRAG, 296 },
297}; 297 {
298 298 .hook = ipv6_conntrack_in,
299static struct nf_hook_ops ipv6_conntrack_in_ops = { 299 .owner = THIS_MODULE,
300 .hook = ipv6_conntrack_in, 300 .pf = PF_INET6,
301 .owner = THIS_MODULE, 301 .hooknum = NF_IP6_PRE_ROUTING,
302 .pf = PF_INET6, 302 .priority = NF_IP6_PRI_CONNTRACK,
303 .hooknum = NF_IP6_PRE_ROUTING, 303 },
304 .priority = NF_IP6_PRI_CONNTRACK, 304 {
305}; 305 .hook = ipv6_conntrack_local,
306 306 .owner = THIS_MODULE,
307static struct nf_hook_ops ipv6_conntrack_local_out_ops = { 307 .pf = PF_INET6,
308 .hook = ipv6_conntrack_local, 308 .hooknum = NF_IP6_LOCAL_OUT,
309 .owner = THIS_MODULE, 309 .priority = NF_IP6_PRI_CONNTRACK,
310 .pf = PF_INET6, 310 },
311 .hooknum = NF_IP6_LOCAL_OUT, 311 {
312 .priority = NF_IP6_PRI_CONNTRACK, 312 .hook = ipv6_defrag,
313}; 313 .owner = THIS_MODULE,
314 314 .pf = PF_INET6,
315static struct nf_hook_ops ipv6_conntrack_defrag_local_out_ops = { 315 .hooknum = NF_IP6_LOCAL_OUT,
316 .hook = ipv6_defrag, 316 .priority = NF_IP6_PRI_CONNTRACK_DEFRAG,
317 .owner = THIS_MODULE, 317 },
318 .pf = PF_INET6, 318 {
319 .hooknum = NF_IP6_LOCAL_OUT, 319 .hook = ipv6_confirm,
320 .priority = NF_IP6_PRI_CONNTRACK_DEFRAG, 320 .owner = THIS_MODULE,
321}; 321 .pf = PF_INET6,
322 322 .hooknum = NF_IP6_POST_ROUTING,
323/* Refragmenter; last chance. */ 323 .priority = NF_IP6_PRI_LAST,
324static struct nf_hook_ops ipv6_conntrack_out_ops = { 324 },
325 .hook = ipv6_confirm, 325 {
326 .owner = THIS_MODULE, 326 .hook = ipv6_confirm,
327 .pf = PF_INET6, 327 .owner = THIS_MODULE,
328 .hooknum = NF_IP6_POST_ROUTING, 328 .pf = PF_INET6,
329 .priority = NF_IP6_PRI_LAST, 329 .hooknum = NF_IP6_LOCAL_IN,
330}; 330 .priority = NF_IP6_PRI_LAST-1,
331 331 },
332static struct nf_hook_ops ipv6_conntrack_local_in_ops = {
333 .hook = ipv6_confirm,
334 .owner = THIS_MODULE,
335 .pf = PF_INET6,
336 .hooknum = NF_IP6_LOCAL_IN,
337 .priority = NF_IP6_PRI_LAST-1,
338}; 332};
339 333
340#ifdef CONFIG_SYSCTL 334#ifdef CONFIG_SYSCTL
@@ -470,16 +464,21 @@ extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
470extern struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6; 464extern struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6;
471extern int nf_ct_frag6_init(void); 465extern int nf_ct_frag6_init(void);
472extern void nf_ct_frag6_cleanup(void); 466extern void nf_ct_frag6_cleanup(void);
473static int init_or_cleanup(int init) 467
468MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
469MODULE_LICENSE("GPL");
470MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
471
472static int __init nf_conntrack_l3proto_ipv6_init(void)
474{ 473{
475 int ret = 0; 474 int ret = 0;
476 475
477 if (!init) goto cleanup; 476 need_conntrack();
478 477
479 ret = nf_ct_frag6_init(); 478 ret = nf_ct_frag6_init();
480 if (ret < 0) { 479 if (ret < 0) {
481 printk("nf_conntrack_ipv6: can't initialize frag6.\n"); 480 printk("nf_conntrack_ipv6: can't initialize frag6.\n");
482 goto cleanup_nothing; 481 return ret;
483 } 482 }
484 ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6); 483 ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6);
485 if (ret < 0) { 484 if (ret < 0) {
@@ -505,71 +504,27 @@ static int init_or_cleanup(int init)
505 goto cleanup_icmpv6; 504 goto cleanup_icmpv6;
506 } 505 }
507 506
508 ret = nf_register_hook(&ipv6_conntrack_defrag_ops); 507 ret = nf_register_hooks(ipv6_conntrack_ops,
508 ARRAY_SIZE(ipv6_conntrack_ops));
509 if (ret < 0) { 509 if (ret < 0) {
510 printk("nf_conntrack_ipv6: can't register pre-routing defrag " 510 printk("nf_conntrack_ipv6: can't register pre-routing defrag "
511 "hook.\n"); 511 "hook.\n");
512 goto cleanup_ipv6; 512 goto cleanup_ipv6;
513 } 513 }
514
515 ret = nf_register_hook(&ipv6_conntrack_defrag_local_out_ops);
516 if (ret < 0) {
517 printk("nf_conntrack_ipv6: can't register local_out defrag "
518 "hook.\n");
519 goto cleanup_defragops;
520 }
521
522 ret = nf_register_hook(&ipv6_conntrack_in_ops);
523 if (ret < 0) {
524 printk("nf_conntrack_ipv6: can't register pre-routing hook.\n");
525 goto cleanup_defraglocalops;
526 }
527
528 ret = nf_register_hook(&ipv6_conntrack_local_out_ops);
529 if (ret < 0) {
530 printk("nf_conntrack_ipv6: can't register local out hook.\n");
531 goto cleanup_inops;
532 }
533
534 ret = nf_register_hook(&ipv6_conntrack_out_ops);
535 if (ret < 0) {
536 printk("nf_conntrack_ipv6: can't register post-routing hook.\n");
537 goto cleanup_inandlocalops;
538 }
539
540 ret = nf_register_hook(&ipv6_conntrack_local_in_ops);
541 if (ret < 0) {
542 printk("nf_conntrack_ipv6: can't register local in hook.\n");
543 goto cleanup_inoutandlocalops;
544 }
545
546#ifdef CONFIG_SYSCTL 514#ifdef CONFIG_SYSCTL
547 nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0); 515 nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
548 if (nf_ct_ipv6_sysctl_header == NULL) { 516 if (nf_ct_ipv6_sysctl_header == NULL) {
549 printk("nf_conntrack: can't register to sysctl.\n"); 517 printk("nf_conntrack: can't register to sysctl.\n");
550 ret = -ENOMEM; 518 ret = -ENOMEM;
551 goto cleanup_localinops; 519 goto cleanup_hooks;
552 } 520 }
553#endif 521#endif
554 return ret; 522 return ret;
555 523
556 cleanup:
557 synchronize_net();
558#ifdef CONFIG_SYSCTL 524#ifdef CONFIG_SYSCTL
559 unregister_sysctl_table(nf_ct_ipv6_sysctl_header); 525 cleanup_hooks:
560 cleanup_localinops: 526 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
561#endif 527#endif
562 nf_unregister_hook(&ipv6_conntrack_local_in_ops);
563 cleanup_inoutandlocalops:
564 nf_unregister_hook(&ipv6_conntrack_out_ops);
565 cleanup_inandlocalops:
566 nf_unregister_hook(&ipv6_conntrack_local_out_ops);
567 cleanup_inops:
568 nf_unregister_hook(&ipv6_conntrack_in_ops);
569 cleanup_defraglocalops:
570 nf_unregister_hook(&ipv6_conntrack_defrag_local_out_ops);
571 cleanup_defragops:
572 nf_unregister_hook(&ipv6_conntrack_defrag_ops);
573 cleanup_ipv6: 528 cleanup_ipv6:
574 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); 529 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
575 cleanup_icmpv6: 530 cleanup_icmpv6:
@@ -580,23 +535,21 @@ static int init_or_cleanup(int init)
580 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6); 535 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
581 cleanup_frag6: 536 cleanup_frag6:
582 nf_ct_frag6_cleanup(); 537 nf_ct_frag6_cleanup();
583 cleanup_nothing:
584 return ret; 538 return ret;
585} 539}
586 540
587MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
588MODULE_LICENSE("GPL");
589MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
590
591static int __init nf_conntrack_l3proto_ipv6_init(void)
592{
593 need_conntrack();
594 return init_or_cleanup(1);
595}
596
597static void __exit nf_conntrack_l3proto_ipv6_fini(void) 541static void __exit nf_conntrack_l3proto_ipv6_fini(void)
598{ 542{
599 init_or_cleanup(0); 543 synchronize_net();
544#ifdef CONFIG_SYSCTL
545 unregister_sysctl_table(nf_ct_ipv6_sysctl_header);
546#endif
547 nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
548 nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
549 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6);
550 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6);
551 nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
552 nf_ct_frag6_cleanup();
600} 553}
601 554
602module_init(nf_conntrack_l3proto_ipv6_init); 555module_init(nf_conntrack_l3proto_ipv6_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 09945c333055..86c6703265d0 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -233,21 +233,13 @@ icmpv6_error(struct sk_buff *skb, unsigned int dataoff,
233 return -NF_ACCEPT; 233 return -NF_ACCEPT;
234 } 234 }
235 235
236 if (hooknum != NF_IP6_PRE_ROUTING) 236 if (hooknum == NF_IP6_PRE_ROUTING &&
237 goto skipped; 237 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
238
239 /* Ignore it if the checksum's bogus. */
240 if (csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
241 skb->len - dataoff, IPPROTO_ICMPV6,
242 skb_checksum(skb, dataoff,
243 skb->len - dataoff, 0))) {
244 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 238 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
245 "nf_ct_icmpv6: ICMPv6 checksum failed\n"); 239 "nf_ct_icmpv6: ICMPv6 checksum failed\n");
246 return -NF_ACCEPT; 240 return -NF_ACCEPT;
247 } 241 }
248 242
249skipped:
250
251 /* is not error message ? */ 243 /* is not error message ? */
252 if (icmp6h->icmp6_type >= 128) 244 if (icmp6h->icmp6_type >= 128)
253 return NF_ACCEPT; 245 return NF_ACCEPT;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 4238b1ed8860..779ddf77f4d4 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
38 int res = 0; 38 int res = 0;
39 int cpu; 39 int cpu;
40 40
41 for_each_cpu(cpu) 41 for_each_possible_cpu(cpu)
42 res += proto->stats[cpu].inuse; 42 res += proto->stats[cpu].inuse;
43 43
44 return res; 44 return res;
@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt)
140 unsigned long res = 0; 140 unsigned long res = 0;
141 int i; 141 int i;
142 142
143 for_each_cpu(i) { 143 for_each_possible_cpu(i) {
144 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); 144 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
145 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); 145 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
146 } 146 }
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b67a45fb93e9..eef985e010ea 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -121,6 +121,10 @@ static __inline__ void fq_unlink(struct frag_queue *fq)
121 write_unlock(&ip6_frag_lock); 121 write_unlock(&ip6_frag_lock);
122} 122}
123 123
124/*
125 * callers should be careful not to use the hash value outside the ipfrag_lock
126 * as doing so could race with ipfrag_hash_rnd being recalculated.
127 */
124static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr, 128static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
125 struct in6_addr *daddr) 129 struct in6_addr *daddr)
126{ 130{
@@ -324,15 +328,16 @@ out:
324/* Creation primitives. */ 328/* Creation primitives. */
325 329
326 330
327static struct frag_queue *ip6_frag_intern(unsigned int hash, 331static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
328 struct frag_queue *fq_in)
329{ 332{
330 struct frag_queue *fq; 333 struct frag_queue *fq;
334 unsigned int hash;
331#ifdef CONFIG_SMP 335#ifdef CONFIG_SMP
332 struct hlist_node *n; 336 struct hlist_node *n;
333#endif 337#endif
334 338
335 write_lock(&ip6_frag_lock); 339 write_lock(&ip6_frag_lock);
340 hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
336#ifdef CONFIG_SMP 341#ifdef CONFIG_SMP
337 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) { 342 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
338 if (fq->id == fq_in->id && 343 if (fq->id == fq_in->id &&
@@ -362,7 +367,7 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
362 367
363 368
364static struct frag_queue * 369static struct frag_queue *
365ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst) 370ip6_frag_create(u32 id, struct in6_addr *src, struct in6_addr *dst)
366{ 371{
367 struct frag_queue *fq; 372 struct frag_queue *fq;
368 373
@@ -379,7 +384,7 @@ ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr
379 spin_lock_init(&fq->lock); 384 spin_lock_init(&fq->lock);
380 atomic_set(&fq->refcnt, 1); 385 atomic_set(&fq->refcnt, 1);
381 386
382 return ip6_frag_intern(hash, fq); 387 return ip6_frag_intern(fq);
383 388
384oom: 389oom:
385 IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); 390 IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
@@ -391,9 +396,10 @@ fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
391{ 396{
392 struct frag_queue *fq; 397 struct frag_queue *fq;
393 struct hlist_node *n; 398 struct hlist_node *n;
394 unsigned int hash = ip6qhashfn(id, src, dst); 399 unsigned int hash;
395 400
396 read_lock(&ip6_frag_lock); 401 read_lock(&ip6_frag_lock);
402 hash = ip6qhashfn(id, src, dst);
397 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) { 403 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
398 if (fq->id == id && 404 if (fq->id == id &&
399 ipv6_addr_equal(src, &fq->saddr) && 405 ipv6_addr_equal(src, &fq->saddr) &&
@@ -405,7 +411,7 @@ fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
405 } 411 }
406 read_unlock(&ip6_frag_lock); 412 read_unlock(&ip6_frag_lock);
407 413
408 return ip6_frag_create(hash, id, src, dst); 414 return ip6_frag_create(id, src, dst);
409} 415}
410 416
411 417
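
The reassembly change moves the ip6qhashfn() call inside ip6_frag_lock in both ip6_frag_intern() and fq_find(), matching the new comment: the secret the hash mixes in can be regenerated by the rehash timer, so a bucket index computed outside the lock can go stale before the queue is linked in. The corrected ordering, as a small sketch:

static void fq_hash_insert_sketch(struct frag_queue *fq)
{
        unsigned int hash;

        write_lock(&ip6_frag_lock);
        /* derive the bucket only while the lock is held */
        hash = ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
        hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
        write_unlock(&ip6_frag_lock);
}
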
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 79078747a646..8a777932786d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -280,10 +280,13 @@ static int inline rt6_check_neigh(struct rt6_info *rt)
280{ 280{
281 struct neighbour *neigh = rt->rt6i_nexthop; 281 struct neighbour *neigh = rt->rt6i_nexthop;
282 int m = 0; 282 int m = 0;
283 if (neigh) { 283 if (rt->rt6i_flags & RTF_NONEXTHOP ||
284 !(rt->rt6i_flags & RTF_GATEWAY))
285 m = 1;
286 else if (neigh) {
284 read_lock_bh(&neigh->lock); 287 read_lock_bh(&neigh->lock);
285 if (neigh->nud_state & NUD_VALID) 288 if (neigh->nud_state & NUD_VALID)
286 m = 1; 289 m = 2;
287 read_unlock_bh(&neigh->lock); 290 read_unlock_bh(&neigh->lock);
288 } 291 }
289 return m; 292 return m;
@@ -292,15 +295,18 @@ static int inline rt6_check_neigh(struct rt6_info *rt)
292static int rt6_score_route(struct rt6_info *rt, int oif, 295static int rt6_score_route(struct rt6_info *rt, int oif,
293 int strict) 296 int strict)
294{ 297{
295 int m = rt6_check_dev(rt, oif); 298 int m, n;
299
300 m = rt6_check_dev(rt, oif);
296 if (!m && (strict & RT6_SELECT_F_IFACE)) 301 if (!m && (strict & RT6_SELECT_F_IFACE))
297 return -1; 302 return -1;
298#ifdef CONFIG_IPV6_ROUTER_PREF 303#ifdef CONFIG_IPV6_ROUTER_PREF
299 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; 304 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
300#endif 305#endif
301 if (rt6_check_neigh(rt)) 306 n = rt6_check_neigh(rt);
307 if (n > 1)
302 m |= 16; 308 m |= 16;
303 else if (strict & RT6_SELECT_F_REACHABLE) 309 else if (!n && strict & RT6_SELECT_F_REACHABLE)
304 return -1; 310 return -1;
305 return m; 311 return m;
306} 312}
@@ -317,7 +323,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
317 __FUNCTION__, head, head ? *head : NULL, oif); 323 __FUNCTION__, head, head ? *head : NULL, oif);
318 324
319 for (rt = rt0, metric = rt0->rt6i_metric; 325 for (rt = rt0, metric = rt0->rt6i_metric;
320 rt && rt->rt6i_metric == metric; 326 rt && rt->rt6i_metric == metric && (!last || rt != rt0);
321 rt = rt->u.next) { 327 rt = rt->u.next) {
322 int m; 328 int m;
323 329
@@ -343,9 +349,12 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
343 (strict & RT6_SELECT_F_REACHABLE) && 349 (strict & RT6_SELECT_F_REACHABLE) &&
344 last && last != rt0) { 350 last && last != rt0) {
345 /* no entries matched; do round-robin */ 351 /* no entries matched; do round-robin */
352 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
353 spin_lock(&lock);
346 *head = rt0->u.next; 354 *head = rt0->u.next;
347 rt0->u.next = last->u.next; 355 rt0->u.next = last->u.next;
348 last->u.next = rt0; 356 last->u.next = rt0;
357 spin_unlock(&lock);
349 } 358 }
350 359
351 RT6_TRACE("%s() => %p, score=%d\n", 360 RT6_TRACE("%s() => %p, score=%d\n",
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c2d3e17beae6..6578c3080f47 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -397,7 +397,7 @@ static int ipip6_rcv(struct sk_buff *skb)
397 return 0; 397 return 0;
398 } 398 }
399 399
400 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0); 400 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
401 kfree_skb(skb); 401 kfree_skb(skb);
402 read_unlock(&ipip6_lock); 402 read_unlock(&ipip6_lock);
403out: 403out:
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 5659b52284bd..0ef9a35798d1 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -19,11 +19,13 @@
19 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> 19 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
20 */ 20 */
21 21
22#include <linux/icmpv6.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/mutex.h> 25#include <linux/mutex.h>
25#include <linux/netdevice.h> 26#include <linux/netdevice.h>
26#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <net/ipv6.h>
27#include <net/protocol.h> 29#include <net/protocol.h>
28#include <net/xfrm.h> 30#include <net/xfrm.h>
29 31
@@ -87,10 +89,16 @@ static int tunnel6_rcv(struct sk_buff **pskb)
87 struct sk_buff *skb = *pskb; 89 struct sk_buff *skb = *pskb;
88 struct xfrm6_tunnel *handler; 90 struct xfrm6_tunnel *handler;
89 91
92 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
93 goto drop;
94
90 for (handler = tunnel6_handlers; handler; handler = handler->next) 95 for (handler = tunnel6_handlers; handler; handler = handler->next)
91 if (!handler->handler(skb)) 96 if (!handler->handler(skb))
92 return 0; 97 return 0;
93 98
99 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
100
101drop:
94 kfree_skb(skb); 102 kfree_skb(skb);
95 return 0; 103 return 0;
96} 104}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 91cce8b2d7a5..88c840f1beb6 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -191,16 +191,18 @@ error:
191static inline void 191static inline void
192_decode_session6(struct sk_buff *skb, struct flowi *fl) 192_decode_session6(struct sk_buff *skb, struct flowi *fl)
193{ 193{
194 u16 offset = sizeof(struct ipv6hdr); 194 u16 offset = skb->h.raw - skb->nh.raw;
195 struct ipv6hdr *hdr = skb->nh.ipv6h; 195 struct ipv6hdr *hdr = skb->nh.ipv6h;
196 struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 196 struct ipv6_opt_hdr *exthdr;
197 u8 nexthdr = skb->nh.ipv6h->nexthdr; 197 u8 nexthdr = skb->nh.raw[IP6CB(skb)->nhoff];
198 198
199 memset(fl, 0, sizeof(struct flowi)); 199 memset(fl, 0, sizeof(struct flowi));
200 ipv6_addr_copy(&fl->fl6_dst, &hdr->daddr); 200 ipv6_addr_copy(&fl->fl6_dst, &hdr->daddr);
201 ipv6_addr_copy(&fl->fl6_src, &hdr->saddr); 201 ipv6_addr_copy(&fl->fl6_src, &hdr->saddr);
202 202
203 while (pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) { 203 while (pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) {
204 exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
205
204 switch (nexthdr) { 206 switch (nexthdr) {
205 case NEXTHDR_ROUTING: 207 case NEXTHDR_ROUTING:
206 case NEXTHDR_HOP: 208 case NEXTHDR_HOP:
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 2dbf134d5266..811d998725bc 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -944,9 +944,9 @@ out:
944 return rc; 944 return rc;
945} 945}
946 946
947static int ipx_map_frame_type(unsigned char type) 947static __be16 ipx_map_frame_type(unsigned char type)
948{ 948{
949 int rc = 0; 949 __be16 rc = 0;
950 950
951 switch (type) { 951 switch (type) {
952 case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break; 952 case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break;
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 67774448efd9..a394c6fe19a2 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -119,7 +119,7 @@ out:
119 return rc; 119 return rc;
120} 120}
121 121
122static int ipxrtr_delete(long net) 122static int ipxrtr_delete(__u32 net)
123{ 123{
124 struct ipx_route *r, *tmp; 124 struct ipx_route *r, *tmp;
125 int rc; 125 int rc;
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 254f90746900..2d2e2b1919f4 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -544,7 +544,8 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self,
544{ 544{
545 struct sk_buff *tx_skb; 545 struct sk_buff *tx_skb;
546 int n; 546 int n;
547 __u32 tmp_be32, tmp_be16; 547 __u32 tmp_be32;
548 __be16 tmp_be16;
548 __u8 *fp; 549 __u8 *fp;
549 550
550 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 551 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index c6d169fbdceb..82e665c79991 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -257,7 +257,6 @@ struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name)
257 /* Unsafe (locking), attrib might change */ 257 /* Unsafe (locking), attrib might change */
258 return attrib; 258 return attrib;
259} 259}
260EXPORT_SYMBOL(irias_find_attrib);
261 260
262/* 261/*
263 * Function irias_add_attribute (obj, attrib) 262 * Function irias_add_attribute (obj, attrib)
@@ -484,7 +483,6 @@ struct ias_value *irias_new_string_value(char *string)
484 483
485 return value; 484 return value;
486} 485}
487EXPORT_SYMBOL(irias_new_string_value);
488 486
489/* 487/*
490 * Function irias_new_octseq_value (octets, len) 488 * Function irias_new_octseq_value (octets, len)
@@ -519,7 +517,6 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
519 memcpy(value->t.oct_seq, octseq , len); 517 memcpy(value->t.oct_seq, octseq , len);
520 return value; 518 return value;
521} 519}
522EXPORT_SYMBOL(irias_new_octseq_value);
523 520
524struct ias_value *irias_new_missing_value(void) 521struct ias_value *irias_new_missing_value(void)
525{ 522{
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 7029618f5719..a16528657b4c 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -884,7 +884,8 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
884 if (now) { 884 if (now) {
885 /* Send down empty frame to trigger speed change */ 885 /* Send down empty frame to trigger speed change */
886 skb = dev_alloc_skb(0); 886 skb = dev_alloc_skb(0);
887 irlap_queue_xmit(self, skb); 887 if (skb)
888 irlap_queue_xmit(self, skb);
888 } 889 }
889} 890}
890 891
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 8f3addf0724c..d62e0f9b9da3 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -118,7 +118,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
118 u16 pdulen = eth_hdr(skb)->h_proto, 118 u16 pdulen = eth_hdr(skb)->h_proto,
119 data_size = ntohs(pdulen) - llc_len; 119 data_size = ntohs(pdulen) - llc_len;
120 120
121 skb_trim(skb, data_size); 121 if (unlikely(pskb_trim_rcsum(skb, data_size)))
122 return 0;
122 } 123 }
123 return 1; 124 return 1;
124} 125}
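
llc_fixup_skb() previously called skb_trim(), which neither handles non-linear data nor touches a checksum the NIC computed over the soon-to-be-trimmed padding. pskb_trim_rcsum() covers both, which is why the call can now fail and the function returns 0 (drop) in that case. A simplified sketch of the helper's presumed semantics in this kernel generation (not the actual implementation):

static inline int pskb_trim_rcsum_sketch(struct sk_buff *skb, unsigned int len)
{
        if (len >= skb->len)
                return 0;                       /* nothing to trim */
        if (skb->ip_summed == CHECKSUM_HW)
                skb->ip_summed = CHECKSUM_NONE; /* hw csum covered the trimmed bytes */
        return __pskb_trim(skb, len);           /* may fail on paged data */
}
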
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 1ceb1a6c254b..8455a32ea5c4 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -27,6 +27,29 @@
27 27
28#include "nf_internals.h" 28#include "nf_internals.h"
29 29
30static DEFINE_SPINLOCK(afinfo_lock);
31
32struct nf_afinfo *nf_afinfo[NPROTO];
33EXPORT_SYMBOL(nf_afinfo);
34
35int nf_register_afinfo(struct nf_afinfo *afinfo)
36{
37 spin_lock(&afinfo_lock);
38 rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
39 spin_unlock(&afinfo_lock);
40 return 0;
41}
42EXPORT_SYMBOL_GPL(nf_register_afinfo);
43
44void nf_unregister_afinfo(struct nf_afinfo *afinfo)
45{
46 spin_lock(&afinfo_lock);
47 rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
48 spin_unlock(&afinfo_lock);
49 synchronize_rcu();
50}
51EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
52
30/* In this code, we can be waiting indefinitely for userspace to 53/* In this code, we can be waiting indefinitely for userspace to
31 * service a packet if a hook returns NF_QUEUE. We could keep a count 54 * service a packet if a hook returns NF_QUEUE. We could keep a count
32 * of skbuffs queued for userspace, and not deregister a hook unless 55 * of skbuffs queued for userspace, and not deregister a hook unless
@@ -63,6 +86,34 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
63} 86}
64EXPORT_SYMBOL(nf_unregister_hook); 87EXPORT_SYMBOL(nf_unregister_hook);
65 88
89int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
90{
91 unsigned int i;
92 int err = 0;
93
94 for (i = 0; i < n; i++) {
95 err = nf_register_hook(&reg[i]);
96 if (err)
97 goto err;
98 }
99 return err;
100
101err:
102 if (i > 0)
103 nf_unregister_hooks(reg, i);
104 return err;
105}
106EXPORT_SYMBOL(nf_register_hooks);
107
108void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
109{
110 unsigned int i;
111
112 for (i = 0; i < n; i++)
113 nf_unregister_hook(&reg[i]);
114}
115EXPORT_SYMBOL(nf_unregister_hooks);
116
66unsigned int nf_iterate(struct list_head *head, 117unsigned int nf_iterate(struct list_head *head,
67 struct sk_buff **skb, 118 struct sk_buff **skb,
68 int hook, 119 int hook,
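
nf_register_afinfo() above publishes the per-family pointer with rcu_assign_pointer(), and nf_unregister_afinfo() clears it and waits in synchronize_rcu(); that pairing implies lookups of nf_afinfo[] happen under RCU. A sketch of the expected reader side (illustrative helper, not a function from this patch):

static unsigned int afinfo_checksum_sketch(struct sk_buff *skb, unsigned int hook,
                                           unsigned int dataoff,
                                           u_int8_t protocol, int pf)
{
        struct nf_afinfo *afinfo;
        unsigned int csum = 0;

        rcu_read_lock();
        afinfo = rcu_dereference(nf_afinfo[pf]);
        if (afinfo)
                csum = afinfo->checksum(skb, hook, dataoff, protocol);
        rcu_read_unlock();

        return csum;
}
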
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 56389c83557c..f9b83f91371a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void)
146 struct nf_conntrack_ecache *ecache; 146 struct nf_conntrack_ecache *ecache;
147 int cpu; 147 int cpu;
148 148
149 for_each_cpu(cpu) { 149 for_each_possible_cpu(cpu) {
150 ecache = &per_cpu(nf_conntrack_ecache, cpu); 150 ecache = &per_cpu(nf_conntrack_ecache, cpu);
151 if (ecache->ct) 151 if (ecache->ct)
152 nf_ct_put(ecache->ct); 152 nf_ct_put(ecache->ct);
@@ -178,9 +178,6 @@ static struct {
178 /* allocated slab cache + modules which uses this slab cache */ 178 /* allocated slab cache + modules which uses this slab cache */
179 int use; 179 int use;
180 180
181 /* Initialization */
182 int (*init_conntrack)(struct nf_conn *, u_int32_t);
183
184} nf_ct_cache[NF_CT_F_NUM]; 181} nf_ct_cache[NF_CT_F_NUM];
185 182
186/* protect members of nf_ct_cache except of "use" */ 183/* protect members of nf_ct_cache except of "use" */
@@ -208,10 +205,8 @@ nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
208 205
209 preempt_disable(); 206 preempt_disable();
210 p = __nf_ct_proto_find(l3proto, protocol); 207 p = __nf_ct_proto_find(l3proto, protocol);
211 if (p) { 208 if (!try_module_get(p->me))
212 if (!try_module_get(p->me)) 209 p = &nf_conntrack_generic_protocol;
213 p = &nf_conntrack_generic_protocol;
214 }
215 preempt_enable(); 210 preempt_enable();
216 211
217 return p; 212 return p;
@@ -229,10 +224,8 @@ nf_ct_l3proto_find_get(u_int16_t l3proto)
229 224
230 preempt_disable(); 225 preempt_disable();
231 p = __nf_ct_l3proto_find(l3proto); 226 p = __nf_ct_l3proto_find(l3proto);
232 if (p) { 227 if (!try_module_get(p->me))
233 if (!try_module_get(p->me)) 228 p = &nf_conntrack_generic_l3proto;
234 p = &nf_conntrack_generic_l3proto;
235 }
236 preempt_enable(); 229 preempt_enable();
237 230
238 return p; 231 return p;
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index 7de4f06c63c5..3fc58e454d4e 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -94,5 +94,4 @@ struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = {
94 .print_conntrack = generic_print_conntrack, 94 .print_conntrack = generic_print_conntrack,
95 .prepare = generic_prepare, 95 .prepare = generic_prepare,
96 .get_features = generic_get_features, 96 .get_features = generic_get_features,
97 .me = THIS_MODULE,
98}; 97};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 9cccc325b687..0c6da496cfa9 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -240,12 +240,15 @@ static int do_basic_checks(struct nf_conn *conntrack,
240 flag = 1; 240 flag = 1;
241 } 241 }
242 242
243 /* Cookie Ack/Echo chunks not the first OR 243 /*
244 Init / Init Ack / Shutdown compl chunks not the only chunks */ 244 * Cookie Ack/Echo chunks not the first OR
245 if ((sch->type == SCTP_CID_COOKIE_ACK 245 * Init / Init Ack / Shutdown compl chunks not the only chunks
246 * OR zero-length.
247 */
248 if (((sch->type == SCTP_CID_COOKIE_ACK
246 || sch->type == SCTP_CID_COOKIE_ECHO 249 || sch->type == SCTP_CID_COOKIE_ECHO
247 || flag) 250 || flag)
248 && count !=0 ) { 251 && count !=0) || !sch->length) {
249 DEBUGP("Basic checks failed\n"); 252 DEBUGP("Basic checks failed\n");
250 return 1; 253 return 1;
251 } 254 }
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 6492ed66fb3c..69899f27d26a 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -799,8 +799,7 @@ static int tcp_error(struct sk_buff *skb,
799 unsigned int dataoff, 799 unsigned int dataoff,
800 enum ip_conntrack_info *ctinfo, 800 enum ip_conntrack_info *ctinfo,
801 int pf, 801 int pf,
802 unsigned int hooknum, 802 unsigned int hooknum)
803 int(*csum)(const struct sk_buff *,unsigned int))
804{ 803{
805 struct tcphdr _tcph, *th; 804 struct tcphdr _tcph, *th;
806 unsigned int tcplen = skb->len - dataoff; 805 unsigned int tcplen = skb->len - dataoff;
@@ -830,9 +829,8 @@ static int tcp_error(struct sk_buff *skb,
830 */ 829 */
831 /* FIXME: Source route IP option packets --RR */ 830 /* FIXME: Source route IP option packets --RR */
832 if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) || 831 if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
833 (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING)) 832 (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING)) &&
834 && skb->ip_summed != CHECKSUM_UNNECESSARY 833 nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
835 && csum(skb, dataoff)) {
836 if (LOG_INVALID(IPPROTO_TCP)) 834 if (LOG_INVALID(IPPROTO_TCP))
837 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 835 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
838 "nf_ct_tcp: bad TCP checksum "); 836 "nf_ct_tcp: bad TCP checksum ");
@@ -851,44 +849,6 @@ static int tcp_error(struct sk_buff *skb,
851 return NF_ACCEPT; 849 return NF_ACCEPT;
852} 850}
853 851
854static int csum4(const struct sk_buff *skb, unsigned int dataoff)
855{
856 return csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
857 skb->len - dataoff, IPPROTO_TCP,
858 skb->ip_summed == CHECKSUM_HW ? skb->csum
859 : skb_checksum(skb, dataoff,
860 skb->len - dataoff, 0));
861}
862
863static int csum6(const struct sk_buff *skb, unsigned int dataoff)
864{
865 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
866 skb->len - dataoff, IPPROTO_TCP,
867 skb->ip_summed == CHECKSUM_HW
868 ? csum_sub(skb->csum,
869 skb_checksum(skb, 0, dataoff, 0))
870 : skb_checksum(skb, dataoff, skb->len - dataoff,
871 0));
872}
873
874static int tcp_error4(struct sk_buff *skb,
875 unsigned int dataoff,
876 enum ip_conntrack_info *ctinfo,
877 int pf,
878 unsigned int hooknum)
879{
880 return tcp_error(skb, dataoff, ctinfo, pf, hooknum, csum4);
881}
882
883static int tcp_error6(struct sk_buff *skb,
884 unsigned int dataoff,
885 enum ip_conntrack_info *ctinfo,
886 int pf,
887 unsigned int hooknum)
888{
889 return tcp_error(skb, dataoff, ctinfo, pf, hooknum, csum6);
890}
891
892/* Returns verdict for packet, or -1 for invalid. */ 852/* Returns verdict for packet, or -1 for invalid. */
893static int tcp_packet(struct nf_conn *conntrack, 853static int tcp_packet(struct nf_conn *conntrack,
894 const struct sk_buff *skb, 854 const struct sk_buff *skb,
@@ -1218,7 +1178,7 @@ struct nf_conntrack_protocol nf_conntrack_protocol_tcp4 =
1218 .print_conntrack = tcp_print_conntrack, 1178 .print_conntrack = tcp_print_conntrack,
1219 .packet = tcp_packet, 1179 .packet = tcp_packet,
1220 .new = tcp_new, 1180 .new = tcp_new,
1221 .error = tcp_error4, 1181 .error = tcp_error,
1222#if defined(CONFIG_NF_CT_NETLINK) || \ 1182#if defined(CONFIG_NF_CT_NETLINK) || \
1223 defined(CONFIG_NF_CT_NETLINK_MODULE) 1183 defined(CONFIG_NF_CT_NETLINK_MODULE)
1224 .to_nfattr = tcp_to_nfattr, 1184 .to_nfattr = tcp_to_nfattr,
@@ -1239,7 +1199,7 @@ struct nf_conntrack_protocol nf_conntrack_protocol_tcp6 =
1239 .print_conntrack = tcp_print_conntrack, 1199 .print_conntrack = tcp_print_conntrack,
1240 .packet = tcp_packet, 1200 .packet = tcp_packet,
1241 .new = tcp_new, 1201 .new = tcp_new,
1242 .error = tcp_error6, 1202 .error = tcp_error,
1243#if defined(CONFIG_NF_CT_NETLINK) || \ 1203#if defined(CONFIG_NF_CT_NETLINK) || \
1244 defined(CONFIG_NF_CT_NETLINK_MODULE) 1204 defined(CONFIG_NF_CT_NETLINK_MODULE)
1245 .to_nfattr = tcp_to_nfattr, 1205 .to_nfattr = tcp_to_nfattr,
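The tcp_error() change above drops the csum4()/csum6() callbacks and the tcp_error4()/tcp_error6() wrappers in favour of the family-aware nf_checksum() helper. A self-contained sketch of the same shape, with made-up names standing in for the per-family helpers:

#include <stdio.h>

struct fam_ops {
        const char *name;
        int (*checksum)(const void *pkt, unsigned int len);
};

static int v4_checksum(const void *pkt, unsigned int len) { (void)pkt; (void)len; return 0; }
static int v6_checksum(const void *pkt, unsigned int len) { (void)pkt; (void)len; return 0; }

static const struct fam_ops fam_table[] = {
        [0] = { "ipv4", v4_checksum },
        [1] = { "ipv6", v6_checksum },
};

/* Before: tcp_error4()/tcp_error6() existed only to pass csum4/csum6.
 * After: the family index picks the helper, so one function serves both. */
static int proto_error(int family, const void *pkt, unsigned int len)
{
        const struct fam_ops *ops = &fam_table[family];

        if (ops->checksum(pkt, len)) {
                printf("%s: bad checksum, packet dropped\n", ops->name);
                return -1;
        }
        printf("%s: checksum ok\n", ops->name);
        return 0;
}

int main(void)
{
        char pkt[1] = { 0 };

        proto_error(0, pkt, sizeof(pkt));
        proto_error(1, pkt, sizeof(pkt));
        return 0;
}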
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 831d206344e0..d93edbfde9e3 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -103,8 +103,7 @@ static int udp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
103static int udp_error(struct sk_buff *skb, unsigned int dataoff, 103static int udp_error(struct sk_buff *skb, unsigned int dataoff,
104 enum ip_conntrack_info *ctinfo, 104 enum ip_conntrack_info *ctinfo,
105 int pf, 105 int pf,
106 unsigned int hooknum, 106 unsigned int hooknum)
107 int (*csum)(const struct sk_buff *, unsigned int))
108{ 107{
109 unsigned int udplen = skb->len - dataoff; 108 unsigned int udplen = skb->len - dataoff;
110 struct udphdr _hdr, *hdr; 109 struct udphdr _hdr, *hdr;
@@ -136,9 +135,8 @@ static int udp_error(struct sk_buff *skb, unsigned int dataoff,
136 * and moreover root might send raw packets. 135 * and moreover root might send raw packets.
137 * FIXME: Source route IP option packets --RR */ 136 * FIXME: Source route IP option packets --RR */
138 if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) || 137 if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
139 (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING)) 138 (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING)) &&
140 && skb->ip_summed != CHECKSUM_UNNECESSARY 139 nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) {
141 && csum(skb, dataoff)) {
142 if (LOG_INVALID(IPPROTO_UDP)) 140 if (LOG_INVALID(IPPROTO_UDP))
143 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 141 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
144 "nf_ct_udp: bad UDP checksum "); 142 "nf_ct_udp: bad UDP checksum ");
@@ -148,44 +146,6 @@ static int udp_error(struct sk_buff *skb, unsigned int dataoff,
148 return NF_ACCEPT; 146 return NF_ACCEPT;
149} 147}
150 148
151static int csum4(const struct sk_buff *skb, unsigned int dataoff)
152{
153 return csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
154 skb->len - dataoff, IPPROTO_UDP,
155 skb->ip_summed == CHECKSUM_HW ? skb->csum
156 : skb_checksum(skb, dataoff,
157 skb->len - dataoff, 0));
158}
159
160static int csum6(const struct sk_buff *skb, unsigned int dataoff)
161{
162 return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
163 skb->len - dataoff, IPPROTO_UDP,
164 skb->ip_summed == CHECKSUM_HW
165 ? csum_sub(skb->csum,
166 skb_checksum(skb, 0, dataoff, 0))
167 : skb_checksum(skb, dataoff, skb->len - dataoff,
168 0));
169}
170
171static int udp_error4(struct sk_buff *skb,
172 unsigned int dataoff,
173 enum ip_conntrack_info *ctinfo,
174 int pf,
175 unsigned int hooknum)
176{
177 return udp_error(skb, dataoff, ctinfo, pf, hooknum, csum4);
178}
179
180static int udp_error6(struct sk_buff *skb,
181 unsigned int dataoff,
182 enum ip_conntrack_info *ctinfo,
183 int pf,
184 unsigned int hooknum)
185{
186 return udp_error(skb, dataoff, ctinfo, pf, hooknum, csum6);
187}
188
189struct nf_conntrack_protocol nf_conntrack_protocol_udp4 = 149struct nf_conntrack_protocol nf_conntrack_protocol_udp4 =
190{ 150{
191 .l3proto = PF_INET, 151 .l3proto = PF_INET,
@@ -197,7 +157,7 @@ struct nf_conntrack_protocol nf_conntrack_protocol_udp4 =
197 .print_conntrack = udp_print_conntrack, 157 .print_conntrack = udp_print_conntrack,
198 .packet = udp_packet, 158 .packet = udp_packet,
199 .new = udp_new, 159 .new = udp_new,
200 .error = udp_error4, 160 .error = udp_error,
201#if defined(CONFIG_NF_CT_NETLINK) || \ 161#if defined(CONFIG_NF_CT_NETLINK) || \
202 defined(CONFIG_NF_CT_NETLINK_MODULE) 162 defined(CONFIG_NF_CT_NETLINK_MODULE)
203 .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, 163 .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
@@ -216,7 +176,7 @@ struct nf_conntrack_protocol nf_conntrack_protocol_udp6 =
216 .print_conntrack = udp_print_conntrack, 176 .print_conntrack = udp_print_conntrack,
217 .packet = udp_packet, 177 .packet = udp_packet,
218 .new = udp_new, 178 .new = udp_new,
219 .error = udp_error6, 179 .error = udp_error,
220#if defined(CONFIG_NF_CT_NETLINK) || \ 180#if defined(CONFIG_NF_CT_NETLINK) || \
221 defined(CONFIG_NF_CT_NETLINK_MODULE) 181 defined(CONFIG_NF_CT_NETLINK_MODULE)
222 .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, 182 .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index c72aa3cd22e4..408960c6a544 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -649,63 +649,6 @@ static ctl_table nf_ct_net_table[] = {
649EXPORT_SYMBOL(nf_ct_log_invalid); 649EXPORT_SYMBOL(nf_ct_log_invalid);
650#endif /* CONFIG_SYSCTL */ 650#endif /* CONFIG_SYSCTL */
651 651
652static int init_or_cleanup(int init)
653{
654#ifdef CONFIG_PROC_FS
655 struct proc_dir_entry *proc, *proc_exp, *proc_stat;
656#endif
657 int ret = 0;
658
659 if (!init) goto cleanup;
660
661 ret = nf_conntrack_init();
662 if (ret < 0)
663 goto cleanup_nothing;
664
665#ifdef CONFIG_PROC_FS
666 proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops);
667 if (!proc) goto cleanup_init;
668
669 proc_exp = proc_net_fops_create("nf_conntrack_expect", 0440,
670 &exp_file_ops);
671 if (!proc_exp) goto cleanup_proc;
672
673 proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat);
674 if (!proc_stat)
675 goto cleanup_proc_exp;
676
677 proc_stat->proc_fops = &ct_cpu_seq_fops;
678 proc_stat->owner = THIS_MODULE;
679#endif
680#ifdef CONFIG_SYSCTL
681 nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
682 if (nf_ct_sysctl_header == NULL) {
683 printk("nf_conntrack: can't register to sysctl.\n");
684 ret = -ENOMEM;
685 goto cleanup_proc_stat;
686 }
687#endif
688
689 return ret;
690
691 cleanup:
692#ifdef CONFIG_SYSCTL
693 unregister_sysctl_table(nf_ct_sysctl_header);
694 cleanup_proc_stat:
695#endif
696#ifdef CONFIG_PROC_FS
697 remove_proc_entry("nf_conntrack", proc_net_stat);
698 cleanup_proc_exp:
699 proc_net_remove("nf_conntrack_expect");
700 cleanup_proc:
701 proc_net_remove("nf_conntrack");
702 cleanup_init:
703#endif /* CNFIG_PROC_FS */
704 nf_conntrack_cleanup();
705 cleanup_nothing:
706 return ret;
707}
708
709int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) 652int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
710{ 653{
711 int ret = 0; 654 int ret = 0;
@@ -808,12 +751,66 @@ void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto)
808 751
809static int __init nf_conntrack_standalone_init(void) 752static int __init nf_conntrack_standalone_init(void)
810{ 753{
811 return init_or_cleanup(1); 754#ifdef CONFIG_PROC_FS
755 struct proc_dir_entry *proc, *proc_exp, *proc_stat;
756#endif
757 int ret = 0;
758
759 ret = nf_conntrack_init();
760 if (ret < 0)
761 return ret;
762
763#ifdef CONFIG_PROC_FS
764 proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops);
765 if (!proc) goto cleanup_init;
766
767 proc_exp = proc_net_fops_create("nf_conntrack_expect", 0440,
768 &exp_file_ops);
769 if (!proc_exp) goto cleanup_proc;
770
771 proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat);
772 if (!proc_stat)
773 goto cleanup_proc_exp;
774
775 proc_stat->proc_fops = &ct_cpu_seq_fops;
776 proc_stat->owner = THIS_MODULE;
777#endif
778#ifdef CONFIG_SYSCTL
779 nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
780 if (nf_ct_sysctl_header == NULL) {
781 printk("nf_conntrack: can't register to sysctl.\n");
782 ret = -ENOMEM;
783 goto cleanup_proc_stat;
784 }
785#endif
786 return ret;
787
788#ifdef CONFIG_SYSCTL
789 cleanup_proc_stat:
790#endif
791#ifdef CONFIG_PROC_FS
792 remove_proc_entry("nf_conntrack", proc_net_stat);
793 cleanup_proc_exp:
794 proc_net_remove("nf_conntrack_expect");
795 cleanup_proc:
796 proc_net_remove("nf_conntrack");
797 cleanup_init:
 798#endif /* CONFIG_PROC_FS */
799 nf_conntrack_cleanup();
800 return ret;
812} 801}
813 802
814static void __exit nf_conntrack_standalone_fini(void) 803static void __exit nf_conntrack_standalone_fini(void)
815{ 804{
816 init_or_cleanup(0); 805#ifdef CONFIG_SYSCTL
806 unregister_sysctl_table(nf_ct_sysctl_header);
807#endif
808#ifdef CONFIG_PROC_FS
809 remove_proc_entry("nf_conntrack", proc_net_stat);
810 proc_net_remove("nf_conntrack_expect");
811 proc_net_remove("nf_conntrack");
 812#endif /* CONFIG_PROC_FS */
813 nf_conntrack_cleanup();
817} 814}
818 815
819module_init(nf_conntrack_standalone_init); 816module_init(nf_conntrack_standalone_init);
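The standalone-module hunk above retires the init_or_cleanup(int init) idiom: module init and module exit become separate functions instead of one routine that jumps into its own error-unwind labels on exit. A stripped-down model of the resulting split, with invented resource names:

#include <stdio.h>
#include <stdlib.h>

static void *res_a, *res_b;

static int demo_init(void)
{
        res_a = malloc(16);
        if (!res_a)
                goto err_a;
        res_b = malloc(16);
        if (!res_b)
                goto err_b;
        return 0;

err_b:
        free(res_a);
err_a:
        return -1;
}

static void demo_exit(void)
{
        /* Teardown is now a straight-line mirror of init, instead of a
         * goto into init's unwind labels. */
        free(res_b);
        free(res_a);
}

int main(void)
{
        if (demo_init())
                return 1;
        demo_exit();
        return 0;
}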
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index d9f0d7ef103b..ee8f70889f47 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -17,7 +17,6 @@
17 * for queueing and must reinject all packets it receives, no matter what. 17 * for queueing and must reinject all packets it receives, no matter what.
18 */ 18 */
19static struct nf_queue_handler *queue_handler[NPROTO]; 19static struct nf_queue_handler *queue_handler[NPROTO];
20static struct nf_queue_rerouter *queue_rerouter[NPROTO];
21 20
22static DEFINE_RWLOCK(queue_handler_lock); 21static DEFINE_RWLOCK(queue_handler_lock);
23 22
@@ -59,32 +58,6 @@ int nf_unregister_queue_handler(int pf)
59} 58}
60EXPORT_SYMBOL(nf_unregister_queue_handler); 59EXPORT_SYMBOL(nf_unregister_queue_handler);
61 60
62int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
63{
64 if (pf >= NPROTO)
65 return -EINVAL;
66
67 write_lock_bh(&queue_handler_lock);
68 rcu_assign_pointer(queue_rerouter[pf], rer);
69 write_unlock_bh(&queue_handler_lock);
70
71 return 0;
72}
73EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
74
75int nf_unregister_queue_rerouter(int pf)
76{
77 if (pf >= NPROTO)
78 return -EINVAL;
79
80 write_lock_bh(&queue_handler_lock);
81 rcu_assign_pointer(queue_rerouter[pf], NULL);
82 write_unlock_bh(&queue_handler_lock);
83 synchronize_rcu();
84 return 0;
85}
86EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
87
88void nf_unregister_queue_handlers(struct nf_queue_handler *qh) 61void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
89{ 62{
90 int pf; 63 int pf;
@@ -116,7 +89,7 @@ int nf_queue(struct sk_buff **skb,
116 struct net_device *physindev = NULL; 89 struct net_device *physindev = NULL;
117 struct net_device *physoutdev = NULL; 90 struct net_device *physoutdev = NULL;
118#endif 91#endif
119 struct nf_queue_rerouter *rerouter; 92 struct nf_afinfo *afinfo;
120 93
121 /* QUEUE == DROP if noone is waiting, to be safe. */ 94 /* QUEUE == DROP if noone is waiting, to be safe. */
122 read_lock(&queue_handler_lock); 95 read_lock(&queue_handler_lock);
@@ -126,7 +99,14 @@ int nf_queue(struct sk_buff **skb,
126 return 1; 99 return 1;
127 } 100 }
128 101
129 info = kmalloc(sizeof(*info)+queue_rerouter[pf]->rer_size, GFP_ATOMIC); 102 afinfo = nf_get_afinfo(pf);
103 if (!afinfo) {
104 read_unlock(&queue_handler_lock);
105 kfree_skb(*skb);
106 return 1;
107 }
108
109 info = kmalloc(sizeof(*info) + afinfo->route_key_size, GFP_ATOMIC);
130 if (!info) { 110 if (!info) {
131 if (net_ratelimit()) 111 if (net_ratelimit())
132 printk(KERN_ERR "OOM queueing packet %p\n", 112 printk(KERN_ERR "OOM queueing packet %p\n",
@@ -158,10 +138,7 @@ int nf_queue(struct sk_buff **skb,
158 if (physoutdev) dev_hold(physoutdev); 138 if (physoutdev) dev_hold(physoutdev);
159 } 139 }
160#endif 140#endif
161 rerouter = rcu_dereference(queue_rerouter[pf]); 141 afinfo->saveroute(*skb, info);
162 if (rerouter)
163 rerouter->save(*skb, info);
164
165 status = queue_handler[pf]->outfn(*skb, info, queuenum, 142 status = queue_handler[pf]->outfn(*skb, info, queuenum,
166 queue_handler[pf]->data); 143 queue_handler[pf]->data);
167 144
@@ -190,7 +167,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
190{ 167{
191 struct list_head *elem = &info->elem->list; 168 struct list_head *elem = &info->elem->list;
192 struct list_head *i; 169 struct list_head *i;
193 struct nf_queue_rerouter *rerouter; 170 struct nf_afinfo *afinfo;
194 171
195 rcu_read_lock(); 172 rcu_read_lock();
196 173
@@ -228,8 +205,8 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
228 } 205 }
229 206
230 if (verdict == NF_ACCEPT) { 207 if (verdict == NF_ACCEPT) {
231 rerouter = rcu_dereference(queue_rerouter[info->pf]); 208 afinfo = nf_get_afinfo(info->pf);
232 if (rerouter && rerouter->reroute(&skb, info) < 0) 209 if (!afinfo || afinfo->reroute(&skb, info) < 0)
233 verdict = NF_DROP; 210 verdict = NF_DROP;
234 } 211 }
235 212
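In nf_queue() above, the per-protocol queue_rerouter registry is replaced by nf_get_afinfo(), and the route key is allocated inline behind the info structure using afinfo->route_key_size. A sketch of that layout with a flexible array member; the sizes and names are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct afinfo {
        const char *name;
        size_t route_key_size;
};

static const struct afinfo af4 = { "ipv4", 4 };    /* e.g. a saved daddr */
static const struct afinfo af6 = { "ipv6", 16 };

struct queue_info {
        const struct afinfo *af;
        unsigned char route_key[];   /* af->route_key_size bytes follow */
};

static struct queue_info *queue_info_alloc(const struct afinfo *af)
{
        struct queue_info *info = malloc(sizeof(*info) + af->route_key_size);

        if (!info)
                return NULL;
        info->af = af;
        memset(info->route_key, 0, af->route_key_size);
        return info;
}

int main(void)
{
        struct queue_info *i4 = queue_info_alloc(&af4);
        struct queue_info *i6 = queue_info_alloc(&af6);

        if (!i4 || !i6)
                return 1;
        printf("%s key %zu bytes, %s key %zu bytes\n",
               i4->af->name, i4->af->route_key_size,
               i6->af->name, i6->af->route_key_size);
        free(i4);
        free(i6);
        return 0;
}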
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3e3f5448bacb..61cdda4e5d3b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@ static int
321nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) 321nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
322{ 322{
323 spin_lock_bh(&inst->lock); 323 spin_lock_bh(&inst->lock);
324 inst->flags = ntohs(flags); 324 inst->flags = flags;
325 spin_unlock_bh(&inst->lock); 325 spin_unlock_bh(&inst->lock);
326 326
327 return 0; 327 return 0;
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
902 if (nfula[NFULA_CFG_FLAGS-1]) { 902 if (nfula[NFULA_CFG_FLAGS-1]) {
903 u_int16_t flags = 903 u_int16_t flags =
904 *(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]); 904 *(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
905 nfulnl_set_flags(inst, ntohl(flags)); 905 nfulnl_set_flags(inst, ntohs(flags));
906 } 906 }
907 907
908out_put: 908out_put:
@@ -1033,17 +1033,13 @@ static struct file_operations nful_file_ops = {
1033 1033
1034#endif /* PROC_FS */ 1034#endif /* PROC_FS */
1035 1035
1036static int 1036static int __init nfnetlink_log_init(void)
1037init_or_cleanup(int init)
1038{ 1037{
1039 int i, status = -ENOMEM; 1038 int i, status = -ENOMEM;
1040#ifdef CONFIG_PROC_FS 1039#ifdef CONFIG_PROC_FS
1041 struct proc_dir_entry *proc_nful; 1040 struct proc_dir_entry *proc_nful;
1042#endif 1041#endif
1043 1042
1044 if (!init)
1045 goto cleanup;
1046
1047 for (i = 0; i < INSTANCE_BUCKETS; i++) 1043 for (i = 0; i < INSTANCE_BUCKETS; i++)
1048 INIT_HLIST_HEAD(&instance_table[i]); 1044 INIT_HLIST_HEAD(&instance_table[i]);
1049 1045
@@ -1066,30 +1062,25 @@ init_or_cleanup(int init)
1066 goto cleanup_subsys; 1062 goto cleanup_subsys;
1067 proc_nful->proc_fops = &nful_file_ops; 1063 proc_nful->proc_fops = &nful_file_ops;
1068#endif 1064#endif
1069
1070 return status; 1065 return status;
1071 1066
1072cleanup:
1073 nf_log_unregister_logger(&nfulnl_logger);
1074#ifdef CONFIG_PROC_FS 1067#ifdef CONFIG_PROC_FS
1075 remove_proc_entry("nfnetlink_log", proc_net_netfilter);
1076cleanup_subsys: 1068cleanup_subsys:
1077#endif
1078 nfnetlink_subsys_unregister(&nfulnl_subsys); 1069 nfnetlink_subsys_unregister(&nfulnl_subsys);
1070#endif
1079cleanup_netlink_notifier: 1071cleanup_netlink_notifier:
1080 netlink_unregister_notifier(&nfulnl_rtnl_notifier); 1072 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
1081 return status; 1073 return status;
1082} 1074}
1083 1075
1084static int __init nfnetlink_log_init(void)
1085{
1086
1087 return init_or_cleanup(1);
1088}
1089
1090static void __exit nfnetlink_log_fini(void) 1076static void __exit nfnetlink_log_fini(void)
1091{ 1077{
1092 init_or_cleanup(0); 1078 nf_log_unregister_logger(&nfulnl_logger);
1079#ifdef CONFIG_PROC_FS
1080 remove_proc_entry("nfnetlink_log", proc_net_netfilter);
1081#endif
1082 nfnetlink_subsys_unregister(&nfulnl_subsys);
1083 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
1093} 1084}
1094 1085
1095MODULE_DESCRIPTION("netfilter userspace logging"); 1086MODULE_DESCRIPTION("netfilter userspace logging");
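The nfnetlink_log flags fix is a convert-exactly-once bug: the attribute arrives in network byte order, and after the patch only the caller converts it, while nfulnl_set_flags() stores what it is given. A tiny userspace illustration using the standard byte-order helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t inst_flags;

/* After the patch the setter stores what it is given... */
static void set_flags(uint16_t flags)
{
        inst_flags = flags;
}

int main(void)
{
        uint16_t wire = htons(0x0001);      /* as read off the netlink attribute */

        set_flags(ntohs(wire));             /* ...and the caller converts exactly once */
        printf("flags = 0x%04x\n", (unsigned)inst_flags);
        return 0;
}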
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index d0e62f68139f..86a4ac33de34 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1071,17 +1071,13 @@ static struct file_operations nfqnl_file_ops = {
1071 1071
1072#endif /* PROC_FS */ 1072#endif /* PROC_FS */
1073 1073
1074static int 1074static int __init nfnetlink_queue_init(void)
1075init_or_cleanup(int init)
1076{ 1075{
1077 int i, status = -ENOMEM; 1076 int i, status = -ENOMEM;
1078#ifdef CONFIG_PROC_FS 1077#ifdef CONFIG_PROC_FS
1079 struct proc_dir_entry *proc_nfqueue; 1078 struct proc_dir_entry *proc_nfqueue;
1080#endif 1079#endif
1081 1080
1082 if (!init)
1083 goto cleanup;
1084
1085 for (i = 0; i < INSTANCE_BUCKETS; i++) 1081 for (i = 0; i < INSTANCE_BUCKETS; i++)
1086 INIT_HLIST_HEAD(&instance_table[i]); 1082 INIT_HLIST_HEAD(&instance_table[i]);
1087 1083
@@ -1101,31 +1097,26 @@ init_or_cleanup(int init)
1101#endif 1097#endif
1102 1098
1103 register_netdevice_notifier(&nfqnl_dev_notifier); 1099 register_netdevice_notifier(&nfqnl_dev_notifier);
1104
1105 return status; 1100 return status;
1106 1101
1107cleanup:
1108 nf_unregister_queue_handlers(&nfqh);
1109 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1110#ifdef CONFIG_PROC_FS 1102#ifdef CONFIG_PROC_FS
1111 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
1112cleanup_subsys: 1103cleanup_subsys:
1113#endif
1114 nfnetlink_subsys_unregister(&nfqnl_subsys); 1104 nfnetlink_subsys_unregister(&nfqnl_subsys);
1105#endif
1115cleanup_netlink_notifier: 1106cleanup_netlink_notifier:
1116 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1107 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1117 return status; 1108 return status;
1118} 1109}
1119 1110
1120static int __init nfnetlink_queue_init(void)
1121{
1122
1123 return init_or_cleanup(1);
1124}
1125
1126static void __exit nfnetlink_queue_fini(void) 1111static void __exit nfnetlink_queue_fini(void)
1127{ 1112{
1128 init_or_cleanup(0); 1113 nf_unregister_queue_handlers(&nfqh);
1114 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1115#ifdef CONFIG_PROC_FS
1116 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
1117#endif
1118 nfnetlink_subsys_unregister(&nfqnl_subsys);
1119 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1129} 1120}
1130 1121
1131MODULE_DESCRIPTION("netfilter packet queue handler"); 1122MODULE_DESCRIPTION("netfilter packet queue handler");
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index feb8a9e066b0..99293c63ff73 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -289,7 +289,7 @@ int xt_compat_match(void *match, void **dstptr, int *size, int convert)
289 case COMPAT_TO_USER: 289 case COMPAT_TO_USER:
290 pm = (struct xt_entry_match *)match; 290 pm = (struct xt_entry_match *)match;
291 msize = pm->u.user.match_size; 291 msize = pm->u.user.match_size;
292 if (__copy_to_user(*dstptr, pm, msize)) { 292 if (copy_to_user(*dstptr, pm, msize)) {
293 ret = -EFAULT; 293 ret = -EFAULT;
294 break; 294 break;
295 } 295 }
@@ -366,7 +366,7 @@ int xt_compat_target(void *target, void **dstptr, int *size, int convert)
366 case COMPAT_TO_USER: 366 case COMPAT_TO_USER:
367 pt = (struct xt_entry_target *)target; 367 pt = (struct xt_entry_target *)target;
368 tsize = pt->u.user.target_size; 368 tsize = pt->u.user.target_size;
369 if (__copy_to_user(*dstptr, pt, tsize)) { 369 if (copy_to_user(*dstptr, pt, tsize)) {
370 ret = -EFAULT; 370 ret = -EFAULT;
371 break; 371 break;
372 } 372 }
@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
413 413
414 newinfo->size = size; 414 newinfo->size = size;
415 415
416 for_each_cpu(cpu) { 416 for_each_possible_cpu(cpu) {
417 if (size <= PAGE_SIZE) 417 if (size <= PAGE_SIZE)
418 newinfo->entries[cpu] = kmalloc_node(size, 418 newinfo->entries[cpu] = kmalloc_node(size,
419 GFP_KERNEL, 419 GFP_KERNEL,
@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info)
436{ 436{
437 int cpu; 437 int cpu;
438 438
439 for_each_cpu(cpu) { 439 for_each_possible_cpu(cpu) {
440 if (info->size <= PAGE_SIZE) 440 if (info->size <= PAGE_SIZE)
441 kfree(info->entries[cpu]); 441 kfree(info->entries[cpu]);
442 else 442 else
@@ -529,6 +529,7 @@ int xt_register_table(struct xt_table *table,
529 529
530 /* Simplifies replace_table code. */ 530 /* Simplifies replace_table code. */
531 table->private = bootstrap; 531 table->private = bootstrap;
532 rwlock_init(&table->lock);
532 if (!xt_replace_table(table, 0, newinfo, &ret)) 533 if (!xt_replace_table(table, 0, newinfo, &ret))
533 goto unlock; 534 goto unlock;
534 535
@@ -538,7 +539,6 @@ int xt_register_table(struct xt_table *table,
538 /* save number of initial entries */ 539 /* save number of initial entries */
539 private->initial_entries = private->number; 540 private->initial_entries = private->number;
540 541
541 rwlock_init(&table->lock);
542 list_prepend(&xt[table->af].tables, table); 542 list_prepend(&xt[table->af].tables, table);
543 543
544 ret = 0; 544 ret = 0;
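The x_tables hunk moves rwlock_init() ahead of xt_replace_table(), so the table lock is usable before any code path that may take it. A userspace model of the same ordering rule using a pthreads rwlock (compile with -lpthread); the table layout here is invented for the example:

#include <pthread.h>
#include <stdio.h>

struct table {
        pthread_rwlock_t lock;
        int entries;
};

static int replace_table(struct table *t, int entries)
{
        /* This helper may run concurrently once the table is visible,
         * so the lock must already be initialised when we get here. */
        pthread_rwlock_wrlock(&t->lock);
        t->entries = entries;
        pthread_rwlock_unlock(&t->lock);
        return 0;
}

static int register_table(struct table *t, int entries)
{
        pthread_rwlock_init(&t->lock, NULL);   /* init first... */
        return replace_table(t, entries);      /* ...then replace/publish */
}

int main(void)
{
        struct table t;

        register_table(&t, 42);
        printf("entries = %d\n", t.entries);
        return 0;
}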
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2a233ffcf618..3862e73d14d7 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -56,12 +56,12 @@
56#include <linux/mm.h> 56#include <linux/mm.h>
57#include <linux/types.h> 57#include <linux/types.h>
58#include <linux/audit.h> 58#include <linux/audit.h>
59#include <linux/selinux.h>
59 60
60#include <net/sock.h> 61#include <net/sock.h>
61#include <net/scm.h> 62#include <net/scm.h>
62#include <net/netlink.h> 63#include <net/netlink.h>
63 64
64#define Nprintk(a...)
65#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) 65#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
66 66
67struct netlink_sock { 67struct netlink_sock {
@@ -1157,6 +1157,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1157 NETLINK_CB(skb).dst_pid = dst_pid; 1157 NETLINK_CB(skb).dst_pid = dst_pid;
1158 NETLINK_CB(skb).dst_group = dst_group; 1158 NETLINK_CB(skb).dst_group = dst_group;
1159 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context); 1159 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
1160 selinux_get_task_sid(current, &(NETLINK_CB(skb).sid));
1160 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1161 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1161 1162
1162 /* What can I do? Netlink is asynchronous, so that 1163 /* What can I do? Netlink is asynchronous, so that
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index d44981f5a619..3669cb953e6e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -425,11 +425,16 @@ static int nr_create(struct socket *sock, int protocol)
425 425
426 nr_init_timers(sk); 426 nr_init_timers(sk);
427 427
428 nr->t1 = sysctl_netrom_transport_timeout; 428 nr->t1 =
429 nr->t2 = sysctl_netrom_transport_acknowledge_delay; 429 msecs_to_jiffies(sysctl_netrom_transport_timeout);
430 nr->n2 = sysctl_netrom_transport_maximum_tries; 430 nr->t2 =
431 nr->t4 = sysctl_netrom_transport_busy_delay; 431 msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
432 nr->idle = sysctl_netrom_transport_no_activity_timeout; 432 nr->n2 =
433 msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
434 nr->t4 =
435 msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
436 nr->idle =
437 msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
433 nr->window = sysctl_netrom_transport_requested_window_size; 438 nr->window = sysctl_netrom_transport_requested_window_size;
434 439
435 nr->bpqext = 1; 440 nr->bpqext = 1;
@@ -1365,8 +1370,6 @@ static struct notifier_block nr_dev_notifier = {
1365 1370
1366static struct net_device **dev_nr; 1371static struct net_device **dev_nr;
1367 1372
1368static char banner[] __initdata = KERN_INFO "G4KLX NET/ROM for Linux. Version 0.7 for AX25.037 Linux 2.4\n";
1369
1370static int __init nr_proto_init(void) 1373static int __init nr_proto_init(void)
1371{ 1374{
1372 int i; 1375 int i;
@@ -1414,7 +1417,6 @@ static int __init nr_proto_init(void)
1414 } 1417 }
1415 1418
1416 register_netdevice_notifier(&nr_dev_notifier); 1419 register_netdevice_notifier(&nr_dev_notifier);
1417 printk(banner);
1418 1420
1419 ax25_protocol_register(AX25_P_NETROM, nr_route_frame); 1421 ax25_protocol_register(AX25_P_NETROM, nr_route_frame);
1420 ax25_linkfail_register(nr_link_failed); 1422 ax25_linkfail_register(nr_link_failed);
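The NET/ROM and ROSE hunks stop copying raw sysctl values (milliseconds) straight into timer fields and convert them with msecs_to_jiffies() first. A tiny model of the conversion, with HZ as an assumed tick rate and a made-up timeout value:

#include <stdio.h>

#define HZ 250   /* illustrative tick rate */

static unsigned long msecs_to_ticks(unsigned long ms)
{
        /* round up so a short timeout never becomes zero ticks */
        return (ms * HZ + 999) / 1000;
}

int main(void)
{
        unsigned long t1_ms = 120000;   /* e.g. a transport timeout sysctl */

        printf("%lu ms -> %lu ticks at HZ=%d\n",
               t1_ms, msecs_to_ticks(t1_ms), HZ);
        return 0;
}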
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 509afddae569..621e5586ab03 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -185,7 +185,6 @@ static struct net_device_stats *nr_get_stats(struct net_device *dev)
185 185
186void nr_setup(struct net_device *dev) 186void nr_setup(struct net_device *dev)
187{ 187{
188 SET_MODULE_OWNER(dev);
189 dev->mtu = NR_MAX_PACKET_SIZE; 188 dev->mtu = NR_MAX_PACKET_SIZE;
190 dev->hard_start_xmit = nr_xmit; 189 dev->hard_start_xmit = nr_xmit;
191 dev->open = nr_open; 190 dev->open = nr_open;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index ea65396d1619..55564efccf11 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -518,11 +518,11 @@ static int rose_create(struct socket *sock, int protocol)
518 init_timer(&rose->timer); 518 init_timer(&rose->timer);
519 init_timer(&rose->idletimer); 519 init_timer(&rose->idletimer);
520 520
521 rose->t1 = sysctl_rose_call_request_timeout; 521 rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
522 rose->t2 = sysctl_rose_reset_request_timeout; 522 rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
523 rose->t3 = sysctl_rose_clear_request_timeout; 523 rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
524 rose->hb = sysctl_rose_ack_hold_back_timeout; 524 rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
525 rose->idle = sysctl_rose_no_activity_timeout; 525 rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
526 526
527 rose->state = ROSE_STATE_0; 527 rose->state = ROSE_STATE_0;
528 528
@@ -1469,8 +1469,6 @@ static struct notifier_block rose_dev_notifier = {
1469 1469
1470static struct net_device **dev_rose; 1470static struct net_device **dev_rose;
1471 1471
1472static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62 for AX25.037 Linux 2.4\n";
1473
1474static int __init rose_proto_init(void) 1472static int __init rose_proto_init(void)
1475{ 1473{
1476 int i; 1474 int i;
@@ -1519,7 +1517,6 @@ static int __init rose_proto_init(void)
1519 1517
1520 sock_register(&rose_family_ops); 1518 sock_register(&rose_family_ops);
1521 register_netdevice_notifier(&rose_dev_notifier); 1519 register_netdevice_notifier(&rose_dev_notifier);
1522 printk(banner);
1523 1520
1524 ax25_protocol_register(AX25_P_ROSE, rose_route_frame); 1521 ax25_protocol_register(AX25_P_ROSE, rose_route_frame);
1525 ax25_linkfail_register(rose_link_failed); 1522 ax25_linkfail_register(rose_link_failed);
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index d297af737d10..2a1bf8e119e5 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -135,7 +135,6 @@ static struct net_device_stats *rose_get_stats(struct net_device *dev)
135 135
136void rose_setup(struct net_device *dev) 136void rose_setup(struct net_device *dev)
137{ 137{
138 SET_MODULE_OWNER(dev);
139 dev->mtu = ROSE_MAX_PACKET_SIZE - 2; 138 dev->mtu = ROSE_MAX_PACKET_SIZE - 2;
140 dev->hard_start_xmit = rose_xmit; 139 dev->hard_start_xmit = rose_xmit;
141 dev->open = rose_open; 140 dev->open = rose_open;
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index 09e9e9d04d92..bd86a63960ce 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -40,7 +40,8 @@ void rose_start_ftimer(struct rose_neigh *neigh)
40 40
41 neigh->ftimer.data = (unsigned long)neigh; 41 neigh->ftimer.data = (unsigned long)neigh;
42 neigh->ftimer.function = &rose_ftimer_expiry; 42 neigh->ftimer.function = &rose_ftimer_expiry;
43 neigh->ftimer.expires = jiffies + sysctl_rose_link_fail_timeout; 43 neigh->ftimer.expires =
44 jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
44 45
45 add_timer(&neigh->ftimer); 46 add_timer(&neigh->ftimer);
46} 47}
@@ -51,7 +52,8 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
51 52
52 neigh->t0timer.data = (unsigned long)neigh; 53 neigh->t0timer.data = (unsigned long)neigh;
53 neigh->t0timer.function = &rose_t0timer_expiry; 54 neigh->t0timer.function = &rose_t0timer_expiry;
54 neigh->t0timer.expires = jiffies + sysctl_rose_restart_request_timeout; 55 neigh->t0timer.expires =
56 jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
55 57
56 add_timer(&neigh->t0timer); 58 add_timer(&neigh->t0timer);
57} 59}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 8631b65a7312..a22542fa1bc8 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -48,8 +48,6 @@ static DEFINE_SPINLOCK(rose_route_list_lock);
48 48
49struct rose_neigh *rose_loopback_neigh; 49struct rose_neigh *rose_loopback_neigh;
50 50
51static void rose_remove_neigh(struct rose_neigh *);
52
53/* 51/*
54 * Add a new route to a node, and in the process add the node and the 52 * Add a new route to a node, and in the process add the node and the
55 * neighbour if it is new. 53 * neighbour if it is new.
@@ -235,11 +233,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
235 233
236 skb_queue_purge(&rose_neigh->queue); 234 skb_queue_purge(&rose_neigh->queue);
237 235
238 spin_lock_bh(&rose_neigh_list_lock);
239
240 if ((s = rose_neigh_list) == rose_neigh) { 236 if ((s = rose_neigh_list) == rose_neigh) {
241 rose_neigh_list = rose_neigh->next; 237 rose_neigh_list = rose_neigh->next;
242 spin_unlock_bh(&rose_neigh_list_lock);
243 kfree(rose_neigh->digipeat); 238 kfree(rose_neigh->digipeat);
244 kfree(rose_neigh); 239 kfree(rose_neigh);
245 return; 240 return;
@@ -248,7 +243,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
248 while (s != NULL && s->next != NULL) { 243 while (s != NULL && s->next != NULL) {
249 if (s->next == rose_neigh) { 244 if (s->next == rose_neigh) {
250 s->next = rose_neigh->next; 245 s->next = rose_neigh->next;
251 spin_unlock_bh(&rose_neigh_list_lock);
252 kfree(rose_neigh->digipeat); 246 kfree(rose_neigh->digipeat);
253 kfree(rose_neigh); 247 kfree(rose_neigh);
254 return; 248 return;
@@ -256,7 +250,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
256 250
257 s = s->next; 251 s = s->next;
258 } 252 }
259 spin_unlock_bh(&rose_neigh_list_lock);
260} 253}
261 254
262/* 255/*
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 6056d20ef429..37640c6fc014 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -69,6 +69,11 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
69 DPRINTK("ipt_init_target: found %s\n", target->name); 69 DPRINTK("ipt_init_target: found %s\n", target->name);
70 t->u.kernel.target = target; 70 t->u.kernel.target = target;
71 71
72 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
73 table, hook, 0, 0);
74 if (ret)
75 return ret;
76
72 if (t->u.kernel.target->checkentry 77 if (t->u.kernel.target->checkentry
73 && !t->u.kernel.target->checkentry(table, NULL, 78 && !t->u.kernel.target->checkentry(table, NULL,
74 t->u.kernel.target, t->data, 79 t->u.kernel.target, t->data,
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index fa877f8f652c..24c348fa8922 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -66,7 +66,7 @@ static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
66} 66}
67 67
68#ifdef CONFIG_NET_CLS_ACT 68#ifdef CONFIG_NET_CLS_ACT
69static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, 69static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
70 int type, struct tc_action *a) 70 int type, struct tc_action *a)
71{ 71{
72 struct tcf_police *p; 72 struct tcf_police *p;
@@ -113,7 +113,7 @@ rtattr_failure:
113} 113}
114 114
115static inline int 115static inline int
116tcf_hash_search(struct tc_action *a, u32 index) 116tcf_act_police_hash_search(struct tc_action *a, u32 index)
117{ 117{
118 struct tcf_police *p = tcf_police_lookup(index); 118 struct tcf_police *p = tcf_police_lookup(index);
119 119
@@ -387,9 +387,9 @@ static struct tc_action_ops act_police_ops = {
387 .act = tcf_act_police, 387 .act = tcf_act_police,
388 .dump = tcf_act_police_dump, 388 .dump = tcf_act_police_dump,
389 .cleanup = tcf_act_police_cleanup, 389 .cleanup = tcf_act_police_cleanup,
390 .lookup = tcf_hash_search, 390 .lookup = tcf_act_police_hash_search,
391 .init = tcf_act_police_locate, 391 .init = tcf_act_police_locate,
392 .walk = tcf_generic_walker 392 .walk = tcf_act_police_walker
393}; 393};
394 394
395static int __init 395static int __init
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 31eb83717c26..138ea92ed268 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -193,8 +193,10 @@ static void dev_watchdog(unsigned long arg)
193 netif_running(dev) && 193 netif_running(dev) &&
194 netif_carrier_ok(dev)) { 194 netif_carrier_ok(dev)) {
195 if (netif_queue_stopped(dev) && 195 if (netif_queue_stopped(dev) &&
196 (jiffies - dev->trans_start) > dev->watchdog_timeo) { 196 time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
197 printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); 197
198 printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
199 dev->name);
198 dev->tx_timeout(dev); 200 dev->tx_timeout(dev);
199 } 201 }
200 if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) 202 if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
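The watchdog hunk above swaps a raw jiffies subtraction for time_after(), which stays correct when the tick counter wraps. A userspace model of the macro for a 32-bit counter:

#include <stdint.h>
#include <stdio.h>

/* true if a is after b, even across a wrap of the 32-bit counter */
#define time_after32(a, b)  ((int32_t)((b) - (a)) < 0)

int main(void)
{
        uint32_t trans_start = 0xfffffff0u;   /* shortly before the wrap */
        uint32_t now = 0x00000010u;           /* shortly after the wrap */

        /* naive: now > trans_start is false here, so the timeout is missed */
        printf("naive compare: %d\n", now > trans_start);
        printf("wrap-safe    : %d\n", time_after32(now, trans_start));
        return 0;
}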
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 91132f6871d7..f1c7bd29f2cd 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -974,10 +974,10 @@ hfsc_adjust_levels(struct hfsc_class *cl)
974 do { 974 do {
975 level = 0; 975 level = 0;
976 list_for_each_entry(p, &cl->children, siblings) { 976 list_for_each_entry(p, &cl->children, siblings) {
977 if (p->level > level) 977 if (p->level >= level)
978 level = p->level; 978 level = p->level + 1;
979 } 979 }
980 cl->level = level + 1; 980 cl->level = level;
981 } while ((cl = cl->cl_parent) != NULL); 981 } while ((cl = cl->cl_parent) != NULL);
982} 982}
983 983
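The hfsc_adjust_levels() change computes a class level as one more than the deepest child, which also yields level 0 for a class with no children instead of forcing it to 1. A small model of the recomputation on plain structs:

#include <stdio.h>

struct cls {
        int level;
        int nr_children;
        struct cls **children;
};

static void adjust_level(struct cls *cl)
{
        int level = 0;
        int i;

        for (i = 0; i < cl->nr_children; i++)
                if (cl->children[i]->level >= level)
                        level = cl->children[i]->level + 1;
        cl->level = level;           /* 0 when there are no children */
}

int main(void)
{
        struct cls leaf1 = { 0, 0, NULL };
        struct cls leaf2 = { 0, 0, NULL };
        struct cls *kids[] = { &leaf1, &leaf2 };
        struct cls inner = { 0, 2, kids };
        struct cls empty = { 0, 0, NULL };

        adjust_level(&leaf1);
        adjust_level(&inner);
        adjust_level(&empty);
        printf("leaf=%d inner=%d empty=%d\n",
               leaf1.level, inner.level, empty.level);
        return 0;
}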
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7228d30512c7..5a4a4d0ae502 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -167,7 +167,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
167 if (count == 0) { 167 if (count == 0) {
168 sch->qstats.drops++; 168 sch->qstats.drops++;
169 kfree_skb(skb); 169 kfree_skb(skb);
170 return NET_XMIT_DROP; 170 return NET_XMIT_BYPASS;
171 } 171 }
172 172
173 /* 173 /*
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d117ebc75cf8..1662f9cc869e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association(
73 const union sctp_addr *peer, 73 const union sctp_addr *peer,
74 struct sctp_transport **pt); 74 struct sctp_transport **pt);
75 75
76static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
77
76 78
77/* Calculate the SCTP checksum of an SCTP packet. */ 79/* Calculate the SCTP checksum of an SCTP packet. */
78static inline int sctp_rcv_checksum(struct sk_buff *skb) 80static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb)
186 */ 188 */
187 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) 189 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
188 { 190 {
189 sock_put(sk);
190 if (asoc) { 191 if (asoc) {
191 sctp_association_put(asoc); 192 sctp_association_put(asoc);
192 asoc = NULL; 193 asoc = NULL;
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb)
197 sk = sctp_get_ctl_sock(); 198 sk = sctp_get_ctl_sock();
198 ep = sctp_sk(sk)->ep; 199 ep = sctp_sk(sk)->ep;
199 sctp_endpoint_hold(ep); 200 sctp_endpoint_hold(ep);
200 sock_hold(sk);
201 rcvr = &ep->base; 201 rcvr = &ep->base;
202 } 202 }
203 203
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb)
253 */ 253 */
254 sctp_bh_lock_sock(sk); 254 sctp_bh_lock_sock(sk);
255 255
256 /* It is possible that the association could have moved to a different
257 * socket if it is peeled off. If so, update the sk.
258 */
259 if (sk != rcvr->sk) {
260 sctp_bh_lock_sock(rcvr->sk);
261 sctp_bh_unlock_sock(sk);
262 sk = rcvr->sk;
263 }
264
265 if (sock_owned_by_user(sk)) 256 if (sock_owned_by_user(sk))
266 sk_add_backlog(sk, skb); 257 sctp_add_backlog(sk, skb);
267 else 258 else
268 sctp_backlog_rcv(sk, skb); 259 sctp_inq_push(&chunk->rcvr->inqueue, chunk);
269 260
270 /* Release the sock and the sock ref we took in the lookup calls.
271 * The asoc/ep ref will be released in sctp_backlog_rcv.
272 */
273 sctp_bh_unlock_sock(sk); 261 sctp_bh_unlock_sock(sk);
274 sock_put(sk); 262
263 /* Release the asoc/ep ref we took in the lookup calls. */
264 if (asoc)
265 sctp_association_put(asoc);
266 else
267 sctp_endpoint_put(ep);
275 268
276 return 0; 269 return 0;
277 270
@@ -280,8 +273,7 @@ discard_it:
280 return 0; 273 return 0;
281 274
282discard_release: 275discard_release:
283 /* Release any structures we may be holding. */ 276 /* Release the asoc/ep ref we took in the lookup calls. */
284 sock_put(sk);
285 if (asoc) 277 if (asoc)
286 sctp_association_put(asoc); 278 sctp_association_put(asoc);
287 else 279 else
@@ -290,56 +282,87 @@ discard_release:
290 goto discard_it; 282 goto discard_it;
291} 283}
292 284
293/* Handle second half of inbound skb processing. If the sock was busy, 285/* Process the backlog queue of the socket. Every skb on
294 * we may have need to delay processing until later when the sock is 286 * the backlog holds a ref on an association or endpoint.
295 * released (on the backlog). If not busy, we call this routine 287 * We hold this ref throughout the state machine to make
296 * directly from the bottom half. 288 * sure that the structure we need is still around.
297 */ 289 */
298int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) 290int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
299{ 291{
300 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 292 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
301 struct sctp_inq *inqueue = NULL; 293 struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
302 struct sctp_ep_common *rcvr = NULL; 294 struct sctp_ep_common *rcvr = NULL;
295 int backloged = 0;
303 296
304 rcvr = chunk->rcvr; 297 rcvr = chunk->rcvr;
305 298
306 BUG_TRAP(rcvr->sk == sk); 299 /* If the rcvr is dead then the association or endpoint
307 300 * has been deleted and we can safely drop the chunk
308 if (rcvr->dead) { 301 * and refs that we are holding.
309 sctp_chunk_free(chunk); 302 */
310 } else { 303 if (rcvr->dead) {
311 inqueue = &chunk->rcvr->inqueue; 304 sctp_chunk_free(chunk);
312 sctp_inq_push(inqueue, chunk); 305 goto done;
313 } 306 }
314 307
315 /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */ 308 if (unlikely(rcvr->sk != sk)) {
316 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 309 /* In this case, the association moved from one socket to
317 sctp_association_put(sctp_assoc(rcvr)); 310 * another. We are currently sitting on the backlog of the
318 else 311 * old socket, so we need to move.
319 sctp_endpoint_put(sctp_ep(rcvr)); 312 * However, since we are here in the process context we
320 313 * need to take make sure that the user doesn't own
314 * the new socket when we process the packet.
315 * If the new socket is user-owned, queue the chunk to the
316 * backlog of the new socket without dropping any refs.
317 * Otherwise, we can safely push the chunk on the inqueue.
318 */
319
320 sk = rcvr->sk;
321 sctp_bh_lock_sock(sk);
322
323 if (sock_owned_by_user(sk)) {
324 sk_add_backlog(sk, skb);
325 backloged = 1;
326 } else
327 sctp_inq_push(inqueue, chunk);
328
329 sctp_bh_unlock_sock(sk);
330
331 /* If the chunk was backloged again, don't drop refs */
332 if (backloged)
333 return 0;
334 } else {
335 sctp_inq_push(inqueue, chunk);
336 }
337
338done:
339 /* Release the refs we took in sctp_add_backlog */
340 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
341 sctp_association_put(sctp_assoc(rcvr));
342 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
343 sctp_endpoint_put(sctp_ep(rcvr));
344 else
345 BUG();
346
321 return 0; 347 return 0;
322} 348}
323 349
324void sctp_backlog_migrate(struct sctp_association *assoc, 350static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
325 struct sock *oldsk, struct sock *newsk)
326{ 351{
327 struct sk_buff *skb; 352 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
328 struct sctp_chunk *chunk; 353 struct sctp_ep_common *rcvr = chunk->rcvr;
329 354
330 skb = oldsk->sk_backlog.head; 355 /* Hold the assoc/ep while hanging on the backlog queue.
331 oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL; 356 * This way, we know structures we need will not disappear from us
332 while (skb != NULL) { 357 */
333 struct sk_buff *next = skb->next; 358 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
334 359 sctp_association_hold(sctp_assoc(rcvr));
335 chunk = SCTP_INPUT_CB(skb)->chunk; 360 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
336 skb->next = NULL; 361 sctp_endpoint_hold(sctp_ep(rcvr));
337 if (&assoc->base == chunk->rcvr) 362 else
338 sk_add_backlog(newsk, skb); 363 BUG();
339 else 364
340 sk_add_backlog(oldsk, skb); 365 sk_add_backlog(sk, skb);
341 skb = next;
342 }
343} 366}
344 367
345/* Handle icmp frag needed error. */ 368/* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
412 union sctp_addr daddr; 435 union sctp_addr daddr;
413 struct sctp_af *af; 436 struct sctp_af *af;
414 struct sock *sk = NULL; 437 struct sock *sk = NULL;
415 struct sctp_association *asoc = NULL; 438 struct sctp_association *asoc;
416 struct sctp_transport *transport = NULL; 439 struct sctp_transport *transport = NULL;
417 440
418 *app = NULL; *tpp = NULL; 441 *app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
453 return sk; 476 return sk;
454 477
455out: 478out:
456 sock_put(sk);
457 if (asoc) 479 if (asoc)
458 sctp_association_put(asoc); 480 sctp_association_put(asoc);
459 return NULL; 481 return NULL;
@@ -463,7 +485,6 @@ out:
463void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) 485void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
464{ 486{
465 sctp_bh_unlock_sock(sk); 487 sctp_bh_unlock_sock(sk);
466 sock_put(sk);
467 if (asoc) 488 if (asoc)
468 sctp_association_put(asoc); 489 sctp_association_put(asoc);
469} 490}
@@ -490,7 +511,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
490 int type = skb->h.icmph->type; 511 int type = skb->h.icmph->type;
491 int code = skb->h.icmph->code; 512 int code = skb->h.icmph->code;
492 struct sock *sk; 513 struct sock *sk;
493 struct sctp_association *asoc; 514 struct sctp_association *asoc = NULL;
494 struct sctp_transport *transport; 515 struct sctp_transport *transport;
495 struct inet_sock *inet; 516 struct inet_sock *inet;
496 char *saveip, *savesctp; 517 char *saveip, *savesctp;
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
716 737
717hit: 738hit:
718 sctp_endpoint_hold(ep); 739 sctp_endpoint_hold(ep);
719 sock_hold(epb->sk);
720 read_unlock(&head->lock); 740 read_unlock(&head->lock);
721 return ep; 741 return ep;
722} 742}
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association(
818hit: 838hit:
819 *pt = transport; 839 *pt = transport;
820 sctp_association_hold(asoc); 840 sctp_association_hold(asoc);
821 sock_hold(epb->sk);
822 read_unlock(&head->lock); 841 read_unlock(&head->lock);
823 return asoc; 842 return asoc;
824} 843}
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr,
846 struct sctp_transport *transport; 865 struct sctp_transport *transport;
847 866
848 if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { 867 if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
849 sock_put(asoc->base.sk);
850 sctp_association_put(asoc); 868 sctp_association_put(asoc);
851 return 1; 869 return 1;
852 } 870 }
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 297b8951463e..cf0c767d43ae 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
149 /* This is the first chunk in the packet. */ 149 /* This is the first chunk in the packet. */
150 chunk->singleton = 1; 150 chunk->singleton = 1;
151 ch = (sctp_chunkhdr_t *) chunk->skb->data; 151 ch = (sctp_chunkhdr_t *) chunk->skb->data;
152 chunk->data_accepted = 0;
152 } 153 }
153 154
154 chunk->chunk_hdr = ch; 155 chunk->chunk_hdr = ch;
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d47a52c303a8..5b3b0e0ae7e5 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr)
69 unsigned long res = 0; 69 unsigned long res = 0;
70 int i; 70 int i;
71 71
72 for_each_cpu(i) { 72 for_each_possible_cpu(i) {
73 res += 73 res +=
74 *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + 74 *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
75 sizeof (unsigned long) * nr)); 75 sizeof (unsigned long) * nr));
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d1dc24bab4c..c5beb2ad7ef7 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -498,10 +498,6 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
498 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 498 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
499 SCTP_STATE(SCTP_STATE_CLOSED)); 499 SCTP_STATE(SCTP_STATE_CLOSED));
500 500
501 /* Set sk_err to ECONNRESET on a 1-1 style socket. */
502 if (!sctp_style(asoc->base.sk, UDP))
503 asoc->base.sk->sk_err = ECONNRESET;
504
505 /* SEND_FAILED sent later when cleaning up the association. */ 501 /* SEND_FAILED sent later when cleaning up the association. */
506 asoc->outqueue.error = error; 502 asoc->outqueue.error = error;
507 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 503 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
838 return; 834 return;
839} 835}
840 836
837/* Helper function to set sk_err on a 1-1 style socket. */
838static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
839{
840 struct sock *sk = asoc->base.sk;
841
842 if (!sctp_style(sk, UDP))
843 sk->sk_err = error;
844}
845
841/* These three macros allow us to pull the debugging code out of the 846/* These three macros allow us to pull the debugging code out of the
842 * main flow of sctp_do_sm() to keep attention focused on the real 847 * main flow of sctp_do_sm() to keep attention focused on the real
843 * functionality there. 848 * functionality there.
@@ -1458,6 +1463,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1458 local_cork = 0; 1463 local_cork = 0;
1459 asoc->peer.retran_path = t; 1464 asoc->peer.retran_path = t;
1460 break; 1465 break;
1466 case SCTP_CMD_SET_SK_ERR:
1467 sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1468 break;
1461 default: 1469 default:
1462 printk(KERN_WARNING "Impossible command: %u, %p\n", 1470 printk(KERN_WARNING "Impossible command: %u, %p\n",
1463 cmd->verb, cmd->obj.ptr); 1471 cmd->verb, cmd->obj.ptr);
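sm_sideeffect.c gains a SCTP_CMD_SET_SK_ERR verb so state functions can queue "set the socket error" as a side effect instead of poking sk_err inline (the inline assignment removed from sctp_cmd_assoc_failed() above). A toy command interpreter with the same shape:

#include <stdio.h>

enum cmd_verb { CMD_NEW_STATE, CMD_SET_SK_ERR };

struct cmd {
        enum cmd_verb verb;
        int arg;
};

struct sock_model {
        int state;
        int sk_err;
        int is_one_to_one;      /* only 1-1 style sockets get sk_err */
};

static void run_cmds(struct sock_model *sk, const struct cmd *cmds, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                switch (cmds[i].verb) {
                case CMD_NEW_STATE:
                        sk->state = cmds[i].arg;
                        break;
                case CMD_SET_SK_ERR:
                        if (sk->is_one_to_one)
                                sk->sk_err = cmds[i].arg;
                        break;
                }
        }
}

int main(void)
{
        struct sock_model sk = { 0, 0, 1 };
        struct cmd cmds[] = {
                { CMD_SET_SK_ERR, 104 },   /* e.g. ECONNRESET */
                { CMD_NEW_STATE, 1 },
        };

        run_cmds(&sk, cmds, 2);
        printf("state=%d sk_err=%d\n", sk.state, sk.sk_err);
        return 0;
}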
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 2b9a832b29a7..8bc279219a72 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
93static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); 93static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
94 94
95static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 95static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
96 __u16 error, 96 __u16 error, int sk_err,
97 const struct sctp_association *asoc, 97 const struct sctp_association *asoc,
98 struct sctp_transport *transport); 98 struct sctp_transport *transport);
99 99
@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
448 __u32 init_tag; 448 __u32 init_tag;
449 struct sctp_chunk *err_chunk; 449 struct sctp_chunk *err_chunk;
450 struct sctp_packet *packet; 450 struct sctp_packet *packet;
451 sctp_disposition_t ret; 451 __u16 error;
452 452
453 if (!sctp_vtag_verify(chunk, asoc)) 453 if (!sctp_vtag_verify(chunk, asoc))
454 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 454 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
480 goto nomem; 480 goto nomem;
481 481
482 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); 482 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
483 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 483 return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
484 SCTP_STATE(SCTP_STATE_CLOSED)); 484 ECONNREFUSED, asoc,
485 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 485 chunk->transport);
486 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
487 return SCTP_DISPOSITION_DELETE_TCB;
488 } 486 }
489 487
490 /* Verify the INIT chunk before processing it. */ 488 /* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
511 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, 509 sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
512 SCTP_PACKET(packet)); 510 SCTP_PACKET(packet));
513 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); 511 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
514 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 512 error = SCTP_ERROR_INV_PARAM;
515 SCTP_STATE(SCTP_STATE_CLOSED));
516 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
517 SCTP_NULL());
518 return SCTP_DISPOSITION_CONSUME;
519 } else { 513 } else {
520 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 514 error = SCTP_ERROR_NO_RESOURCE;
521 SCTP_STATE(SCTP_STATE_CLOSED));
522 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
523 SCTP_NULL());
524 return SCTP_DISPOSITION_NOMEM;
525 } 515 }
526 } else { 516 } else {
527 ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg, 517 sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
528 commands); 518 error = SCTP_ERROR_INV_PARAM;
529 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
530 SCTP_STATE(SCTP_STATE_CLOSED));
531 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
532 SCTP_NULL());
533 return ret;
534 } 519 }
520 return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
521 asoc, chunk->transport);
535 } 522 }
536 523
537 /* Tag the variable length parameters. Note that we never 524 /* Tag the variable length parameters. Note that we never
@@ -636,8 +623,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
636 */ 623 */
637 chunk->subh.cookie_hdr = 624 chunk->subh.cookie_hdr =
638 (struct sctp_signed_cookie *)chunk->skb->data; 625 (struct sctp_signed_cookie *)chunk->skb->data;
639 skb_pull(chunk->skb, 626 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
640 ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); 627 sizeof(sctp_chunkhdr_t)))
628 goto nomem;
641 629
642 /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint 630 /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
643 * "Z" will reply with a COOKIE ACK chunk after building a TCB 631 * "Z" will reply with a COOKIE ACK chunk after building a TCB
@@ -885,6 +873,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
885 struct sctp_transport *transport = (struct sctp_transport *) arg; 873 struct sctp_transport *transport = (struct sctp_transport *) arg;
886 874
887 if (asoc->overall_error_count >= asoc->max_retrans) { 875 if (asoc->overall_error_count >= asoc->max_retrans) {
876 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
877 SCTP_ERROR(ETIMEDOUT));
888 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 878 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
889 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 879 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
890 SCTP_U32(SCTP_ERROR_NO_ERROR)); 880 SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -965,7 +955,8 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
965 */ 955 */
966 chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; 956 chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data;
967 paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); 957 paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
968 skb_pull(chunk->skb, paylen); 958 if (!pskb_pull(chunk->skb, paylen))
959 goto nomem;
969 960
970 reply = sctp_make_heartbeat_ack(asoc, chunk, 961 reply = sctp_make_heartbeat_ack(asoc, chunk,
971 chunk->subh.hb_hdr, paylen); 962 chunk->subh.hb_hdr, paylen);
@@ -1028,6 +1019,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
1028 commands); 1019 commands);
1029 1020
1030 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 1021 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
1022 /* Make sure that the length of the parameter is what we expect */
1023 if (ntohs(hbinfo->param_hdr.length) !=
1024 sizeof(sctp_sender_hb_info_t)) {
1025 return SCTP_DISPOSITION_DISCARD;
1026 }
1027
1031 from_addr = hbinfo->daddr; 1028 from_addr = hbinfo->daddr;
1032 link = sctp_assoc_lookup_paddr(asoc, &from_addr); 1029 link = sctp_assoc_lookup_paddr(asoc, &from_addr);
1033 1030
@@ -1860,8 +1857,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
1860 * are in good shape. 1857 * are in good shape.
1861 */ 1858 */
1862 chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; 1859 chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
1863 skb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - 1860 if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
1864 sizeof(sctp_chunkhdr_t)); 1861 sizeof(sctp_chunkhdr_t)))
1862 goto nomem;
1865 1863
1866 /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie 1864 /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
1867 * of a duplicate COOKIE ECHO match the Verification Tags of the 1865 * of a duplicate COOKIE ECHO match the Verification Tags of the
@@ -2123,6 +2121,8 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
2123 int attempts = asoc->init_err_counter + 1; 2121 int attempts = asoc->init_err_counter + 1;
2124 2122
2125 if (attempts > asoc->max_init_attempts) { 2123 if (attempts > asoc->max_init_attempts) {
2124 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
2125 SCTP_ERROR(ETIMEDOUT));
2126 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 2126 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
2127 SCTP_U32(SCTP_ERROR_STALE_COOKIE)); 2127 SCTP_U32(SCTP_ERROR_STALE_COOKIE));
2128 return SCTP_DISPOSITION_DELETE_TCB; 2128 return SCTP_DISPOSITION_DELETE_TCB;
@@ -2259,6 +2259,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2259 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2259 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2260 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2260 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2261 2261
2262 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2262 /* ASSOC_FAILED will DELETE_TCB. */ 2263 /* ASSOC_FAILED will DELETE_TCB. */
2263 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); 2264 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
2264 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 2265 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2303,7 +2304,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
2303 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2304 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2304 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2305 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2305 2306
2306 return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport); 2307 return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
2308 chunk->transport);
2307} 2309}
2308 2310
2309/* 2311/*
@@ -2315,7 +2317,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
2315 void *arg, 2317 void *arg,
2316 sctp_cmd_seq_t *commands) 2318 sctp_cmd_seq_t *commands)
2317{ 2319{
2318 return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc, 2320 return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
2321 ENOPROTOOPT, asoc,
2319 (struct sctp_transport *)arg); 2322 (struct sctp_transport *)arg);
2320} 2323}
2321 2324
@@ -2340,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
2340 * This is common code called by several sctp_sf_*_abort() functions above. 2343 * This is common code called by several sctp_sf_*_abort() functions above.
2341 */ 2344 */
2342static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, 2345static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
2343 __u16 error, 2346 __u16 error, int sk_err,
2344 const struct sctp_association *asoc, 2347 const struct sctp_association *asoc,
2345 struct sctp_transport *transport) 2348 struct sctp_transport *transport)
2346{ 2349{
@@ -2350,6 +2353,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
2350 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 2353 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
2351 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 2354 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2352 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 2355 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
2356 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
2353 /* CMD_INIT_FAILED will DELETE_TCB. */ 2357 /* CMD_INIT_FAILED will DELETE_TCB. */
2354 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 2358 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
2355 SCTP_U32(error)); 2359 SCTP_U32(error));
@@ -3333,6 +3337,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3333 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3337 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3334 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 3338 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3335 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); 3339 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
3340 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3341 SCTP_ERROR(ECONNABORTED));
3336 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3342 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3337 SCTP_U32(SCTP_ERROR_ASCONF_ACK)); 3343 SCTP_U32(SCTP_ERROR_ASCONF_ACK));
3338 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3344 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3359,6 +3365,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3359 * processing the rest of the chunks in the packet. 3365 * processing the rest of the chunks in the packet.
3360 */ 3366 */
3361 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); 3367 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
3368 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3369 SCTP_ERROR(ECONNABORTED));
3362 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3370 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3363 SCTP_U32(SCTP_ERROR_ASCONF_ACK)); 3371 SCTP_U32(SCTP_ERROR_ASCONF_ACK));
3364 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 3372 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3711,9 +3719,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
3711 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { 3719 if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
3712 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 3720 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
3713 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 3721 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
3722 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3723 SCTP_ERROR(ECONNREFUSED));
3714 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 3724 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
3715 SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); 3725 SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
3716 } else { 3726 } else {
3727 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
3728 SCTP_ERROR(ECONNABORTED));
3717 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 3729 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
3718 SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); 3730 SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
3719 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 3731 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4031,6 +4043,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4031 * TCB. This is a departure from our typical NOMEM handling. 4043 * TCB. This is a departure from our typical NOMEM handling.
4032 */ 4044 */
4033 4045
4046 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4047 SCTP_ERROR(ECONNABORTED));
4034 /* Delete the established association. */ 4048 /* Delete the established association. */
4035 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4049 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4036 SCTP_U32(SCTP_ERROR_USER_ABORT)); 4050 SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4172,6 +4186,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4172 * TCB. This is a departure from our typical NOMEM handling. 4186 * TCB. This is a departure from our typical NOMEM handling.
4173 */ 4187 */
4174 4188
4189 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4190 SCTP_ERROR(ECONNREFUSED));
4175 /* Delete the established association. */ 4191 /* Delete the established association. */
4176 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 4192 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
4177 SCTP_U32(SCTP_ERROR_USER_ABORT)); 4193 SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4540,6 +4556,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
4540 struct sctp_transport *transport = arg; 4556 struct sctp_transport *transport = arg;
4541 4557
4542 if (asoc->overall_error_count >= asoc->max_retrans) { 4558 if (asoc->overall_error_count >= asoc->max_retrans) {
4559 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4560 SCTP_ERROR(ETIMEDOUT));
4543 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 4561 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
4544 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4562 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4545 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4563 SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4659,6 +4677,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
4659 SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d" 4677 SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
4660 " max_init_attempts: %d\n", 4678 " max_init_attempts: %d\n",
4661 attempts, asoc->max_init_attempts); 4679 attempts, asoc->max_init_attempts);
4680 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4681 SCTP_ERROR(ETIMEDOUT));
4662 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 4682 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
4663 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4683 SCTP_U32(SCTP_ERROR_NO_ERROR));
4664 return SCTP_DISPOSITION_DELETE_TCB; 4684 return SCTP_DISPOSITION_DELETE_TCB;
@@ -4708,6 +4728,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
4708 4728
4709 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 4729 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
4710 } else { 4730 } else {
4731 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4732 SCTP_ERROR(ETIMEDOUT));
4711 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, 4733 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
4712 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4734 SCTP_U32(SCTP_ERROR_NO_ERROR));
4713 return SCTP_DISPOSITION_DELETE_TCB; 4735 return SCTP_DISPOSITION_DELETE_TCB;
@@ -4739,6 +4761,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
4739 4761
4740 SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); 4762 SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
4741 if (asoc->overall_error_count >= asoc->max_retrans) { 4763 if (asoc->overall_error_count >= asoc->max_retrans) {
4764 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4765 SCTP_ERROR(ETIMEDOUT));
4742 /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 4766 /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
4743 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4767 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4744 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4768 SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4814,6 +4838,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
4814 if (asoc->overall_error_count >= asoc->max_retrans) { 4838 if (asoc->overall_error_count >= asoc->max_retrans) {
4815 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 4839 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4816 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 4840 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
4841 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4842 SCTP_ERROR(ETIMEDOUT));
4817 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4843 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4818 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4844 SCTP_U32(SCTP_ERROR_NO_ERROR));
4819 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 4845 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4867,6 +4893,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
4867 goto nomem; 4893 goto nomem;
4868 4894
4869 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); 4895 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
4896 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
4897 SCTP_ERROR(ETIMEDOUT));
4870 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 4898 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
4871 SCTP_U32(SCTP_ERROR_NO_ERROR)); 4899 SCTP_U32(SCTP_ERROR_NO_ERROR));
4872 4900
@@ -5151,7 +5179,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5151 int tmp; 5179 int tmp;
5152 __u32 tsn; 5180 __u32 tsn;
5153 int account_value; 5181 int account_value;
5182 struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
5154 struct sock *sk = asoc->base.sk; 5183 struct sock *sk = asoc->base.sk;
5184 int rcvbuf_over = 0;
5155 5185
5156 data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; 5186 data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
5157 skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); 5187 skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5162,10 +5192,16 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5162 /* ASSERT: Now skb->data is really the user data. */ 5192 /* ASSERT: Now skb->data is really the user data. */
5163 5193
5164 /* 5194 /*
5165 * if we are established, and we have used up our receive 5195 * If we are established, and we have used up our receive buffer
5166 * buffer memory, drop the frame 5196 * memory, think about dropping the frame.
5167 */ 5197 * Note that we have an opportunity to improve performance here.
5168 if (asoc->state == SCTP_STATE_ESTABLISHED) { 5198 * If we accept one chunk from an skbuff, we have to keep all the
5199 * memory of that skbuff around until the chunk is read into user
5200 * space. Therefore, once we accept 1 chunk we may as well accept all
5201 * remaining chunks in the skbuff. The data_accepted flag helps us do
5202 * that.
5203 */
5204 if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
5169 /* 5205 /*
5170 * If the receive buffer policy is 1, then each 5206 * If the receive buffer policy is 1, then each
5171 * association can allocate up to sk_rcvbuf bytes 5207 * association can allocate up to sk_rcvbuf bytes
@@ -5176,9 +5212,25 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5176 account_value = atomic_read(&asoc->rmem_alloc); 5212 account_value = atomic_read(&asoc->rmem_alloc);
5177 else 5213 else
5178 account_value = atomic_read(&sk->sk_rmem_alloc); 5214 account_value = atomic_read(&sk->sk_rmem_alloc);
5179 5215 if (account_value > sk->sk_rcvbuf) {
5180 if (account_value > sk->sk_rcvbuf) 5216 /*
5181 return SCTP_IERROR_IGNORE_TSN; 5217 * We need to make forward progress, even when we are
5218 * under memory pressure, so we always allow the
5219 * next tsn after the ctsn ack point to be accepted.
5220 * This lets us avoid deadlocks in which we have to
5221 * drop frames that would otherwise let us drain the
5222 * receive queue.
5223 */
5224 if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
5225 return SCTP_IERROR_IGNORE_TSN;
5226
5227 /*
5228 * We're going to accept the frame but we should renege
5229 * to make space for it. This will send us down that
5230 * path later in this function.
5231 */
5232 rcvbuf_over = 1;
5233 }
5182 } 5234 }
5183 5235
5184 /* Process ECN based congestion. 5236 /* Process ECN based congestion.
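The rewritten buffer-pressure check above keeps the association making forward progress: when rmem accounting exceeds sk_rcvbuf the chunk is normally ignored, but the TSN immediately after the cumulative ack point is still accepted, with rcvbuf_over set so the code reneges later to make room. A simplified standalone model of that decision; the real code compares TSNs with serial-number arithmetic via sctp_tsnmap_get_ctsn():

    #include <stdint.h>
    #include <stdio.h>

    enum verdict { ACCEPT, IGNORE_TSN, ACCEPT_AND_RENEGE };

    /* Simplified: 'used'/'limit' model rmem accounting vs sk_rcvbuf. */
    static enum verdict eat_data(uint32_t tsn, uint32_t ctsn,
                                 long used, long limit)
    {
        if (used > limit) {
            if (tsn != ctsn + 1)        /* not the next expected TSN */
                return IGNORE_TSN;
            return ACCEPT_AND_RENEGE;   /* keep draining the queue */
        }
        return ACCEPT;
    }

    int main(void)
    {
        printf("%d\n", eat_data(101, 100, 70000, 65536)); /* next TSN: renege */
        printf("%d\n", eat_data(105, 100, 70000, 65536)); /* gap: ignore      */
        printf("%d\n", eat_data(105, 100, 10000, 65536)); /* room: accept     */
        return 0;
    }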
@@ -5226,6 +5278,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5226 datalen -= sizeof(sctp_data_chunk_t); 5278 datalen -= sizeof(sctp_data_chunk_t);
5227 5279
5228 deliver = SCTP_CMD_CHUNK_ULP; 5280 deliver = SCTP_CMD_CHUNK_ULP;
5281 chunk->data_accepted = 1;
5229 5282
5230 /* Think about partial delivery. */ 5283 /* Think about partial delivery. */
5231 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { 5284 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
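chunk->data_accepted, set in the hunk above, implements the comment earlier in this function: once one chunk from an skbuff has been accepted, its memory stays pinned until user space reads it, so the remaining chunks in the same skbuff skip the receive-buffer check. A small sketch of that rule, with hypothetical types:

    #include <stdio.h>

    struct chunk { int data_accepted; };

    static int over_rcvbuf(long used, long limit) { return used > limit; }

    /* First chunk with no room gets dropped; once one is accepted, the
     * packet's memory is pinned anyway, so take the rest as well. */
    static int accept_chunk(struct chunk *c, long used, long limit)
    {
        if (!c->data_accepted && over_rcvbuf(used, limit))
            return 0;
        c->data_accepted = 1;
        return 1;
    }

    int main(void)
    {
        struct chunk c = { 0 };
        printf("%d\n", accept_chunk(&c, 10, 100));  /* 1: room, accepted   */
        printf("%d\n", accept_chunk(&c, 200, 100)); /* 1: already accepted */
        return 0;
    }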
@@ -5242,7 +5295,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5242 * large spill over. 5295 * large spill over.
5243 */ 5296 */
5244 if (!asoc->rwnd || asoc->rwnd_over || 5297 if (!asoc->rwnd || asoc->rwnd_over ||
5245 (datalen > asoc->rwnd + asoc->frag_point)) { 5298 (datalen > asoc->rwnd + asoc->frag_point) ||
5299 rcvbuf_over) {
5246 5300
5247 /* If this is the next TSN, consider reneging to make 5301 /* If this is the next TSN, consider reneging to make
5248 * room. Note: Playing nice with a confused sender. A 5302 * room. Note: Playing nice with a confused sender. A
@@ -5250,8 +5304,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5250 * space and in the future we may want to detect and 5304 * space and in the future we may want to detect and
5251 * do more drastic reneging. 5305 * do more drastic reneging.
5252 */ 5306 */
5253 if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) && 5307 if (sctp_tsnmap_has_gap(map) &&
5254 (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { 5308 (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
5255 SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); 5309 SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
5256 deliver = SCTP_CMD_RENEGE; 5310 deliver = SCTP_CMD_RENEGE;
5257 } else { 5311 } else {
@@ -5280,6 +5334,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
5280 * processing the rest of the chunks in the packet. 5334 * processing the rest of the chunks in the packet.
5281 */ 5335 */
5282 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); 5336 sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
5337 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5338 SCTP_ERROR(ECONNABORTED));
5283 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5339 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5284 SCTP_U32(SCTP_ERROR_NO_DATA)); 5340 SCTP_U32(SCTP_ERROR_NO_DATA));
5285 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5341 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 75ef10408764..8bcca5676151 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -366,9 +366,9 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
366 /* SCTP_STATE_EMPTY */ \ 366 /* SCTP_STATE_EMPTY */ \
367 {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ 367 {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \
368 /* SCTP_STATE_CLOSED */ \ 368 /* SCTP_STATE_CLOSED */ \
369 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ 369 {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
370 /* SCTP_STATE_COOKIE_WAIT */ \ 370 /* SCTP_STATE_COOKIE_WAIT */ \
371 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ 371 {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
372 /* SCTP_STATE_COOKIE_ECHOED */ \ 372 /* SCTP_STATE_COOKIE_ECHOED */ \
373 {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ 373 {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
374 /* SCTP_STATE_ESTABLISHED */ \ 374 /* SCTP_STATE_ESTABLISHED */ \
@@ -380,7 +380,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
380 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ 380 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
381 {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ 381 {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \
382 /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ 382 /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
383 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ 383 {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
384} /* TYPE_SCTP_ECN_ECNE */ 384} /* TYPE_SCTP_ECN_ECNE */
385 385
386#define TYPE_SCTP_ECN_CWR { \ 386#define TYPE_SCTP_ECN_CWR { \
@@ -401,7 +401,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
401 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ 401 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
402 {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ 402 {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
403 /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ 403 /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
404 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ 404 {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \
405} /* TYPE_SCTP_ECN_CWR */ 405} /* TYPE_SCTP_ECN_CWR */
406 406
407#define TYPE_SCTP_SHUTDOWN_COMPLETE { \ 407#define TYPE_SCTP_SHUTDOWN_COMPLETE { \
@@ -647,7 +647,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
647 /* SCTP_STATE_EMPTY */ \ 647 /* SCTP_STATE_EMPTY */ \
648 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ 648 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
649 /* SCTP_STATE_CLOSED */ \ 649 /* SCTP_STATE_CLOSED */ \
650 {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ 650 {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \
651 /* SCTP_STATE_COOKIE_WAIT */ \ 651 /* SCTP_STATE_COOKIE_WAIT */ \
652 {.fn = sctp_sf_do_prm_requestheartbeat, \ 652 {.fn = sctp_sf_do_prm_requestheartbeat, \
653 .name = "sctp_sf_do_prm_requestheartbeat"}, \ 653 .name = "sctp_sf_do_prm_requestheartbeat"}, \
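In sm_statetable.c the ECNE, CWR and request-heartbeat entries for states that an off-path or confused peer can legitimately put us in are changed from sctp_sf_bug (a kernel bug report) to sctp_sf_discard_chunk or sctp_sf_error_closed, treating the event as unexpected rather than impossible. A minimal dispatch-table model of that distinction; all names below are illustrative only:

    #include <stdio.h>

    enum state { ST_CLOSED, ST_ESTABLISHED, ST_NUM };

    typedef int (*handler_t)(void);

    static int sf_discard(void) { printf("discard chunk\n"); return 0; }
    static int sf_do_ecne(void) { printf("process ECNE\n");  return 0; }
    static int sf_bug(void)     { printf("BUG: impossible state\n"); return -1; }

    struct entry { handler_t fn; const char *name; };

    /* Per-state table for one chunk type: unexpected-but-possible states get
     * a discard handler, not the bug handler. */
    static const struct entry ecne_table[ST_NUM] = {
        [ST_CLOSED]      = { sf_discard, "sf_discard" },
        [ST_ESTABLISHED] = { sf_do_ecne, "sf_do_ecne" },
    };

    int main(void)
    {
        for (int s = 0; s < ST_NUM; s++)
            ecne_table[s].fn();
        (void)sf_bug;           /* kept only to show the contrast */
        return 0;
    }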
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b6e4b89539b3..174d4d35e951 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1057,6 +1057,7 @@ static int __sctp_connect(struct sock* sk,
1057 inet_sk(sk)->dport = htons(asoc->peer.port); 1057 inet_sk(sk)->dport = htons(asoc->peer.port);
1058 af = sctp_get_af_specific(to.sa.sa_family); 1058 af = sctp_get_af_specific(to.sa.sa_family);
1059 af->to_sk_daddr(&to, sk); 1059 af->to_sk_daddr(&to, sk);
1060 sk->sk_err = 0;
1060 1061
1061 timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); 1062 timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
1062 err = sctp_wait_for_connect(asoc, &timeo); 1063 err = sctp_wait_for_connect(asoc, &timeo);
@@ -1228,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1228 1229
1229 ep = sctp_sk(sk)->ep; 1230 ep = sctp_sk(sk)->ep;
1230 1231
1231 /* Walk all associations on a socket, not on an endpoint. */ 1232 /* Walk all associations on an endpoint. */
1232 list_for_each_safe(pos, temp, &ep->asocs) { 1233 list_for_each_safe(pos, temp, &ep->asocs) {
1233 asoc = list_entry(pos, struct sctp_association, asocs); 1234 asoc = list_entry(pos, struct sctp_association, asocs);
1234 1235
@@ -1241,13 +1242,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1241 if (sctp_state(asoc, CLOSED)) { 1242 if (sctp_state(asoc, CLOSED)) {
1242 sctp_unhash_established(asoc); 1243 sctp_unhash_established(asoc);
1243 sctp_association_free(asoc); 1244 sctp_association_free(asoc);
1245 continue;
1246 }
1247 }
1244 1248
1245 } else if (sock_flag(sk, SOCK_LINGER) && 1249 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
1246 !sk->sk_lingertime) 1250 sctp_primitive_ABORT(asoc, NULL);
1247 sctp_primitive_ABORT(asoc, NULL); 1251 else
1248 else
1249 sctp_primitive_SHUTDOWN(asoc, NULL);
1250 } else
1251 sctp_primitive_SHUTDOWN(asoc, NULL); 1252 sctp_primitive_SHUTDOWN(asoc, NULL);
1252 } 1253 }
1253 1254
@@ -5317,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
5317 */ 5318 */
5318 sctp_release_sock(sk); 5319 sctp_release_sock(sk);
5319 current_timeo = schedule_timeout(current_timeo); 5320 current_timeo = schedule_timeout(current_timeo);
5321 BUG_ON(sk != asoc->base.sk);
5320 sctp_lock_sock(sk); 5322 sctp_lock_sock(sk);
5321 5323
5322 *timeo_p = current_timeo; 5324 *timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
5604 */ 5606 */
5605 newsp->type = type; 5607 newsp->type = type;
5606 5608
5607 spin_lock_bh(&oldsk->sk_lock.slock); 5609 /* Mark the new socket "in-use" by the user so that any packets
5608 /* Migrate the backlog from oldsk to newsk. */ 5610 * that may arrive on the association after we've moved it are
5609 sctp_backlog_migrate(assoc, oldsk, newsk); 5611 * queued to the backlog. This prevents a potential race between
5610 /* Migrate the association to the new socket. */ 5612 * backlog processing on the old socket and new-packet processing
5613 * on the new socket.
5614 */
5615 sctp_lock_sock(newsk);
5611 sctp_assoc_migrate(assoc, newsk); 5616 sctp_assoc_migrate(assoc, newsk);
5612 spin_unlock_bh(&oldsk->sk_lock.slock);
5613 5617
5614 /* If the association on the newsk is already closed before accept() 5618 /* If the association on the newsk is already closed before accept()
5615 * is called, set RCV_SHUTDOWN flag. 5619 * is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
5618 newsk->sk_shutdown |= RCV_SHUTDOWN; 5622 newsk->sk_shutdown |= RCV_SHUTDOWN;
5619 5623
5620 newsk->sk_state = SCTP_SS_ESTABLISHED; 5624 newsk->sk_state = SCTP_SS_ESTABLISHED;
5625 sctp_release_sock(newsk);
5621} 5626}
5622 5627
5623/* This proto struct describes the ULP interface for SCTP. */ 5628/* This proto struct describes the ULP interface for SCTP. */
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 2080b2d28c98..575e556aeb3e 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -279,6 +279,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
279static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag) 279static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
280{ 280{
281 struct sk_buff *pos; 281 struct sk_buff *pos;
282 struct sk_buff *new = NULL;
282 struct sctp_ulpevent *event; 283 struct sctp_ulpevent *event;
283 struct sk_buff *pnext, *last; 284 struct sk_buff *pnext, *last;
284 struct sk_buff *list = skb_shinfo(f_frag)->frag_list; 285 struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
@@ -297,11 +298,33 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
297 */ 298 */
298 if (last) 299 if (last)
299 last->next = pos; 300 last->next = pos;
300 else 301 else {
301 skb_shinfo(f_frag)->frag_list = pos; 302 if (skb_cloned(f_frag)) {
303 /* This is a cloned skb, we can't just modify
304 * the frag_list. We need a new skb to do that.
305 * Instead of calling skb_unshare(), we'll do it
306 * ourselves since we need to delay the free.
307 */
308 new = skb_copy(f_frag, GFP_ATOMIC);
309 if (!new)
310 return NULL; /* try again later */
311
312 new->sk = f_frag->sk;
313
314 skb_shinfo(new)->frag_list = pos;
315 } else
316 skb_shinfo(f_frag)->frag_list = pos;
317 }
302 318
303 /* Remove the first fragment from the reassembly queue. */ 319 /* Remove the first fragment from the reassembly queue. */
304 __skb_unlink(f_frag, queue); 320 __skb_unlink(f_frag, queue);
321
322 /* if we did unshare, then free the old skb and re-assign */
323 if (new) {
324 kfree_skb(f_frag);
325 f_frag = new;
326 }
327
305 while (pos) { 328 while (pos) {
306 329
307 pnext = pos->next; 330 pnext = pos->next;
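sctp_make_reassembled_event() may see a cloned first fragment; a clone shares its skb_shared_info with the original, so its frag_list cannot be edited in place. The hunk above copies the skb with skb_copy(), hangs the fragment chain off the copy, and frees the original only after __skb_unlink(). A user-space model of the copy-before-modify rule for shared buffers, with hypothetical types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct frag { int refs; char data[32]; struct frag *chain; };

    /* If the buffer is shared (refs > 1), work on a private copy before
     * attaching the chain, then drop our reference on the original. */
    static struct frag *attach_chain(struct frag *head, struct frag *chain)
    {
        if (head->refs > 1) {
            struct frag *copy = malloc(sizeof(*copy));
            if (!copy)
                return NULL;            /* try again later */
            memcpy(copy, head, sizeof(*copy));
            copy->refs = 1;
            head->refs--;               /* original stays intact for other users */
            head = copy;
        }
        head->chain = chain;
        return head;
    }

    int main(void)
    {
        struct frag shared = { .refs = 2, .data = "first" };
        struct frag rest   = { .refs = 1, .data = "rest"  };
        struct frag *h = attach_chain(&shared, &rest);

        printf("modified %s buffer, original refs now %d\n",
               h == &shared ? "the original" : "a private copy", shared.refs);
        free(h != &shared ? h : NULL);
        return 0;
    }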
diff --git a/net/socket.c b/net/socket.c
index b807f360e02c..02948b622bd2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -119,10 +119,6 @@ static ssize_t sock_writev(struct file *file, const struct iovec *vector,
119static ssize_t sock_sendpage(struct file *file, struct page *page, 119static ssize_t sock_sendpage(struct file *file, struct page *page,
120 int offset, size_t size, loff_t *ppos, int more); 120 int offset, size_t size, loff_t *ppos, int more);
121 121
122extern ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
123 size_t len, unsigned int flags);
124
125
126/* 122/*
127 * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear 123 * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
128 * in the operation structures but are done directly via the socketcall() multiplexor. 124 * in the operation structures but are done directly via the socketcall() multiplexor.
@@ -271,6 +267,8 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ule
271 return -EINVAL; 267 return -EINVAL;
272 if(len) 268 if(len)
273 { 269 {
270 if (audit_sockaddr(klen, kaddr))
271 return -ENOMEM;
274 if(copy_to_user(uaddr,kaddr,len)) 272 if(copy_to_user(uaddr,kaddr,len))
275 return -EFAULT; 273 return -EFAULT;
276 } 274 }
@@ -494,6 +492,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
494 struct file *file; 492 struct file *file;
495 struct socket *sock; 493 struct socket *sock;
496 494
495 *err = -EBADF;
497 file = fget_light(fd, fput_needed); 496 file = fget_light(fd, fput_needed);
498 if (file) { 497 if (file) {
499 sock = sock_from_file(file, err); 498 sock = sock_from_file(file, err);
@@ -2136,7 +2135,7 @@ void socket_seq_show(struct seq_file *seq)
2136 int cpu; 2135 int cpu;
2137 int counter = 0; 2136 int counter = 0;
2138 2137
2139 for_each_cpu(cpu) 2138 for_each_possible_cpu(cpu)
2140 counter += per_cpu(sockets_in_use, cpu); 2139 counter += per_cpu(sockets_in_use, cpu);
2141 2140
2142 /* It can be negative, by the way. 8) */ 2141 /* It can be negative, by the way. 8) */
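sockfd_lookup_light() now primes *err with -EBADF before calling fget_light(), so a bad file descriptor is reported as EBADF rather than whatever value the caller happened to leave in err. A minimal model of the pattern of setting the failure code before attempting the lookup:

    #include <errno.h>
    #include <stdio.h>

    struct thing { int id; };

    static struct thing table[4] = { {0}, {1}, {0}, {3} };  /* id 0 == empty slot */

    static struct thing *lookup(int fd, int *err)
    {
        *err = -EBADF;                  /* default: descriptor not found */
        if (fd < 0 || fd >= 4 || table[fd].id == 0)
            return NULL;
        *err = 0;
        return &table[fd];
    }

    int main(void)
    {
        int err;
        if (!lookup(2, &err))
            printf("lookup failed: %d\n", err); /* -EBADF, not a stale value */
        return 0;
    }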
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 900ef31f5a0e..519ebc17c028 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -794,7 +794,6 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
794 794
795out_err: 795out_err:
796 dprintk("RPC: gss_create_cred failed with error %d\n", err); 796 dprintk("RPC: gss_create_cred failed with error %d\n", err);
797 if (cred) gss_destroy_cred(&cred->gc_base);
798 return ERR_PTR(err); 797 return ERR_PTR(err);
799} 798}
800 799
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 97c981fa6b8e..76b969e6904f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -212,7 +212,6 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
212 char *cksumname; 212 char *cksumname;
213 struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ 213 struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */
214 struct scatterlist sg[1]; 214 struct scatterlist sg[1];
215 u32 code = GSS_S_FAILURE;
216 215
217 switch (cksumtype) { 216 switch (cksumtype) {
218 case CKSUMTYPE_RSA_MD5: 217 case CKSUMTYPE_RSA_MD5:
@@ -221,13 +220,11 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
221 default: 220 default:
222 dprintk("RPC: krb5_make_checksum:" 221 dprintk("RPC: krb5_make_checksum:"
223 " unsupported checksum %d", cksumtype); 222 " unsupported checksum %d", cksumtype);
224 goto out; 223 return GSS_S_FAILURE;
225 } 224 }
226 if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP))) 225 if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
227 goto out; 226 return GSS_S_FAILURE;
228 cksum->len = crypto_tfm_alg_digestsize(tfm); 227 cksum->len = crypto_tfm_alg_digestsize(tfm);
229 if ((cksum->data = kmalloc(cksum->len, GFP_KERNEL)) == NULL)
230 goto out;
231 228
232 crypto_digest_init(tfm); 229 crypto_digest_init(tfm);
233 sg_set_buf(sg, header, hdrlen); 230 sg_set_buf(sg, header, hdrlen);
@@ -235,10 +232,8 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
235 process_xdr_buf(body, body_offset, body->len - body_offset, 232 process_xdr_buf(body, body_offset, body->len - body_offset,
236 checksummer, tfm); 233 checksummer, tfm);
237 crypto_digest_final(tfm, cksum->data); 234 crypto_digest_final(tfm, cksum->data);
238 code = 0;
239out:
240 crypto_free_tfm(tfm); 235 crypto_free_tfm(tfm);
241 return code; 236 return 0;
242} 237}
243 238
244EXPORT_SYMBOL(make_checksum); 239EXPORT_SYMBOL(make_checksum);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 4d7eb9e704da..d51e316c5821 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1122,18 +1122,20 @@ svcauth_gss_release(struct svc_rqst *rqstp)
1122 integ_len)) 1122 integ_len))
1123 BUG(); 1123 BUG();
1124 if (resbuf->page_len == 0 1124 if (resbuf->page_len == 0
1125 && resbuf->tail[0].iov_len + RPC_MAX_AUTH_SIZE 1125 && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1126 < PAGE_SIZE) { 1126 < PAGE_SIZE) {
1127 BUG_ON(resbuf->tail[0].iov_len); 1127 BUG_ON(resbuf->tail[0].iov_len);
1128 /* Use head for everything */ 1128 /* Use head for everything */
1129 resv = &resbuf->head[0]; 1129 resv = &resbuf->head[0];
1130 } else if (resbuf->tail[0].iov_base == NULL) { 1130 } else if (resbuf->tail[0].iov_base == NULL) {
1131 /* copied from nfsd4_encode_read */ 1131 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1132 svc_take_page(rqstp); 1132 > PAGE_SIZE)
1133 resbuf->tail[0].iov_base = page_address(rqstp 1133 goto out_err;
1134 ->rq_respages[rqstp->rq_resused-1]); 1134 resbuf->tail[0].iov_base =
1135 rqstp->rq_restailpage = rqstp->rq_resused-1; 1135 resbuf->head[0].iov_base
1136 + resbuf->head[0].iov_len;
1136 resbuf->tail[0].iov_len = 0; 1137 resbuf->tail[0].iov_len = 0;
1138 rqstp->rq_restailpage = 0;
1137 resv = &resbuf->tail[0]; 1139 resv = &resbuf->tail[0];
1138 } else { 1140 } else {
1139 resv = &resbuf->tail[0]; 1141 resv = &resbuf->tail[0];
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 3ac4193a78ed..7026b0866b7b 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -159,6 +159,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
159 detail->update(tmp, new); 159 detail->update(tmp, new);
160 tmp->next = *head; 160 tmp->next = *head;
161 *head = tmp; 161 *head = tmp;
162 detail->entries++;
162 cache_get(tmp); 163 cache_get(tmp);
163 is_new = cache_fresh_locked(tmp, new->expiry_time); 164 is_new = cache_fresh_locked(tmp, new->expiry_time);
164 cache_fresh_locked(old, 0); 165 cache_fresh_locked(old, 0);
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index dea529666d69..15c2db26767b 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -176,7 +176,8 @@ void rpc_count_iostats(struct rpc_task *task)
176 op_metrics->om_execute += execute; 176 op_metrics->om_execute += execute;
177} 177}
178 178
179void _print_name(struct seq_file *seq, unsigned int op, struct rpc_procinfo *procs) 179static void _print_name(struct seq_file *seq, unsigned int op,
180 struct rpc_procinfo *procs)
180{ 181{
181 if (procs[op].p_name) 182 if (procs[op].p_name)
182 seq_printf(seq, "\t%12s: ", procs[op].p_name); 183 seq_printf(seq, "\t%12s: ", procs[op].p_name);
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 55538f6b60ff..58a1b6b42ddd 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -37,14 +37,6 @@ struct ctl_table net_table[] = {
37 .mode = 0555, 37 .mode = 0555,
38 .child = core_table, 38 .child = core_table,
39 }, 39 },
40#ifdef CONFIG_NET
41 {
42 .ctl_name = NET_ETHER,
43 .procname = "ethernet",
44 .mode = 0555,
45 .child = ether_table,
46 },
47#endif
48#ifdef CONFIG_INET 40#ifdef CONFIG_INET
49 { 41 {
50 .ctl_name = NET_IPV4, 42 .ctl_name = NET_IPV4,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 953307a9df1d..a3bbc891f959 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -229,8 +229,7 @@ static void node_is_down(struct publication *publ)
229 publ->node, publ->ref, publ->key); 229 publ->node, publ->ref, publ->key);
230 assert(p == publ); 230 assert(p == publ);
231 write_unlock_bh(&tipc_nametbl_lock); 231 write_unlock_bh(&tipc_nametbl_lock);
232 if (publ) 232 kfree(publ);
233 kfree(publ);
234} 233}
235 234
236/** 235/**
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index 8b9bf4a763b5..b1265187b4a8 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -55,12 +55,10 @@
55#include <asm/uaccess.h> 55#include <asm/uaccess.h>
56#include <linux/module.h> 56#include <linux/module.h>
57#include <linux/init.h> 57#include <linux/init.h>
58#include <linux/wanpipe.h>
59#include <linux/if_wanpipe.h> 58#include <linux/if_wanpipe.h>
60#include <linux/pkt_sched.h> 59#include <linux/pkt_sched.h>
61#include <linux/tcp_states.h> 60#include <linux/tcp_states.h>
62#include <linux/if_wanpipe_common.h> 61#include <linux/if_wanpipe_common.h>
63#include <linux/sdla_x25.h>
64 62
65#ifdef CONFIG_INET 63#ifdef CONFIG_INET
66#include <net/inet_common.h> 64#include <net/inet_common.h>
diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c
index 0a92e1da3922..71ff3088f6fe 100644
--- a/net/x25/x25_timer.c
+++ b/net/x25/x25_timer.c
@@ -114,8 +114,9 @@ static void x25_heartbeat_expiry(unsigned long param)
114 if (sock_flag(sk, SOCK_DESTROY) || 114 if (sock_flag(sk, SOCK_DESTROY) ||
115 (sk->sk_state == TCP_LISTEN && 115 (sk->sk_state == TCP_LISTEN &&
116 sock_flag(sk, SOCK_DEAD))) { 116 sock_flag(sk, SOCK_DEAD))) {
117 bh_unlock_sock(sk);
117 x25_destroy_socket(sk); 118 x25_destroy_socket(sk);
118 goto unlock; 119 return;
119 } 120 }
120 break; 121 break;
121 122
@@ -128,7 +129,6 @@ static void x25_heartbeat_expiry(unsigned long param)
128 } 129 }
129restart_heartbeat: 130restart_heartbeat:
130 x25_start_heartbeat(sk); 131 x25_start_heartbeat(sk);
131unlock:
132 bh_unlock_sock(sk); 132 bh_unlock_sock(sk);
133} 133}
134 134
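x25_heartbeat_expiry() has to release the socket lock before calling x25_destroy_socket(), because destroying the socket can free it, and the old code's bh_unlock_sock() on the way out would then touch freed memory. A user-space sketch of the unlock-before-free ordering, with a pthread mutex standing in for the socket lock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sock_model { pthread_mutex_t lock; };

    static void destroy_sock(struct sock_model *sk)
    {
        pthread_mutex_destroy(&sk->lock);
        free(sk);
    }

    int main(void)
    {
        struct sock_model *sk = malloc(sizeof(*sk));
        pthread_mutex_init(&sk->lock, NULL);

        pthread_mutex_lock(&sk->lock);
        /* ... timer work decides the socket must go away ... */
        pthread_mutex_unlock(&sk->lock);    /* unlock first ...            */
        destroy_sock(sk);                   /* ... then free, never the
                                             * other way around            */
        return 0;
    }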
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b54971059f16..891a6090cc09 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -62,7 +62,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
62 case IPPROTO_COMP: 62 case IPPROTO_COMP:
63 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr))) 63 if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
64 return -EINVAL; 64 return -EINVAL;
65 *spi = ntohl(ntohs(*(u16*)(skb->h.raw + 2))); 65 *spi = htonl(ntohs(*(u16*)(skb->h.raw + 2)));
66 *seq = 0; 66 *seq = 0;
67 return 0; 67 return 0;
68 default: 68 default:
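For IPPROTO_COMP the SPI is synthesized from the 16-bit CPI carried in the IPComp header, and like the ESP/AH cases it must be stored in network byte order: read the big-endian CPI into host order with ntohs(), then write it back as a 32-bit big-endian word with htonl(). The previous ntohl() produced a byte-swapped SPI on little-endian machines. A standalone check of the conversion:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* CPI field of the IPComp header: 0x1234 on the wire. */
        unsigned char wire[2] = { 0x12, 0x34 };
        uint16_t cpi_be;
        memcpy(&cpi_be, wire, sizeof(cpi_be));

        uint32_t spi = htonl(ntohs(cpi_be));    /* 32-bit SPI, network order */
        unsigned char out[4];
        memcpy(out, &spi, sizeof(spi));

        printf("spi bytes: %02x %02x %02x %02x\n",
               out[0], out[1], out[2], out[3]);  /* 00 00 12 34 on any host */
        return 0;
    }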
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c3725fe2a8fb..b469c8b54613 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -57,12 +57,12 @@ int xfrm_register_type(struct xfrm_type *type, unsigned short family)
57 return -EAFNOSUPPORT; 57 return -EAFNOSUPPORT;
58 typemap = afinfo->type_map; 58 typemap = afinfo->type_map;
59 59
60 write_lock(&typemap->lock); 60 write_lock_bh(&typemap->lock);
61 if (likely(typemap->map[type->proto] == NULL)) 61 if (likely(typemap->map[type->proto] == NULL))
62 typemap->map[type->proto] = type; 62 typemap->map[type->proto] = type;
63 else 63 else
64 err = -EEXIST; 64 err = -EEXIST;
65 write_unlock(&typemap->lock); 65 write_unlock_bh(&typemap->lock);
66 xfrm_policy_put_afinfo(afinfo); 66 xfrm_policy_put_afinfo(afinfo);
67 return err; 67 return err;
68} 68}
@@ -78,12 +78,12 @@ int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
78 return -EAFNOSUPPORT; 78 return -EAFNOSUPPORT;
79 typemap = afinfo->type_map; 79 typemap = afinfo->type_map;
80 80
81 write_lock(&typemap->lock); 81 write_lock_bh(&typemap->lock);
82 if (unlikely(typemap->map[type->proto] != type)) 82 if (unlikely(typemap->map[type->proto] != type))
83 err = -ENOENT; 83 err = -ENOENT;
84 else 84 else
85 typemap->map[type->proto] = NULL; 85 typemap->map[type->proto] = NULL;
86 write_unlock(&typemap->lock); 86 write_unlock_bh(&typemap->lock);
87 xfrm_policy_put_afinfo(afinfo); 87 xfrm_policy_put_afinfo(afinfo);
88 return err; 88 return err;
89} 89}
@@ -1251,7 +1251,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
1251 return -EINVAL; 1251 return -EINVAL;
1252 if (unlikely(afinfo->family >= NPROTO)) 1252 if (unlikely(afinfo->family >= NPROTO))
1253 return -EAFNOSUPPORT; 1253 return -EAFNOSUPPORT;
1254 write_lock(&xfrm_policy_afinfo_lock); 1254 write_lock_bh(&xfrm_policy_afinfo_lock);
1255 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 1255 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
1256 err = -ENOBUFS; 1256 err = -ENOBUFS;
1257 else { 1257 else {
@@ -1268,7 +1268,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
1268 afinfo->garbage_collect = __xfrm_garbage_collect; 1268 afinfo->garbage_collect = __xfrm_garbage_collect;
1269 xfrm_policy_afinfo[afinfo->family] = afinfo; 1269 xfrm_policy_afinfo[afinfo->family] = afinfo;
1270 } 1270 }
1271 write_unlock(&xfrm_policy_afinfo_lock); 1271 write_unlock_bh(&xfrm_policy_afinfo_lock);
1272 return err; 1272 return err;
1273} 1273}
1274EXPORT_SYMBOL(xfrm_policy_register_afinfo); 1274EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@@ -1280,7 +1280,7 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
1280 return -EINVAL; 1280 return -EINVAL;
1281 if (unlikely(afinfo->family >= NPROTO)) 1281 if (unlikely(afinfo->family >= NPROTO))
1282 return -EAFNOSUPPORT; 1282 return -EAFNOSUPPORT;
1283 write_lock(&xfrm_policy_afinfo_lock); 1283 write_lock_bh(&xfrm_policy_afinfo_lock);
1284 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 1284 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
1285 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 1285 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
1286 err = -EINVAL; 1286 err = -EINVAL;
@@ -1294,7 +1294,7 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
1294 afinfo->garbage_collect = NULL; 1294 afinfo->garbage_collect = NULL;
1295 } 1295 }
1296 } 1296 }
1297 write_unlock(&xfrm_policy_afinfo_lock); 1297 write_unlock_bh(&xfrm_policy_afinfo_lock);
1298 return err; 1298 return err;
1299} 1299}
1300EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 1300EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a8e14dc1b04e..93a2f36ad3db 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -805,16 +805,22 @@ void xfrm_replay_notify(struct xfrm_state *x, int event)
805 case XFRM_REPLAY_UPDATE: 805 case XFRM_REPLAY_UPDATE:
806 if (x->replay_maxdiff && 806 if (x->replay_maxdiff &&
807 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && 807 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
808 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) 808 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
809 return; 809 if (x->xflags & XFRM_TIME_DEFER)
810 event = XFRM_REPLAY_TIMEOUT;
811 else
812 return;
813 }
810 814
811 break; 815 break;
812 816
813 case XFRM_REPLAY_TIMEOUT: 817 case XFRM_REPLAY_TIMEOUT:
814 if ((x->replay.seq == x->preplay.seq) && 818 if ((x->replay.seq == x->preplay.seq) &&
815 (x->replay.bitmap == x->preplay.bitmap) && 819 (x->replay.bitmap == x->preplay.bitmap) &&
816 (x->replay.oseq == x->preplay.oseq)) 820 (x->replay.oseq == x->preplay.oseq)) {
821 x->xflags |= XFRM_TIME_DEFER;
817 return; 822 return;
823 }
818 824
819 break; 825 break;
820 } 826 }
@@ -825,8 +831,10 @@ void xfrm_replay_notify(struct xfrm_state *x, int event)
825 km_state_notify(x, &c); 831 km_state_notify(x, &c);
826 832
827 if (x->replay_maxage && 833 if (x->replay_maxage &&
828 !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) 834 !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) {
829 xfrm_state_hold(x); 835 xfrm_state_hold(x);
836 x->xflags &= ~XFRM_TIME_DEFER;
837 }
830} 838}
831EXPORT_SYMBOL(xfrm_replay_notify); 839EXPORT_SYMBOL(xfrm_replay_notify);
832 840
@@ -836,10 +844,15 @@ static void xfrm_replay_timer_handler(unsigned long data)
836 844
837 spin_lock(&x->lock); 845 spin_lock(&x->lock);
838 846
839 if (xfrm_aevent_is_on() && x->km.state == XFRM_STATE_VALID) 847 if (x->km.state == XFRM_STATE_VALID) {
840 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT); 848 if (xfrm_aevent_is_on())
849 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
850 else
851 x->xflags |= XFRM_TIME_DEFER;
852 }
841 853
842 spin_unlock(&x->lock); 854 spin_unlock(&x->lock);
855 xfrm_state_put(x);
843} 856}
844 857
845int xfrm_replay_check(struct xfrm_state *x, u32 seq) 858int xfrm_replay_check(struct xfrm_state *x, u32 seq)
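The XFRM_TIME_DEFER flag added above handles the replay timer firing while async events are disabled: the timer handler just marks the state deferred (and now also drops its timer reference with xfrm_state_put), and the next XFRM_REPLAY_UPDATE promotes the deferred mark into a TIMEOUT notification instead of silently returning; the kernel clears the flag when it rearms the timer. A compact standalone model of that handshake, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct state { bool defer; unsigned seq, preplay_seq; };

    /* Timer fired but nobody is listening right now: remember it. */
    static void timer_fired(struct state *x, bool aevent_on)
    {
        if (aevent_on)
            printf("notify: timeout\n");
        else
            x->defer = true;
    }

    /* Later traffic update: a pending deferred timeout is promoted. */
    static void replay_update(struct state *x, unsigned maxdiff)
    {
        if (maxdiff && x->seq - x->preplay_seq < maxdiff && !x->defer)
            return;                     /* nothing worth reporting yet */
        printf("notify: %s\n", x->defer ? "deferred timeout" : "update");
        x->defer = false;               /* the kernel clears this on timer rearm */
    }

    int main(void)
    {
        struct state x = { false, 5, 3 };
        timer_fired(&x, false);         /* aevents off: defer            */
        replay_update(&x, 10);          /* small diff, but deferred fires */
        return 0;
    }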
@@ -1048,7 +1061,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1048 return -EINVAL; 1061 return -EINVAL;
1049 if (unlikely(afinfo->family >= NPROTO)) 1062 if (unlikely(afinfo->family >= NPROTO))
1050 return -EAFNOSUPPORT; 1063 return -EAFNOSUPPORT;
1051 write_lock(&xfrm_state_afinfo_lock); 1064 write_lock_bh(&xfrm_state_afinfo_lock);
1052 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) 1065 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1053 err = -ENOBUFS; 1066 err = -ENOBUFS;
1054 else { 1067 else {
@@ -1056,7 +1069,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1056 afinfo->state_byspi = xfrm_state_byspi; 1069 afinfo->state_byspi = xfrm_state_byspi;
1057 xfrm_state_afinfo[afinfo->family] = afinfo; 1070 xfrm_state_afinfo[afinfo->family] = afinfo;
1058 } 1071 }
1059 write_unlock(&xfrm_state_afinfo_lock); 1072 write_unlock_bh(&xfrm_state_afinfo_lock);
1060 return err; 1073 return err;
1061} 1074}
1062EXPORT_SYMBOL(xfrm_state_register_afinfo); 1075EXPORT_SYMBOL(xfrm_state_register_afinfo);
@@ -1068,7 +1081,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1068 return -EINVAL; 1081 return -EINVAL;
1069 if (unlikely(afinfo->family >= NPROTO)) 1082 if (unlikely(afinfo->family >= NPROTO))
1070 return -EAFNOSUPPORT; 1083 return -EAFNOSUPPORT;
1071 write_lock(&xfrm_state_afinfo_lock); 1084 write_lock_bh(&xfrm_state_afinfo_lock);
1072 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { 1085 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1073 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) 1086 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1074 err = -EINVAL; 1087 err = -EINVAL;
@@ -1078,7 +1091,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1078 afinfo->state_bydst = NULL; 1091 afinfo->state_bydst = NULL;
1079 } 1092 }
1080 } 1093 }
1081 write_unlock(&xfrm_state_afinfo_lock); 1094 write_unlock_bh(&xfrm_state_afinfo_lock);
1082 return err; 1095 return err;
1083} 1096}
1084EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1097EXPORT_SYMBOL(xfrm_state_unregister_afinfo);